input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>pyflight/requester.py<gh_stars>1-10
"""
Provides an easy-to-use interface to use pyflight with.
"""
import re
from typing import List, Optional, Union
from .api import requester
from .result import Result
BASE_URL = 'https://www.googleapis.com/qpxExpress/v1/trips/search?key='
__API_KEY = ''
MAX_PRICE_REGEX = re.compile(r'[A-Z]{3}\d+(\.\d+)?')
ALLOWED_PREFERRED_CABINS = 'COACH', 'PREMIUM_COACH', 'BUSINESS', 'FIRST'
class Slice:
    """Represents a slice that makes up a single itinerary of this trip.

    For example, for one-way trips, usually one slice is used.
    A round trip would use two slices. (e.g. SFO - FRA - SFO)
    Optional attributes default to ``None`` or an empty list if applicable,
    but can be set if wanted.

    Attributes
    ----------
    raw_data : dict
        The raw JSON / dictionary data which will be sent to the API.
    origin : str
        The airport or city IATA designator of the origin.
    destination : str
        The airport or city IATA designator of the destination.
    date : str
        The date on which this flight should take place,
        in the format YYYY-MM-DD.
    max_stops : Optional[int]
        The maximum amount of stops that the passenger(s)
        are willing to accept on this slice.
    max_connection_duration : Optional[int]
        The longest duration (in minutes) between two legs
        that passengers are willing to accept.
    preferred_cabin : Optional[str]
        The preferred cabin for this slice.
        Allowed values are COACH, PREMIUM_COACH, BUSINESS, and FIRST.
        A :class:`ValueError` is raised if a value is assigned that is
        not listed above.
    earliest_departure_time : Optional[str]
        The earliest time for departure, local to the point of departure.
        Formatted as HH:MM.
    latest_departure_time : Optional[str]
        The latest time for departure, local to the point of departure.
        Formatted as HH:MM.
    permitted_carriers : List[str]
        A list of 2-letter IATA airline designators for
        which results should be returned.
    prohibited_carriers : List[str]
        A list of 2-letter IATA airline designators,
        for which no results will be returned.
    """

    def __init__(self, origin: str, destination: str, date: str):
        """Create a new slice.

        Parameters
        ----------
        origin : str
            The airport or city IATA designator of the origin.
        destination : str
            The airport or city IATA designator of the destination.
        date : str
            The date on which this flight should take place,
            in the format YYYY-MM-DD.
        """
        self.raw_data = {
            'kind': 'qpxexpress#sliceInput',
            'origin': origin,
            'destination': destination,
            'date': date
        }

    @property
    def origin(self) -> str:
        """The airport or city IATA designator of the origin."""
        return self.raw_data['origin']

    @origin.setter
    def origin(self, new_origin: str):
        self.raw_data['origin'] = new_origin

    @property
    def destination(self) -> str:
        """The airport or city IATA designator of the destination."""
        return self.raw_data['destination']

    @destination.setter
    def destination(self, new_destination: str):
        self.raw_data['destination'] = new_destination

    @property
    def date(self) -> str:
        """
        The date on which this flight should take place,
        in the format YYYY-MM-DD.
        """
        return self.raw_data['date']

    @date.setter
    def date(self, new_date: str):
        self.raw_data['date'] = new_date

    @property
    def max_stops(self) -> Optional[int]:
        """
        The maximum amount of stops that the passenger(s)
        are willing to accept on this slice.
        """
        return self.raw_data.get('maxStops', None)

    @max_stops.setter
    def max_stops(self, max_stops: int):
        # Fixed: previously wrote to the snake_case key 'max_stops', so the
        # value never round-tripped through the getter (which reads
        # 'maxStops') and never reached the API in the expected field.
        self.raw_data['maxStops'] = max_stops

    @property
    def max_connection_duration(self) -> Optional[int]:
        """
        The longest duration (in minutes) between two legs
        that passengers are willing to accept.
        """
        return self.raw_data.get('maxConnectionDuration', None)

    @max_connection_duration.setter
    def max_connection_duration(self, new_max_duration: int):
        self.raw_data['maxConnectionDuration'] = new_max_duration

    @property
    def preferred_cabin(self) -> Optional[str]:
        """
        The preferred cabin for this slice.
        Allowed values are COACH, PREMIUM_COACH, BUSINESS, and FIRST.
        A :class:`ValueError` is raised if a value is assigned that is
        not listed above.
        """
        return self.raw_data.get('preferredCabin', None)

    @preferred_cabin.setter
    def preferred_cabin(self, new_preferred_cabin: str):
        if new_preferred_cabin not in ALLOWED_PREFERRED_CABINS:
            raise ValueError('Invalid value for preferred_cabin')
        self.raw_data['preferredCabin'] = new_preferred_cabin

    @property
    def _permitted_departure_time(self) -> dict:
        # Lazily create the nested time-range object the first time any
        # departure-time attribute is read or written.
        if 'permittedDepartureTime' not in self.raw_data:
            self.raw_data['permittedDepartureTime'] = {
                'kind': 'qpxexpress#timeOfDayRange'
            }
        return self.raw_data['permittedDepartureTime']

    @property
    def earliest_departure_time(self) -> Optional[str]:
        """
        The earliest time for departure, local to the point of departure.
        Formatted as HH:MM.
        """
        return self._permitted_departure_time.get('earliestTime', None)

    @earliest_departure_time.setter
    def earliest_departure_time(self, new_edt: str):
        self._permitted_departure_time['earliestTime'] = new_edt

    @property
    def latest_departure_time(self) -> Optional[str]:
        """
        The latest time for departure, local to the point of departure.
        Formatted as HH:MM.
        """
        return self._permitted_departure_time.get('latestTime', None)

    @latest_departure_time.setter
    def latest_departure_time(self, new_ldt: str):
        self._permitted_departure_time['latestTime'] = new_ldt

    @property
    def permitted_carriers(self) -> List[str]:
        """
        A list of 2-letter IATA airline designators for
        which results should be returned.
        """
        return self.raw_data.get('permittedCarrier', [])

    @permitted_carriers.setter
    def permitted_carriers(self, new_permitted_carriers: list):
        self.raw_data['permittedCarrier'] = new_permitted_carriers

    @property
    def prohibited_carriers(self) -> List[str]:
        """
        A list of 2-letter IATA airline designators,
        for which no results will be returned.
        """
        return self.raw_data.get('prohibitedCarrier', [])

    @prohibited_carriers.setter
    def prohibited_carriers(self, new_prohibited_carriers: list):
        self.raw_data['prohibitedCarrier'] = new_prohibited_carriers
class Request:
r"""Represents a Request that can be sent to the API instead
of using a dictionary manually.
Please note that each Request requires at least
1 adult or senior passenger.
Optional attributes default to ``None``.
Attributes
----------
raw_data : dict
The raw JSON / dictionary data which will be sent to the API.
adult_count : int
The amount of passengers that are adults.
children_count : int
The amount of passengers that are children.
infant_in_lap_count : int
The amount of passengers that are infants
travelling in the lap of an adult.
infant_in_seat_count : int
The amount of passengers that are infants assigned a seat.
senior_count : int
The amount of passengers that are senior citizens.
max_price : Optional[str]
The maximum price below which results should be returned.
The currency is specified in ISO-4217, and setting
this attribute is validated using the regex ``[A-Z]{3}\d+(\.\d+)?``.
If it does not match, a :class:`ValueError` is raised.
sale_country : Optional[str]
The IATA country code representing the point of sale.
Determines the currency.
ticketing_country : Optional[str]
The IATA country code representing the point of ticketing,
for example ``DE``.
refundable : Optional[bool]
Whether to return only results with refundable fares or not.
solution_count : int
The amount of solutions to return. Defaults to 1, maximum is 500.
Raises a :class:`ValueError` when trying to
assign a value outside of 1 to 500.
"""
def __init__(self):
    """Create a new, empty Request: no passengers, no slices, one solution."""
    request_body = {
        'passengers': {},
        'slice': [],
        'solutions': 1
    }
    self.raw_data = {'request': request_body}
def add_slice(self, slice_: Slice):
    """Append the given Slice's raw data to this Request.

    Parameters
    ----------
    slice_ : :class:`Slice`
        The Slice to be added to the request.

    Returns
    -------
    self
        To ease chaining of this function, ``self`` is returned.
    """
    slices = self.raw_data['request']['slice']
    slices.append(slice_.raw_data)
    return self
def as_dict(self) -> dict:
    """
    Return the raw dictionary for this request — the exact payload
    that is sent to the API when calling send_sync or send_async.
    Mutating the returned dict mutates this Request.
    """
    return self.raw_data
def send_sync(self, use_containers: bool = True) -> Union[Result, dict]:
    """Synchronously execute this request.

    Internally, this delegates to the module-level
    :meth:`pyflight.send_sync()`; see its documentation for the meaning of
    ``use_containers`` and the return type.
    NOTE(review): the module-level ``send_sync`` is not visible in this
    chunk — presumably defined later in this file; confirm.
    """
    return send_sync(self, use_containers=use_containers)
async def send_async(self, use_containers: bool = True) -> Union[Result, dict]:
    """Asynchronously execute this request.

    Internally, this delegates to the module-level
    :meth:`pyflight.send_async()`; see its documentation for the meaning of
    ``use_containers`` and the return type.
    NOTE(review): the module-level ``send_async`` is not visible in this
    chunk — presumably defined later in this file; confirm.
    """
    return send_async(self, use_containers=use_containers)
@property
def adult_count(self) -> int:
    """The number of adult passengers (0 if never set)."""
    passengers = self.raw_data['request']['passengers']
    return passengers.get('adultCount', 0)

@adult_count.setter
def adult_count(self, count: int):
    passengers = self.raw_data['request']['passengers']
    passengers['adultCount'] = count
@property
def children_count(self) -> int:
    """The number of child passengers (0 if never set)."""
    # NOTE(review): the QPX Express API documents this field as
    # 'childCount'; this code consistently uses 'childrenCount' on both
    # read and write — confirm against the API schema.
    passengers = self.raw_data['request']['passengers']
    return passengers.get('childrenCount', 0)

@children_count.setter
def children_count(self, count: int):
    passengers = self.raw_data['request']['passengers']
    passengers['childrenCount'] = count
@property
def infant_in_lap_count(self) -> int:
    """
    The number of infant passengers travelling in the lap of an adult
    (0 if never set).
    """
    passengers = self.raw_data['request']['passengers']
    return passengers.get('infantInLapCount', 0)

@infant_in_lap_count.setter
def infant_in_lap_count(self, count: int):
    passengers = self.raw_data['request']['passengers']
    passengers['infantInLapCount'] = count
@property
def infant_in_seat_count(self) -> int:
    """The number of infant passengers assigned their own seat (0 if never set)."""
    passengers = self.raw_data['request']['passengers']
    return passengers.get('infantInSeatCount', 0)

@infant_in_seat_count.setter
def infant_in_seat_count(self, count: int):
    passengers = self.raw_data['request']['passengers']
    passengers['infantInSeatCount'] = count
@property
def senior_count(self) -> int:
    """The number of senior-citizen passengers (0 if never set)."""
    passengers = self.raw_data['request']['passengers']
    return passengers.get('seniorCount', 0)

@senior_count.setter
def senior_count(self, count: int):
    passengers = self.raw_data['request']['passengers']
    passengers['seniorCount'] = count
@property
def max_price(self) -> Optional[str]:
    """
    The maximum price below which results should be returned,
    specified in ISO-4217 format, e.g. ``EUR500``.
    """
    return self.raw_data['request'].get('maxPrice', None)

@max_price.setter
def max_price(self, max_price: str):
    # Fixed: re.match only anchors at the *start* of the string, so a value
    # with trailing garbage (e.g. 'USD100abc') previously passed validation.
    # fullmatch requires the whole string to match the ISO-4217 pattern.
    if not MAX_PRICE_REGEX.fullmatch(max_price):
        err_msg = 'max_price given (\'{}\') does not match ISO-4217 format'
        raise ValueError(err_msg.format(max_price))
    self.raw_data['request']['maxPrice'] = max_price
@property
def sale_country(self) -> Optional[str]:
    """
    The IATA country code representing the point of sale.
    Determines the currency.
    """
    request = self.raw_data['request']
    return request.get('saleCountry', None)

@sale_country.setter
def sale_country(self, sale_country: str):
    request = self.raw_data['request']
    request['saleCountry'] = sale_country
@property
def ticketing_country(self) -> | |
<filename>models.py<gh_stars>0
'''
@Author: ConghaoWong
@Date: 2019-12-20 09:39:34
LastEditors: <NAME>
LastEditTime: 2020-09-16 16:27:24
@Description: classes and methods of training model
'''
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tqdm import tqdm
from GridRefine import SocialRefine_one
from helpmethods import calculate_ADE_FDE_numpy, dir_check, list2array
from sceneFeature import TrajectoryMapManager
from visual import TrajVisual
class Base_Model():
"""
Base model for prediction.
Following items should be given when using this model:
```
self.create_model(self), # create prediction model
self.loss(self, model_output, gt, obs='null'), # loss function when training model
self.loss_eval(self, model_output, gt, obs='null'), # loss function when test model
self.forward_train(self, mode_inputs), # model result in training steps
self.forward_test(self, test_tensor:list). # model result in test steps
```
"""
def __init__(self, train_info, args):
    """Store the run configuration and the dataset bookkeeping dict.

    Args:
        train_info: dict with train/test agents and sample counts (see get_data)
        args: parsed command-line arguments controlling training/testing
    """
    self.train_info = train_info
    self.args = args
def run_commands(self):
    """Entry point: load dataset info, then either train from scratch
    (when --load is 'null') or restore a checkpoint and optionally test it.
    """
    self.get_data()  # gather information related to the training data
    if self.args.load == 'null':
        # no checkpoint requested -> build a fresh model and train it
        self.model, self.optimizer = self.create_model()
        self.model.summary()
        self.train()
    else:
        # restore a trained model together with its saved test agents
        self.model, self.agents_test = self.load_from_checkpoint()
        self.model.summary()
        # NOTE(review): original indentation was lost; this test branch is
        # assumed to belong to the checkpoint-loading path — confirm.
        if self.args.test:
            self.test_batch(
                self.agents_test,
                test_on_neighbors=False,
                batch_size=0.2,  # set 0.5 on toy exp
                social_refine=self.args.sr_enable,
                draw_results=self.args.draw_results,
                save_agents=False,
            )
def get_data(self):
    """Cache frame counts and (when training) the train/test agent sets
    from self.train_info onto the instance.
    """
    args = self.args
    self.obs_frames = args.obs_frames
    self.pred_frames = args.pred_frames
    self.total_frames = self.obs_frames + self.pred_frames
    self.log_dir = dir_check(args.log_dir)
    if args.load != 'null':
        # a checkpoint will be loaded; training data is not needed
        return
    info = self.train_info
    self.agents_train = info['train_data']
    self.agents_test = info['test_data']
    self.train_number = info['train_number']
    self.sample_time = info['sample_time']
def load_from_checkpoint(self):
    """Restore a saved keras model plus its saved test agents.

    When --save_best was used during training, the epoch with the best ADE
    is read back from 'best_ade_epoch.txt' and that epoch's snapshot is
    loaded; otherwise the final '<load>.h5' model is used.

    Returns:
        (model, agents_test)
    """
    base_path = self.args.load + '{}'
    if self.args.save_best:
        # best_ade_epoch.txt stores [best_ade, best_epoch]; index 1 is the epoch
        best_epoch = np.loadtxt(os.path.join(self.args.log_dir, 'best_ade_epoch.txt'))[1].astype(int)
        model = keras.models.load_model(base_path.format('_epoch{}.h5'.format(best_epoch)))
    else:
        model = keras.models.load_model(base_path.format('.h5'))
    # agents were pickled with np.save during train() — allow_pickle required
    agents_test = np.load(base_path.format('test.npy'), allow_pickle=True)
    return model, agents_test
def create_model(self):
    """Build and return ``(model, optimizer)``; subclasses must override.

    Raises:
        NotImplementedError: always, when not overridden by a subclass.
    """
    # Fixed: `raise 'string'` is a TypeError in Python 3 (string exceptions
    # were removed); raise a proper exception so a subclass that forgets to
    # override gets a clear message. The old unreachable
    # `return model, optimizer` referenced undefined names and is removed.
    raise NotImplementedError('MODEL is not defined!')
def loss(self, model_output, gt, obs='null'):
    """
    Training loss, using ADE by default.

    Returns:
        (scalar ADE loss, stacked tensor of all loss terms)
    """
    self.loss_namelist = ['ADE_t']
    # NOTE(review): calculate_ADE is not imported in this file — confirm it
    # is available at module scope.
    ade = calculate_ADE(model_output[0], gt)
    return ade, tf.stack([ade])
def loss_eval(self, model_output, gt, obs='null'):
    """
    Eval metrics, using ADE and FDE by default.

    Returns:
        tuple of two numpy scalars: (ADE, FDE)
    """
    self.loss_eval_namelist = ['ADE', 'FDE']
    prediction = model_output[0]
    # NOTE(review): calculate_ADE / calculate_FDE are not imported in this
    # file — confirm they are available at module scope.
    ade = calculate_ADE(prediction, gt).numpy()
    fde = calculate_FDE(prediction, gt).numpy()
    return ade, fde
def prepare_model_inputs_all(self, input_agents):
    """Stack every agent's observed trajectory and ground truth into
    float32 tensors, keeping the agents' order.

    Returns:
        ([model_inputs, gt], agent_index) where agent_index is simply
        [0, 1, ..., len(input_agents) - 1].

    NOTE(review): test_batch() calls this method with a
    ``calculate_neighbor=`` keyword that this signature does not accept —
    confirm which version of the two is current.
    """
    model_inputs = []
    gt = []
    agent_index = []
    for agent_index_current, agent in enumerate(tqdm(input_agents, desc='Prepare inputs...')):
        model_inputs.append(agent.get_train_traj())
        gt.append(agent.get_gt_traj())
        agent_index.append(agent_index_current)
    model_inputs = tf.cast(tf.stack(model_inputs), tf.float32)
    gt = tf.cast(tf.stack(gt), tf.float32)
    return [model_inputs, gt], agent_index
def prepare_model_inputs_batch(self, train_tensor=0, batch_size=0, init=False):
    """
    Get one mini-batch from the full training tensors, cycling over the
    data with an internal cursor (self.batch_start).

    When called with ``init=True``, resets the cursor and returns the
    total number of training samples instead of a batch.

    Returns:
        (train_inputs, gt, number_of_samples_in_this_batch)
    """
    if init:
        self.batch_start = 0
        self.train_length = len(train_tensor[1])
        return self.train_length
    start = self.batch_start
    end = (self.batch_start + batch_size) % self.train_length
    # take at most one epoch's worth of data per call
    if end < start:
        # the batch wraps around the end of the data: concatenate the tail
        # of the dataset with its head
        if type(train_tensor[0]) == list:
            train_inputs = [
                tf.concat([
                    train_input[start:],
                    train_input[:end],
                ], axis=0) for train_input in train_tensor[0]
            ]
        else:
            train_inputs = tf.concat([
                train_tensor[0][start:],
                train_tensor[0][:end],
            ], axis=0)
        gt = tf.concat([
            train_tensor[1][start:],
            train_tensor[1][:end],
        ], axis=0)
    elif start + batch_size < self.train_length:
        # batch lies entirely inside the data: plain slicing
        if type(train_tensor[0]) == list:
            train_inputs = [train_input[start:end] for train_input in train_tensor[0]]
        else:
            train_inputs = train_tensor[0][start:end]
        gt = train_tensor[1][start:end]
    else:
        # batch ends exactly at the data boundary: return everything
        train_inputs = train_tensor[0]
        gt = train_tensor[1]
    self.batch_start = end
    return train_inputs, gt, len(gt)
def forward_train(self, model_inputs):
    """
    Run one forward pass for training.

    Returns:
        the model output, normalized to a list of tensors.
    """
    output = self.model(model_inputs)
    return output if isinstance(output, list) else [output]
def forward_test(self, test_tensor: list):
    """
    Run one test pass.

    `test_tensor` is a `list`: `test_tensor[0]` holds the model inputs and
    `test_tensor[1]` their ground truths.

    Returns:
        (output_list, ground_truth, model_inputs)
    """
    model_inputs, gt = test_tensor[0], test_tensor[1]
    output = self.model(model_inputs)
    if not isinstance(output, list):
        output = [output]
    return output, gt, model_inputs
def test_during_training(self, test_tensor, input_agents, test_index):
    """
    Run one evaluation pass during training.
    Results will NOT be written back to the input agents.

    Returns:
        (model_output, eval_metrics, ground_truth, input_agents)
    """
    model_output, gt, obs = self.forward_test(test_tensor)
    eval_metrics = self.loss_eval(model_output, gt, obs=obs)
    return model_output, eval_metrics, gt, input_agents
def train(self):
    """
    Train the built model `self.model`.

    Runs a tqdm-driven loop over mini-batches, computing the training loss
    under a GradientTape, periodically evaluating on the test set, saving
    the best-ADE snapshot when --save_best is set, and logging scalars to
    TensorBoard. Final model/agents/results are written under log_dir.
    """
    batch_number = int(np.ceil(self.train_number / self.args.batch_size))
    summary_writer = tf.summary.create_file_writer(self.args.log_dir)
    print('\n-----------------dataset options-----------------')
    if self.args.train_percent[0] and self.args.train_type == 'all':
        print('Sampling data from training sets. ({}x)'.format(self.args.train_percent))
    if self.args.reverse:
        print('Using reverse data to train. (2x)')
    if self.args.add_noise:
        print('Using noise data to train. ({}x)'.format(self.args.add_noise))
    if self.args.rotate:
        print('Using rotate data to train. ({}x)'.format(self.args.rotate))
    print('train_number = {}, total {}x train samples.'.format(self.train_number, self.sample_time))
    print('-----------------training options-----------------')
    print('model_name = {}, \ndataset = {},\nbatch_number = {},\nbatch_size = {},\nlr={}'.format(
        self.args.model_name,
        self.args.test_set,
        batch_number,
        self.args.batch_size,
        self.args.lr,
    ))
    print('\nPrepare training data...')
    self.train_tensor, self.train_index = self.prepare_model_inputs_all(self.agents_train)
    self.test_tensor, self.test_index = self.prepare_model_inputs_all(self.agents_test)
    # reset the batch cursor and get the total sample count
    train_length = self.prepare_model_inputs_batch(self.train_tensor, init=True)
    if self.args.save_model:
        # save test agents and args next to the model so the checkpoint can
        # be reloaded later by load_from_checkpoint()
        self.test_data_save_path = os.path.join(self.args.log_dir, '{}.npy'.format(self.args.model_name + '{}'))
        np.save(self.test_data_save_path.format('test'), self.agents_test)
        np.save(self.test_data_save_path.format('args'), self.args)
    test_results = []
    test_loss_dict = dict()
    test_loss_dict['-'] = 0
    # re-derive the number of optimization steps from epochs * data length
    batch_number = 1 + (train_length * self.args.epochs) // self.args.batch_size
    print(batch_number, train_length, self.args.epochs, self.args.batch_size)
    time_bar = tqdm(range(batch_number), desc='Training...')
    best_ade = 100.0
    best_epoch = 0
    for batch in time_bar:
        ADE = 0
        ADE_move_average = tf.cast(0.0, dtype=tf.float32)  # moving average of the loss
        loss_list = []
        obs_current, gt_current, train_sample_number = self.prepare_model_inputs_batch(self.train_tensor, self.args.batch_size)
        if train_sample_number < 20:
            # skip degenerate (too small) batches
            continue
        with tf.GradientTape() as tape:
            model_output_current = self.forward_train(obs_current)
            loss_ADE, loss_list_current = self.loss(model_output_current, gt_current, obs=obs_current)
            ADE_move_average = 0.7 * loss_ADE + 0.3 * ADE_move_average
            ADE += loss_ADE
        grads = tape.gradient(ADE_move_average, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        loss_list.append(loss_list_current)
        loss_list = tf.reduce_mean(tf.stack(loss_list), axis=0).numpy()
        epoch = (batch * self.args.batch_size) // train_length
        if (epoch >= self.args.start_test_percent * self.args.epochs) and (epoch % self.args.test_step == 0):
            model_output, loss_eval, _, _ = self.test_during_training(self.test_tensor, self.agents_test, self.test_index)
            test_results.append(loss_eval)
            test_loss_dict = create_loss_dict(loss_eval, self.loss_eval_namelist)
            ade_current = loss_eval[0]
            if ade_current <= best_ade:
                best_ade = ade_current
                best_epoch = epoch
                if self.args.save_best:
                    self.model.save(os.path.join(self.args.log_dir, '{}_epoch{}.h5'.format(self.args.model_name, epoch)))
                    np.savetxt(os.path.join(self.args.log_dir, 'best_ade_epoch.txt'), np.array([best_ade, best_epoch]))
        if epoch % 2 == 0:
            train_loss_dict = create_loss_dict(loss_list, self.loss_namelist)
            loss_dict = dict(train_loss_dict, **test_loss_dict)  # merge the two dicts
            time_bar.set_postfix(loss_dict)
            with summary_writer.as_default():
                for loss_name in loss_dict:
                    value = loss_dict[loss_name]
                    tf.summary.scalar(loss_name, value, step=epoch)
    print('Training done.')
    print('Tensorboard training log file is saved at "{}"'.format(self.args.log_dir))
    print('To open this log file, please use "tensorboard --logdir {} --port 54393"'.format(self.args.log_dir))
    latest_epochs = 10
    test_results = list2array(test_results)
    latest_results = np.mean(test_results[-latest_epochs-1:-1, :], axis=0)
    print('In latest {} test epochs, average test loss = {}'.format(
        latest_epochs,
        latest_results
    ))
    np.savetxt(os.path.join(self.args.log_dir, 'train_log.txt'), list2array(test_results))
    if self.args.save_model:
        self.model_save_path = os.path.join(self.args.log_dir, '{}.h5'.format(self.args.model_name))
        self.model.save(self.model_save_path)
        print('Trained model is saved at "{}".'.format(self.model_save_path.split('.h5')[0]))
        print('To re-test this model, please use "python main.py --load {}".'.format(self.model_save_path.split('.h5')[0]))
        model_name = self.model_save_path.split('.h5')[0].split('/')[-1]
        np.savetxt('./results/result-{}{}.txt'.format(model_name, self.args.test_set), latest_results)
        with open('./results/path-{}{}.txt'.format(model_name, self.args.test_set), 'w+') as f:
            f.write(self.model_save_path.split('.h5')[0])
def test_batch(self, agents_test, test_on_neighbors=False, draw_results=True, batch_size=0.2, save_agents=False, social_refine=False):
    """
    Eval model on test sets. Results WILL be written to the input agents.

    The test can run in time segments: ``batch_size`` is the fraction of
    the test video's frame span per segment. ``test_on_neighbors`` is
    switched on automatically when ``social_refine == True``.

    Returns:
        list of agents (in batch order) with predictions written in.
    """
    print('-----------------Test options-----------------')
    print('model_name = {},\ndataset = {},\ntest_length= {} * length of test video.\n'.format(
        self.args.model_name,
        self.args.test_set,
        batch_size,
    ))
    start_frame = agents_test[0].obs_frame
    end_frame = agents_test[-1].obs_frame
    frame_length = end_frame - start_frame
    # sort agents into time segments by observation frame
    agents_batch = dict()
    for agent in agents_test:
        batch_index = min(
            int((agent.obs_frame - start_frame) / (batch_size * frame_length)),
            int(1 / batch_size) - 1,
        )
        # Fixed: the original if/else appended only when the key already
        # existed, silently dropping the FIRST agent of every segment.
        if batch_index not in agents_batch:
            agents_batch[batch_index] = []
        agents_batch[batch_index].append(agent)
    if social_refine:
        test_on_neighbors = True
    agents_batch, test_index = self.prepare_test_agents_batch(agents_batch, test_on_neighbors)
    # run test segment by segment
    all_loss = []
    all_loss_batch = []
    for batch_index in agents_batch:
        batch_loss = []
        [test_tensor, _], _ = self.prepare_model_inputs_all(agents_batch[batch_index], calculate_neighbor=test_on_neighbors)
        pred = self.forward_train(test_tensor)
        pred = pred[0].numpy()
        for agent_index, index in enumerate(test_index[batch_index]):
            current_pred = pred[index]
            current_agent = agents_batch[batch_index][agent_index]
            current_agent.write_pred(current_pred[0])
            if test_on_neighbors:
                current_agent.write_pred_neighbor(current_pred[1:])
            if social_refine:
                current_agent.write_pred_sr(SocialRefine_one(
                    agent=current_agent,
                    args=self.args,
                    epochs=10,
                    save=False,
                ))
            loss = current_agent.calculate_loss(SR=social_refine)
            all_loss.append(loss)
            batch_loss.append(loss)
        all_loss_batch.append(np.mean(np.stack(batch_loss), axis=0))
    average_loss = np.mean(np.stack(all_loss), axis=0)
    print('test_loss={}\nTest done.'.format(create_loss_dict(average_loss, ['ADE', 'FDE'])))
    # Fixed: result_agents is now always built, so the final return no
    # longer raises NameError when both draw_results and save_agents are
    # False.
    result_agents = []
    for batch_index in agents_batch:
        result_agents += agents_batch[batch_index]
    if draw_results:
        # draw results only (overlaying on video frames via TrajVisual was
        # disabled in the original source)
        for index in range(len(result_agents)):
            result_agents[index].draw_results(self.log_dir, '{}.png'.format(index), draw_neighbors=False)
    if save_agents:
        np.save(os.path.join(self.log_dir, 'pred.npy'), result_agents)
    return result_agents
def test(self, agents_test, test_on_neighbors=False, social_refine=True, draw_results=True, batch_size=0.2, save_agents=False):
    """
    Eval model on test sets in a single pass (no time segmentation).
    Results WILL be written to the input agents.

    NOTE(review): the neighbor / social-refine branches are commented out
    below, so test_on_neighbors, social_refine and batch_size currently
    have no effect — confirm whether test_batch() supersedes this method.
    """
    all_loss = []
    loss_name_list = ['ADE', 'FDE']
    loss_function = calculate_ADE_FDE_numpy
    self.test_tensor, self.test_index = self.prepare_model_inputs_all(self.agents_test)
    pred = self.forward_train(self.test_tensor)
    for index in tqdm(range(len(agents_test)), desc='Testing...'):
        obs = agents_test[index].get_train_traj().reshape([1, agents_test[index].obs_length, 2])
        # if test_on_neighbors and agents_test[index].neighbor_number > 0:
        #     obs_neighbor = (np.stack(agents_test[index].get_neighbor_traj())).reshape([agents_test[index].neighbor_number, agents_test[index].obs_length, 2])
        #     obs = np.concatenate([obs, obs_neighbor], axis=0)
        agents_test[index].write_pred(pred[0].numpy()[index])
        # if test_on_neighbors:
        #     agents_test[index].write_pred_neighbor(pred[1:].numpy()[index])
        # if social_refine:
        #     agents_test[index].write_pred_sr(SocialRefine_one(agents_test[index], self.args_old))
        if draw_results:
            agents_test[index].draw_results(self.log_dir, '{}.png'.format(index), draw_neighbors=False  # test_on_neighbors
            )
        all_loss.append(agents_test[index].calculate_loss())
    loss = np.mean(np.stack(all_loss), axis=0)
    print('test_loss={}'.format(create_loss_dict(loss, loss_name_list)))
    # for l in loss:
    #     print(loss, end='\t')
    print('\nTest done.')
    if save_agents:
        np.save(os.path.join(self.log_dir, 'pred.npy'), agents_test)
    return agents_test
def prepare_test_agents_batch(self, agents_batch:dict, test_on_neighbors=False):
"""
Prepare test agents and save test order. (When test on neighbors of current agent)
returns: Test agents (in batch order) `agents_batch` and their | |
import ast
import importlib
import re
from inspect import isclass
from mimetypes import add_type, guess_type
import numpy as np
import pandas as pd
import woodwork as ww
from woodwork.pandas_backport import guess_datetime_format
# Dictionary mapping formats/content types to the appropriate pandas read function
type_to_read_func_map = {
"csv": pd.read_csv,
"text/csv": pd.read_csv,
"parquet": pd.read_parquet,
"application/parquet": pd.read_parquet,
"arrow": pd.read_feather,
"application/arrow": pd.read_feather,
"feather": pd.read_feather,
"application/feather": pd.read_feather,
"orc": pd.read_orc,
"application/orc": pd.read_orc,
}
PYARROW_ERR_MSG = (
"The pyarrow library is required to read from parquet/arrow/feather files.\n"
"Install via pip:\n"
" pip install 'pyarrow>=3.0.0'\n"
"Install via conda:\n"
" conda install 'pyarrow>=3.0.0'"
)
# Add new mimetypes
add_type("application/parquet", ".parquet")
add_type("application/arrow", ".arrow")
add_type("application/feather", ".feather")
add_type("application/orc", ".orc")
def import_or_none(library):
    """Attempt to import the requested library.

    Args:
        library (str): the name of the library

    Returns: the library if it is installed, else None
    """
    try:
        module = importlib.import_module(library)
    except ImportError:
        module = None
    return module
def camel_to_snake(s):
    """Convert a CamelCase identifier string to snake_case."""
    # First break "Xy" words off whatever precedes them, then separate
    # lower/digit-to-upper boundaries (handles acronyms like "HTTPResponse").
    first_pass = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", s)
    second_pass = re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", first_pass)
    return second_pass.lower()
def _convert_input_to_set(semantic_tags, error_language="semantic_tags", validate=True):
"""Takes input as a single string, a list of strings, or a set of strings
and returns a set with the supplied values. If no values are supplied,
an empty set will be returned."""
if not semantic_tags:
return set()
if validate:
_validate_tags_input_type(semantic_tags, error_language)
if isinstance(semantic_tags, str):
return {semantic_tags}
if isinstance(semantic_tags, list):
semantic_tags = set(semantic_tags)
if validate:
_validate_string_tags(semantic_tags, error_language)
return semantic_tags
def _validate_tags_input_type(semantic_tags, error_language):
if type(semantic_tags) not in [list, set, str]:
raise TypeError(f"{error_language} must be a string, set or list")
def _validate_string_tags(semantic_tags, error_language):
if not all([isinstance(tag, str) for tag in semantic_tags]):
raise TypeError(f"{error_language} must contain only strings")
def read_file(
    filepath=None,
    content_type=None,
    name=None,
    index=None,
    time_index=None,
    semantic_tags=None,
    logical_types=None,
    use_standard_tags=True,
    column_origins=None,
    replace_nan=False,
    validate=True,
    **kwargs,
):
    """Read data from the specified file and return a DataFrame with initialized Woodwork typing information.

    Note:
        As the engine `fastparquet` cannot handle nullable pandas dtypes, `pyarrow` will be used
        for reading from parquet and arrow.

    Args:
        filepath (str): A valid string path to the file to read
        content_type (str): Content type of file to read. Inferred from the
            file extension via mimetypes.guess_type when not given.
        name (str, optional): Name used to identify the DataFrame.
        index (str, optional): Name of the index column.
        time_index (str, optional): Name of the time index column.
        semantic_tags (dict, optional): Dictionary mapping column names in the dataframe to the
            semantic tags for the column. The keys in the dictionary should be strings
            that correspond to columns in the underlying dataframe. There are two options for
            specifying the dictionary values:
            (str): If only one semantic tag is being set, a single string can be used as a value.
            (list[str] or set[str]): If multiple tags are being set, a list or set of strings can be
            used as the value.
            Semantic tags will be set to an empty set for any column not included in the
            dictionary.
        logical_types (dict[str -> LogicalType], optional): Dictionary mapping column names in
            the dataframe to the LogicalType for the column. LogicalTypes will be inferred
            for any columns not present in the dictionary.
        use_standard_tags (bool, optional): If True, will add standard semantic tags to columns based
            on the inferred or specified logical type for the column. Defaults to True.
        column_origins (str or dict[str -> str], optional): Origin of each column. If a string is supplied, it is
            used as the origin for all columns. A dictionary can be used to set origins for individual columns.
        replace_nan (bool, optional): Whether to replace empty string values and string representations of
            NaN values ("nan", "<NA>") with np.nan or pd.NA values based on column dtype. Defaults to False.
        validate (bool, optional): Whether parameter and data validation should occur. Defaults to True. Warning:
            Should be set to False only when parameters and data are known to be valid.
            Any errors resulting from skipping validation with invalid inputs may not be easily understood.
        **kwargs: Additional keyword arguments to pass to the underlying pandas read file function. For more
            information on available keywords refer to the pandas documentation.

    Returns:
        pd.DataFrame: DataFrame created from the specified file with Woodwork typing information initialized.

    Raises:
        RuntimeError: if the content type cannot be inferred or is unsupported.
        ImportError: if a pyarrow-backed format is requested and pyarrow is missing.
    """
    if content_type is None:
        inferred_type, _ = guess_type(filepath)
        if inferred_type is None:
            raise RuntimeError(
                "Content type could not be inferred. Please specify content_type and try again."
            )
        content_type = inferred_type
    if content_type not in type_to_read_func_map:
        raise RuntimeError(
            "Reading from content type {} is not currently supported".format(
                content_type
            )
        )
    # These formats require pyarrow; fail early with an actionable message.
    pyarrow_types = [
        "parquet",
        "application/parquet",
        "arrow",
        "application/arrow",
        "feather",
        "application/feather",
        "orc",
        "application/orc",
    ]
    if content_type in pyarrow_types:
        import_or_raise("pyarrow", PYARROW_ERR_MSG)
        if content_type in ["parquet", "application/parquet"]:
            # fastparquet cannot handle nullable pandas dtypes; force pyarrow
            kwargs["engine"] = "pyarrow"
    dataframe = type_to_read_func_map[content_type](filepath, **kwargs)
    if replace_nan:
        # NOTE(review): _replace_nan_strings is not visible in this chunk —
        # presumably defined elsewhere in this module; confirm.
        dataframe = _replace_nan_strings(dataframe)
    dataframe.ww.init(
        name=name,
        index=index,
        time_index=time_index,
        semantic_tags=semantic_tags,
        logical_types=logical_types,
        use_standard_tags=use_standard_tags,
        column_origins=column_origins,
        validate=validate,
    )
    return dataframe
def import_or_raise(library, error_msg):
    """Attempt to import the requested library. If the import fails, raise an
    ImportError with the supplied error message.

    Args:
        library (str): the name of the library
        error_msg (str): error message to return if the import fails

    Returns: the imported module
    """
    try:
        return importlib.import_module(library)
    except ImportError as err:
        # chain the original exception so the underlying cause (e.g. a
        # broken transitive dependency) stays visible in the traceback
        raise ImportError(error_msg) from err
def _is_s3(string):
"""Checks if the given string is a s3 path. Returns a boolean."""
return "s3://" in string
def _is_url(string):
"""Checks if the given string is an url path. Returns a boolean."""
return "http" in string
def _reformat_to_latlong(latlong, use_list=False):
    """Reformats LatLong columns to be tuples of floats. Uses np.nan for null values.

    Args:
        latlong: a two-element tuple/list, a string representation of one, or
            a recognized null value ('None', 'nan', 'NaN', None, NaN).
        use_list (bool): if True, return a list instead of a tuple.

    Returns:
        A two-element tuple (or list) of floats, or np.nan for null input.

    Raises:
        ValueError: if the input cannot be interpreted as a two-value LatLong.
    """
    if _is_null_latlong(latlong):
        return np.nan

    if isinstance(latlong, str):
        try:
            # Serialized latlong columns from csv or parquet will be strings, so null values will be
            # read as the string 'nan' in pandas and Dask and 'NaN' in Koalas
            # neither of which is interpretable as a null value
            if "nan" in latlong:
                latlong = latlong.replace("nan", "None")
            if "NaN" in latlong:
                latlong = latlong.replace("NaN", "None")
            latlong = ast.literal_eval(latlong)
        except (ValueError, SyntaxError):
            # BUG FIX: ast.literal_eval raises SyntaxError (not only ValueError)
            # for malformed strings; catch both so malformed input falls through
            # to the uniform ValueError below instead of leaking a SyntaxError.
            pass

    if isinstance(latlong, (tuple, list)):
        if len(latlong) != 2:
            raise ValueError(
                f"LatLong values must have exactly two values. {latlong} does not have two values."
            )

        latitude, longitude = map(_to_latlong_float, latlong)

        # (np.nan, np.nan) should be counted as a single null value
        if pd.isnull(latitude) and pd.isnull(longitude):
            return np.nan

        if use_list:
            return [latitude, longitude]
        return (latitude, longitude)

    raise ValueError(
        f"LatLongs must either be a tuple, a list, or a string representation of a tuple. {latlong} does not fit the criteria."
    )
def _to_latlong_float(val):
    """Convert *val* to a float, mapping recognized null values to np.nan."""
    if _is_null_latlong(val):
        return np.nan

    try:
        converted = float(val)
    except (ValueError, TypeError):
        raise ValueError(
            f"Latitude and Longitude values must be in decimal degrees. The latitude or longitude represented by {val} cannot be converted to a float."
        )
    return converted
def _is_valid_latlong_series(series):
    """Returns True if all elements in the series contain properly formatted LatLong values,
    otherwise returns False.

    For Dask input only the first partition is checked; Koalas input is
    converted to pandas and uses list brackets instead of tuples.
    """
    if ww.accessor_utils._is_dask_series(series):
        # BUG FIX: original had a duplicated assignment (`series = series = ...`).
        series = series.get_partition(0).compute()
    if ww.accessor_utils._is_koalas_series(series):
        series = series.to_pandas()
        bracket_type = list
    else:
        bracket_type = tuple
    # Return a plain bool rather than branching on the reduction result.
    return bool(series.apply(_is_valid_latlong_value, args=(bracket_type,)).all())
def _is_valid_latlong_value(val, bracket_type=tuple):
"""Returns True if the value provided is a properly formatted LatLong value for a
pandas, Dask or Koalas Series, otherwise returns False."""
if isinstance(val, bracket_type) and len(val) == 2:
latitude, longitude = val
if isinstance(latitude, float) and isinstance(longitude, float):
if pd.isnull(latitude) and pd.isnull(longitude):
return False
return True
elif isinstance(val, float) and pd.isnull(val):
return True
return False
def _is_null_latlong(val):
if isinstance(val, str):
return val == "None" or val == "nan" or val == "NaN"
# Since we can have list inputs here, pd.isnull will not have a relevant truth value for lists
return not isinstance(val, list) and pd.isnull(val)
def get_valid_mi_types():
    """
    Generate a list of LogicalTypes that are valid for calculating mutual information. Note that
    index columns are not valid for calculating mutual information, but their types may be
    returned by this function.

    Returns:
        list(LogicalType): A list of the LogicalTypes that can be used to calculate mutual information
    """
    special_types = (
        ww.logical_types.Datetime,
        ww.logical_types.Boolean,
        ww.logical_types.BooleanNullable,
    )
    return [
        ltype
        for ltype in ww.type_system.registered_types
        if "category" in ltype.standard_tags
        or "numeric" in ltype.standard_tags
        or ltype in special_types
    ]
def _get_column_logical_type(series, logical_type, name):
    """Resolve a column's logical type: parse an explicit one, else infer it."""
    if not logical_type:
        return ww.type_system.infer_logical_type(series)
    return _parse_logical_type(logical_type, name)
def _parse_logical_type(logical_type, name):
    """Normalize *logical_type* (string, class, or instance) to a registered
    LogicalType instance, raising TypeError for unregistered types."""
    if isinstance(logical_type, str):
        logical_type = ww.type_system.str_to_logical_type(logical_type)

    ltype_instance = logical_type() if isclass(logical_type) else logical_type
    if type(ltype_instance) not in ww.type_system.registered_types:
        raise TypeError(f"Invalid logical type specified for '{name}'")
    return ltype_instance
def concat_columns(objs, validate_schema=True):
"""
Concatenate Woodwork objects along | |
import functools
import os.path
import re
from datetime import datetime
from copy import copy
from math import cos, gcd, isinf, pi, sin, sqrt, tau
from os.path import realpath
from random import randint, shuffle
from numpy import linspace
from meerk40t.core.exceptions import BadFileError
from meerk40t.kernel import CommandSyntaxError, Service, Settings
from ..svgelements import Angle, Color, Matrix, SVGElement, Viewbox, SVG_RULE_EVENODD, SVG_RULE_NONZERO
from .cutcode import CutCode
from .element_types import *
from .node.elem_image import ImageNode
from .node.node import Node, Linecap, Linejoin, Fillrule
from .node.op_console import ConsoleOperation
from .node.op_cut import CutOpNode
from .node.op_dots import DotsOpNode
from .node.op_engrave import EngraveOpNode
from .node.op_hatch import HatchOpNode
from .node.op_image import ImageOpNode
from .node.op_raster import RasterOpNode
from .node.rootnode import RootNode
from .wordlist import Wordlist
from .units import UNITS_PER_PIXEL, Length
def plugin(kernel, lifecycle=None):
    """Kernel lifecycle hook for the elements service."""
    _ = kernel.translation
    if lifecycle == "register":
        kernel.add_service("elements", Elemental(kernel))
        # kernel.add_service("elements", Elemental(kernel,1))
        return
    if lifecycle == "postboot":
        elements = kernel.elements
        preference_choices = [
            {
                "attr": "operation_default_empty",
                "object": elements,
                "default": True,
                "type": bool,
                "label": _("Default Operation Empty"),
                "tip": _(
                    "Leave empty operations or default Other/Red/Blue"
                ),
            },
            {
                "attr": "classify_reverse",
                "object": elements,
                "default": False,
                "type": bool,
                "label": _("Classify Reversed"),
                "tip": _(
                    "Classify elements into operations in reverse order e.g. to match Inkscape's Object List"
                ),
            },
            {
                "attr": "legacy_classification",
                "object": elements,
                "default": False,
                "type": bool,
                "label": _("Legacy Classify"),
                "tip": _(
                    "Use the legacy classification algorithm rather than the modern classification algorithm."
                ),
            },
        ]
        kernel.register_choices("preferences", preference_choices)
        return
    if lifecycle == "prestart":
        input_arg = getattr(kernel.args, "input", None)
        if input_arg is not None:
            # Load any input file
            try:
                kernel.elements.load(realpath(input_arg.name))
            except BadFileError as e:
                kernel._console_channel(_("File is Malformed") + ": " + str(e))
        return
    if lifecycle == "poststart":
        output_arg = getattr(kernel.args, "output", None)
        if output_arg is not None:
            # output the file you have at this point.
            kernel.elements.save(realpath(output_arg.name))
def reversed_enumerate(collection: list):
    """Yield (index, item) pairs of *collection* from last to first."""
    for idx in reversed(range(len(collection))):
        yield idx, collection[idx]
# Operation type names in a fixed order — presumably the classification
# priority order (dots first, cut/hatch last); verify against call sites.
OP_PRIORITIES = ["op dots", "op image", "op raster", "op engrave", "op cut", "op hatch"]
# def is_dot(element):
# if not isinstance(element, Shape):
# return False
# if isinstance(element, Path):
# path = element
# else:
# path = element.segments()
#
# if len(path) == 2 and isinstance(path[0], Move):
# if isinstance(path[1], Close):
# return True
# if isinstance(path[1], Line) and path[1].length() == 0:
# return True
# return False
# def is_straight_line(element):
# if not isinstance(element, Shape):
# return False
# if isinstance(element, Path):
# path = element
# else:
# path = element.segments()
#
# if len(path) == 2 and isinstance(path[0], Move):
# if isinstance(path[1], Line) and path[1].length() > 0:
# return True
# return False
class Elemental(Service):
"""
The elemental service is governs all the interactions with the various elements,
operations, and filenodes. Handling structure change and selection, emphasis, and
highlighting changes. The goal of this module is to make sure that the life cycle
of the elements is strictly enforced. For example, every element that is removed
must have had the .cache deleted. And anything selecting an element must propagate
that information out to inform other interested modules.
"""
    def __init__(self, kernel, index=None, *args, **kwargs):
        """Initialize the elements service.

        Args:
            kernel: owning kernel the service registers against.
            index: optional integer suffix; None yields the service name
                "elements", an int yields "elements<index>".
        """
        Service.__init__(
            self, kernel, "elements" if index is None else "elements%d" % index
        )
        self._clipboard = {}
        self._clipboard_default = "0"
        self.note = None
        self._emphasized_bounds = None
        self._emphasized_bounds_dirty = True
        self._tree = RootNode(self)
        # Register persisted settings along with their default values.
        self.setting(bool, "classify_reverse", False)
        self.setting(bool, "legacy_classification", False)
        self.setting(bool, "auto_note", True)
        self.setting(bool, "uniform_svg", False)
        self.setting(float, "svg_ppi", 96.0)
        self.setting(bool, "operation_default_empty", True)
        # Backing stores for operations and penbox definitions.
        self.op_data = Settings(self.kernel.name, "operations.cfg")
        self.pen_data = Settings(self.kernel.name, "penbox.cfg")
        self.penbox = {}
        self.load_persistent_penbox()
        self.wordlists = {"version": [1, self.kernel.version]}
        # Commands and tree menus must exist before operations are restored.
        self._init_commands(kernel)
        self._init_tree(kernel)
        direct = os.path.dirname(self.op_data._config_file)
        self.mywordlist = Wordlist(self.kernel.version, direct)
        self.load_persistent_operations("previous")
        ops = list(self.ops())
        # Load default operations only when nothing was restored and the user
        # has opted out of starting with an empty operations list.
        if not len(ops) and not self.operation_default_empty:
            self.load_default()
def load_persistent_penbox(self):
settings = self.pen_data
pens = settings.read_persistent_string_dict("pens", suffix=True)
for pen in pens:
length = int(pens[pen])
box = list()
for i in range(length):
penbox = dict()
settings.read_persistent_string_dict(f'{pen} {i}', penbox, suffix=True)
box.append(penbox)
self.penbox[pen] = box
def save_persistent_penbox(self):
sections = {}
for section in self.penbox:
sections[section] = len(self.penbox[section])
self.pen_data.write_persistent_dict("pens", sections)
for section in self.penbox:
for i, p in enumerate(self.penbox[section]):
self.pen_data.write_persistent_dict(f'{section} {i}', p)
def wordlist_fetch(self, key):
try:
wordlist = self.wordlists[key]
except KeyError:
return None
try:
wordlist[0] += 1
return wordlist[wordlist[0]]
except IndexError:
wordlist[0] = 1
return wordlist[wordlist[0]]
def index_range(self, index_string):
"""
Parses index ranges in the form <idx>,<idx>-<idx>,<idx>
@param index_string:
@return:
"""
indexes = list()
for s in index_string.split(","):
q = list(s.split("-"))
if len(q) == 1:
indexes.append(int(q[0]))
else:
start = int(q[0])
end = int(q[1])
if start > end:
for q in range(end, start + 1):
indexes.append(q)
else:
for q in range(start, end + 1):
indexes.append(q)
return indexes
    def length(self, v):
        """Parse *v* as a Length and return its float value."""
        return float(Length(v))
    def length_x(self, v):
        """Parse *v* as a Length relative to the device width; return a float."""
        return float(Length(v, relative_length=self.device.width))
    def length_y(self, v):
        """Parse *v* as a Length relative to the device height; return a float."""
        return float(Length(v, relative_length=self.device.height))
def area(self, v):
llx = Length(v, relative_length=self.device.width)
lx = float(llx)
if "%" in v:
lly = Length(v, relative_length=self.device.height)
else:
lly = Length("1{unit}".format(unit=llx._preferred_units))
ly = float(lly)
return lx * ly
def _init_commands(self, kernel):
_ = kernel.translation
@self.console_argument("filename")
@self.console_command(
"load",
help=_("loads file from working directory"),
input_type=None,
output_type="file",
)
def load(channel, _, filename=None, **kwargs):
import os
if filename is None:
channel(_("No file specified."))
return
new_file = os.path.join(self.kernel.current_directory, filename)
if not os.path.exists(new_file):
channel(_("No such file."))
return
try:
result = self.load(new_file)
if result:
channel(_("loading..."))
except AttributeError:
raise CommandSyntaxError(_("Loading files was not defined"))
return "file", new_file
# ==========
# WORDLISTS COMMANDS
# ==========
@self.console_command(
"wordlist",
help=_("Wordlist base operation"),
output_type="wordlist",
)
def wordlist(command, channel, _, remainder = None, **kwargs):
return "wordlist", ""
@self.console_argument("key", help=_("Wordlist value"))
@self.console_argument("value", help=_("Content"))
@self.console_command(
"add",
help=_("add value to wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_add(
command, channel, _, key=None, value=None, **kwargs
):
if key is not None:
if value is None:
value = ""
self.mywordlist.add(key, value)
return "wordlist", key
@self.console_argument("key", help=_("Wordlist value"))
@self.console_argument("value", help=_("Content"))
@self.console_command(
"addcounter",
help=_("add numeric counter to wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_addcounter(
command, channel, _, key=None, value=None, **kwargs
):
if key is not None:
if value is None:
value = 1
else:
try:
value = int(value)
except ValueError:
value = 1
self.mywordlist.add(key, value, 2)
return "wordlist", key
@self.console_argument("key", help=_("Wordlist value"))
@self.console_argument("index", help=_("index to use"))
@self.console_command(
"get",
help=_("get current value from wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_get(
command, channel, _, key=None, index=None, **kwargs
):
if key is not None:
result = self.mywordlist.fetch_value(skey=key, idx=index)
channel(str(result))
else:
channel(_("Missing key"))
result = ""
return "wordlist", result
@self.console_argument("key", help=_("Wordlist value"))
@self.console_argument("value", help=_("Wordlist value"))
@self.console_argument("index", help=_("index to use"))
@self.console_command(
"set",
help=_("set value to wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_set(
command, channel, _, key=None, value=None, index=None, **kwargs
):
if key is not None and value is not None:
self.mywordlist.set_value(skey=key, value=value, idx=index)
else:
channel(_("Not enough parameters given"))
return "wordlist", key
@self.console_argument("key", help=_("Individual wordlist value (use @ALL for all)"))
@self.console_argument("index", help=_("index to use"))
@self.console_command(
"index",
help=_("sets index in wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_index(
command, channel, _, key=None, index=None, **kwargs
):
if key is not None and index is not None:
try:
index = int(index)
except ValueError:
index = 0
self.mywordlist.set_index(skey=key,idx=index)
return "wordlist", key
@self.console_argument("filename", help=_("Wordlist file (if empty use mk40-default)"))
@self.console_command(
"restore",
help=_("Loads a previously saved wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_restore(
command, channel, _, filename=None, remainder=None, **kwargs
):
new_file = filename
if not filename is None:
new_file = os.path.join(self.kernel.current_directory, filename)
if not os.path.exists(new_file):
channel(_("No such file."))
return
self.mywordlist.load_data(new_file)
return "wordlist", ""
@self.console_argument("filename", help=_("Wordlist file (if empty use mk40-default)"))
@self.console_command(
"backup",
help=_("Saves the current wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_backup(
command, channel, _, filename=None, remainder=None, **kwargs
):
new_file = filename
if not filename is None:
new_file = os.path.join(self.kernel.current_directory, filename)
self.mywordlist.save_data(new_file)
return "wordlist", ""
@self.console_argument("key", help=_("Wordlist value"))
@self.console_command(
"list",
help=_("list wordlist values"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_list(
command, channel, _, key=None, **kwargs
):
channel("----------")
if key is None:
for skey in self.mywordlist.content:
channel(str(skey))
else:
if key in self.mywordlist.content:
wordlist = self.mywordlist.content[key]
channel(_("Wordlist %s (Type=%d, Index=%d)):") % (key, wordlist[0], wordlist[1]-2))
for idx, value in enumerate(wordlist[2:]):
channel("#%d: %s" % (idx, str(value)))
else:
channel(_("There is no such pattern %s") % key )
channel("----------")
return "wordlist", key
@self.console_argument("filename", help=_("CSV file"))
@self.console_command(
"load",
help=_("Attach a csv-file to the wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_load(
command, channel, _, filename=None, **kwargs
):
if filename is None:
channel(_("No file specified."))
return
new_file = os.path.join(self.kernel.current_directory, filename)
if not os.path.exists(new_file):
channel(_("No such file."))
return
rows, columns, names = self.mywordlist.load_csv_file(new_file)
channel (_("Rows added: %d") % rows)
channel (_("Values added: %d") % columns)
for name in names:
channel (" " + name)
return "wordlist", names
# ==========
# PENBOX COMMANDS
# ==========
@self.console_argument("key", help=_("Penbox | |
0, 0, 0, 0],
[1589, 4.1629, 0, 9999, -9999, 1.0, 100, 1, 48.538268, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1590, 7.898876, 0, 9999, -9999, 1.0, 100, 1, 119.077525, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1591, 9.928175, 0, 9999, -9999, 1.0, 100, 1, 142.8447, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1592, 0.155196, 0, 9999, -9999, 1.0, 100, 1, 9.842361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1593, 0.163439, 0, 9999, -9999, 1.0, 100, 1, 7.183183, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1594, 0.190524, 0, 9999, -9999, 1.0, 100, 1, 9.56089, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1595, 2.350877, 0, 9999, -9999, 1.0, 100, 1, 54.79001, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1596, 5.937585, 0, 9999, -9999, 1.0, 100, 1, 138.730049, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1597, 0.102115, 0, 9999, -9999, 1.0, 100, 1, 2.858987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1598, 0.119087, 0, 9999, -9999, 1.0, 100, 1, 4.795494, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1599, 2.975996, 0, 9999, -9999, 1.0, 100, 1, 86.703571, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1600, 1.214112, 0, 9999, -9999, 1.0, 100, 1, 25.356501, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1601, 0.169626, 0, 9999, -9999, 1.0, 100, 1, 7.643653, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1602, 1.502275, 0, 9999, -9999, 1.0, 100, 1, 45.658169, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1603, 6.047667, 0, 9999, -9999, 1.0, 100, 1, 26.209248, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1604, 4.332527, 0, 9999, -9999, 1.0, 100, 1, 16.363032, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1605, 11.484777, 0, 9999, -9999, 1.0, 100, 1, 43.477178, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1606, 0.66219, 0, 9999, -9999, 1.0, 100, 1, 42.024907, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1607, 6.682815, 0, 9999, -9999, 1.0, 100, 1, 19.395236, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1608, 0.273665, 0, 9999, -9999, 1.0, 100, 1, 19.491249, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1609, 0.111972, 0, 9999, -9999, 1.0, 100, 1, 6.052272, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1610, 0.313638, 0, 9999, -9999, 1.0, 100, 1, 18.571656, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1611, 0.202693, 0, 9999, -9999, 1.0, 100, 1, 6.420554, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1612, 0.243534, 0, 9999, -9999, 1.0, 100, 1, 10.811203, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1613, 0.875852, 0, 9999, -9999, 1.0, 100, 1, 27.976217, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1614, 0.915802, 0, 9999, -9999, 1.0, 100, 1, 28.183827, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1615, 4.420545, 0, 9999, -9999, 1.0, 100, 1, 193.234776, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1616, 0.195687, 0, 9999, -9999, 1.0, 100, 1, 6.865586, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1617, 0.307703, 0, 9999, -9999, 1.0, 100, 1, 10.63107, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1618, 0.094766, 0, 9999, -9999, 1.0, 100, 1, 4.920368, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1619, 0.190162, 0, 9999, -9999, 1.0, 100, 1, 6.689637, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1620, 0.03523, 0, 9999, -9999, 1.0, 100, 1, 1.912024, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1621, 0.14905, 0, 9999, -9999, 1.0, 100, 1, 8.056388, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1622, 0.105333, 0, 9999, -9999, 1.0, 100, 1, 5.693597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1623, 0.495052, 0, 9999, -9999, 1.0, 100, 1, 20.717111, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1624, 0.150461, 0, 9999, -9999, 1.0, 100, 1, 8.938454, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1625, 1.288131, 0, 9999, -9999, 1.0, 100, 1, 65.182465, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1626, 0.219041, 0, 9999, -9999, 1.0, 100, 1, 11.878862, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1627, 0.393942, 0, 9999, -9999, 1.0, 100, 1, 10.196496, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1628, 2.771182, 0, 9999, -9999, 1.0, 100, 1, 66.613993, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1629, 3.312086, 0, 9999, -9999, 1.0, 100, 1, 121.671047, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1630, 0.185001, 0, 9999, -9999, 1.0, 100, 1, 12.452584, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1631, 0.864054, 0, 9999, -9999, 1.0, 100, 1, 32.486249, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1632, 6.924615, 0, 9999, -9999, 1.0, 100, 1, 25.874893, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1633, 4.160116, 0, 9999, -9999, 1.0, 100, 1, 67.433329, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1634, 0.178405, 0, 9999, -9999, 1.0, 100, 1, 9.643044, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1635, 3.116781, 0, 9999, -9999, 1.0, 100, 1, 19.166135, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1636, 0.745491, 0, 9999, -9999, 1.0, 100, 1, 25.181406, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1637, 1.059606, 0, 9999, -9999, 1.0, 100, 1, 29.114828, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1638, 0.397925, 0, 9999, -9999, 1.0, 100, 1, 12.162188, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1639, 0.767671, 0, 9999, -9999, 1.0, 100, 1, 29.183593, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1640, 0.037125, 0, 9999, -9999, 1.0, 100, 1, 2.237652, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1641, 0.078629, 0, 9999, -9999, 1.0, 100, 1, 5.023705, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1642, 0.183602, 0, 9999, -9999, 1.0, 100, 1, 11.730623, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1643, 0.067335, 0, 9999, -9999, 1.0, 100, 1, 3.417684, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1644, 0.341993, 0, 9999, -9999, 1.0, 100, 1, 11.76596, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1645, 0.277807, 0, 9999, -9999, 1.0, 100, 1, 11.144882, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1646, 0.062045, 0, 9999, -9999, 1.0, 100, 1, 3.73271, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1647, 0.371991, 0, 9999, -9999, 1.0, 100, 1, 17.434827, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1648, 7.12459, 0, 9999, -9999, 1.0, 100, 1, 109.345623, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1649, 0.487164, 0, 9999, -9999, 1.0, 100, 1, 23.481556, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1650, 7.82586, 0, 9999, -9999, 1.0, 100, 1, 176.928964, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1651, 10.014474, 0, 9999, -9999, 1.0, 100, 1, 161.276649, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1652, 3.847745, 0, 9999, -9999, 1.0, 100, 1, 84.070562, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1653, 0.848807, 0, 9999, -9999, 1.0, 100, 1, 18.431241, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1654, 2.933278, 0, 9999, -9999, 1.0, 100, 1, 47.53021, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1655, 0.186809, 0, 9999, -9999, 1.0, 100, 1, 10.79071, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1656, 0.05951, 0, 9999, -9999, 1.0, 100, 1, 2.680105, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1657, 0.149488, 0, 9999, -9999, 1.0, 100, 1, 5.6313, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1658, 0.030293, 0, 9999, -9999, 1.0, 100, 1, 1.879381, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1659, 6.279257, 0, 9999, -9999, 1.0, 100, 1, 91.77667, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1660, 14.343715, 0, 9999, -9999, 1.0, 100, 1, 186.942171, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1661, 7.551876, 0, 9999, -9999, 1.0, 100, 1, 138.604087, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1662, 0.052036, 0, 9999, -9999, 1.0, 100, 1, 3.040325, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1663, 0.043029, 0, 9999, -9999, 1.0, 100, 1, 1.600649, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1664, 0.029883, 0, 9999, -9999, 1.0, 100, 1, 1.578207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1665, 1.325999, 0, 9999, -9999, 1.0, 100, 1, 48.659717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1666, 0.098115, 0, 9999, -9999, 1.0, 100, 1, 2.877877, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1667, 0.191254, 0, 9999, -9999, 1.0, 100, 1, 5.227282, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1668, 0.063783, 0, 9999, -9999, 1.0, 100, 1, 3.927043, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1669, 3.091164, 0, 9999, -9999, 1.0, 100, 1, 72.677935, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1670, 4.129134, 0, 9999, -9999, 1.0, 100, 1, 111.043025, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1671, 1.293399, 0, 9999, -9999, 1.0, 100, 1, 62.404971, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1672, 0.260004, 0, 9999, -9999, 1.0, 100, 1, 10.579925, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1673, 0.194865, 0, 9999, -9999, 1.0, 100, 1, 4.091034, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1674, 1.539003, 0, 9999, -9999, 1.0, 100, 1, 47.970381, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1675, 1.124901, 0, 9999, -9999, 1.0, 100, 1, 31.233663, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1676, 3.633638, 0, 9999, -9999, 1.0, 100, 1, 83.173368, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1677, 0.918736, 0, 9999, -9999, 1.0, 100, 1, 13.887293, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1678, 5.037245, 0, 9999, -9999, 1.0, 100, 1, 226.804108, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1679, 1.387895, 0, 9999, -9999, 1.0, 100, 1, 71.380413, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1680, 4.902213, 0, 9999, -9999, 1.0, 100, 1, 52.148102, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1681, 1.48382, 0, 9999, -9999, 1.0, 100, 1, 17.30062, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1682, 3.353086, 0, 9999, -9999, 1.0, 100, 1, 39.892468, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1683, 0.233164, 0, 9999, -9999, 1.0, 100, 1, 9.189765, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1684, 3.160044, 0, 9999, -9999, 1.0, 100, 1, 40.575646, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1685, 5.440318, 0, 9999, -9999, 1.0, 100, 1, 74.922434, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1686, 3.356229, 0, 9999, -9999, 1.0, 100, 1, 81.035483, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1687, 6.762983, 0, 9999, -9999, 1.0, 100, 1, 112.01808, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1688, 1.935587, 0, 9999, -9999, 1.0, 100, 1, 18.158729, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1689, 5.351676, 0, 9999, -9999, 1.0, 100, 1, 116.696894, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1690, 8.256594, 0, 9999, -9999, 1.0, 100, 1, 116.477465, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1691, 7.35631, 0, 9999, -9999, 1.0, 100, 1, 228.38653, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1692, 3.390353, 0, 9999, -9999, 1.0, 100, 1, 26.501573, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1693, 42.864866, 0, 9999, -9999, 1.0, 100, 1, 86.236575, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1694, 4.607443, 0, 9999, -9999, 1.0, 100, 1, 53.656832, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1695, 1.988569, 0, 9999, -9999, 1.0, 100, 1, 23.132774, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1696, 3.830786, 0, 9999, -9999, 1.0, 100, 1, 53.34209, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1697, 55.164683, 0, 9999, -9999, 1.0, 100, 1, 136.821485, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1698, 0.856795, 0, 9999, -9999, 1.0, 100, 1, 25.60631, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1699, 0.174604, 0, 9999, -9999, 1.0, 100, 1, 5.356106, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1700, 1.563843, 0, 9999, -9999, 1.0, 100, 1, 55.825815, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1701, 1.121985, 0, 9999, -9999, 1.0, 100, 1, 37.297196, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1702, 1.674158, 0, 9999, -9999, 1.0, 100, 1, 25.149806, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1703, 3.16561, 0, 9999, -9999, 1.0, 100, 1, 48.587768, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1704, 6.284237, 0, 9999, -9999, 1.0, 100, 1, 127.647586, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1705, 2.315371, 0, 9999, -9999, 1.0, 100, 1, 52.051788, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1706, 0.32622, 0, 9999, -9999, 1.0, 100, 1, 6.76178, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1707, 0.386939, 0, 9999, -9999, 1.0, 100, 1, 11.7078, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1708, 1.03906, 0, 9999, -9999, 1.0, 100, 1, 26.288692, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1709, 8.733266, 0, 9999, -9999, 1.0, 100, 1, 226.257418, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1710, 4.905366, 0, 9999, -9999, 1.0, 100, 1, 183.631947, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1711, 0.136186, 0, 9999, -9999, 1.0, 100, 1, 7.213854, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1712, 6.392278, 0, 9999, -9999, 1.0, 100, 1, 75.638853, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1713, 4.470768, 0, 9999, -9999, 1.0, 100, 1, 90.775073, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1714, 2.656482, 0, 9999, -9999, 1.0, 100, 1, 42.312538, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1715, 5.422016, 0, 9999, -9999, 1.0, 100, 1, 155.279397, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1716, 111.568589, 0, 9999, -9999, 1.0, 100, 1, 156.979012, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
== id))
source = self.fetch_from_2(self.build_raw_query_with_star())[0]
hidden = self.fetch_from_1(self.build_raw_query_with_star())[0]
return source, hidden, self.data
class TestReturningProcessingPostgreSQL(TestReturningProcessingMixing, BaseTokenization):
    """PostgreSQL-only variant of the RETURNING-clause tokenization tests."""

    # NOTE(review): these random values are generated once at class-definition
    # time, so every test in this class shares the same data dict.
    data = {
        'nullable_column': None,
        'empty': b'',
        'token_i32': random_int32(),
        'token_i64': random_int64(),
        'token_str': random_str(),
        'token_bytes': random_bytes(),
        'token_email': random_email(),
    }

    def checkSkip(self):
        """Skip this test case unless the suite is running against PostgreSQL."""
        if not TEST_POSTGRESQL:
            self.skipTest("Only for PostgreSQL")
        super().checkSkip()

    def build_raw_query_with_enum(self):
        """Build an INSERT ... RETURNING <explicit columns> statement.

        Assigns a fresh random id into ``self.data`` and returns the
        (statement, data dict) pair.
        """
        self.data['id'] = get_random_id()
        return self.specific_client_id_table.insert(). \
            returning(self.specific_client_id_table.c.id, self.specific_client_id_table.c.token_str, self.specific_client_id_table.c.token_i64,
                      self.specific_client_id_table.c.token_email, self.specific_client_id_table.c.token_i32), self.data

    def build_raw_query_with_star(self):
        """Build an INSERT ... RETURNING * statement with a fresh random id."""
        self.data['id'] = get_random_id()
        return self.specific_client_id_table.insert().returning(sa.literal_column('*')), self.data

    def insert_with_enum_and_return_data(self):
        """Insert via both engines using explicit RETURNING columns.

        Returns the row fetched through engine2, the row fetched through
        engine1, and the source data dict.
        """
        metadata.create_all(self.engine_raw, [self.specific_client_id_table])
        self.fetch_from_2(sa.select([self.specific_client_id_table]).where(self.specific_client_id_table.c.id == get_random_id()))
        source_query, source_data = self.build_raw_query_with_enum()
        source = self.engine2.execute(source_query, source_data).fetchone()
        hidden_query, hidden_data = self.build_raw_query_with_enum()
        hidden = self.engine1.execute(hidden_query, hidden_data).fetchone()
        return source, hidden, self.data

    def insert_with_star_and_return_data(self):
        """Insert via both engines using RETURNING *.

        Returns the row fetched through engine2, the row fetched through
        engine1, and the source data dict.
        """
        metadata.create_all(self.engine_raw, [self.specific_client_id_table])
        self.fetch_from_2(sa.select([self.specific_client_id_table]).where(self.specific_client_id_table.c.id == get_random_id()))
        source_query, data = self.build_raw_query_with_star()
        source = self.engine2.execute(source_query, data).fetchone()
        hidden_query, data = self.build_raw_query_with_star()
        hidden = self.engine1.execute(hidden_query, data).fetchone()
        return source, hidden, self.data
class TestTokenizationWithZone(BaseTokenization):
    """Tokenization tests where the zone id selects the tokenization context
    instead of the connection's client_id.

    One test stores the zone id in a table column; the other passes it as a
    literal in the SELECT. In both cases data must detokenize through any
    connector when the correct zone id accompanies it, and must stay
    tokenized when a wrong zone id is substituted.
    """
    # Makes the base fixture run acra-server in zone mode.
    ZONE = True

    def testTokenizationSpecificZoneID(self):
        """Zone id stored per row in a 'zone_id' column."""
        specific_zone_id_table = sa.Table(
            'test_tokenization_specific_zone_id', metadata,
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('zone_id', sa.LargeBinary(length=COLUMN_DATA_SIZE)),
            sa.Column('nullable_column', sa.Text, nullable=True),
            sa.Column('empty', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('token_i32', sa.Integer()),
            sa.Column('token_i64', sa.BigInteger()),
            sa.Column('token_str', sa.Text),
            sa.Column('token_bytes', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('token_email', sa.Text),
            extend_existing=True,
        )
        metadata.create_all(self.engine_raw, [specific_zone_id_table])
        self.engine1.execute(specific_zone_id_table.delete())
        zone_id = zones[0][ZONE_ID]
        data = {
            'id': 1,
            'nullable_column': None,
            'empty': b'',
            'zone_id': zone_id.encode('ascii'),
            'token_i32': random_int32(),
            'token_i64': random_int64(),
            'token_str': random_str(),
            'token_bytes': random_bytes(),
            'token_email': random_email(),
        }
        # insert data using connector client_id==keypair1
        self.insert_via_1(specific_zone_id_table.insert(), data)
        # expect that source data will be returned from all connectors with correct zone id
        source_data = self.fetch_from_2(
            sa.select([specific_zone_id_table])
            .where(specific_zone_id_table.c.id == data['id']))
        hidden_data = self.fetch_from_1(
            sa.select([specific_zone_id_table])
            .where(specific_zone_id_table.c.id == data['id']))
        # BUGFIX: was `len(source_data) != len(hidden_data) != 1`, which chains
        # to `len(source_data) != len(hidden_data) and len(hidden_data) != 1`
        # and therefore did NOT fail when both results had the same wrong
        # length (e.g. both empty). Each result must contain exactly one row.
        if len(source_data) != 1 or len(hidden_data) != 1:
            self.fail('incorrect len of result data')
        token_fields = ('token_i32', 'token_i64', 'token_str', 'token_bytes', 'token_email')
        # with the correct zone id every connector sees the source values
        for k in token_fields:
            if isinstance(source_data[0][k], bytearray) and isinstance(data[k], str):
                self.assertEqual(source_data[0][k], bytearray(data[k], encoding='utf-8'))
                self.assertEqual(hidden_data[0][k], bytearray(data[k], encoding='utf-8'))
            else:
                self.assertEqual(source_data[0][k], data[k])
                self.assertEqual(hidden_data[0][k], data[k])
        # expect that source data will not be returned with an incorrect zone id
        columns = [sa.cast(zones[1][ZONE_ID].encode('ascii'), BYTEA)]
        # all columns except zone id
        columns.extend([i for i in list(specific_zone_id_table.c) if i.name != 'zone_id'])
        source_data = self.engine2.execute(
            sa.select(columns)
            .where(specific_zone_id_table.c.id == data['id']))
        source_data = source_data.fetchall()
        for i in token_fields:
            self.assertNotEqual(source_data[0][i], data[i])

    def testTokenizationSpecificZoneIDStarExpression(self):
        """Zone id passed as a literal alongside a qualified star expression."""
        specific_zone_id_table = sa.Table(
            'test_tokenization_specific_zone_id_star_expression', metadata,
            sa.Column('id', sa.Integer, primary_key=True),
            # don't store zoneID in table
            #sa.Column('zone_id', sa.LargeBinary(length=COLUMN_DATA_SIZE)),
            sa.Column('nullable_column', sa.Text, nullable=True),
            sa.Column('empty', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('token_i32', sa.Integer()),
            sa.Column('token_i64', sa.BigInteger()),
            sa.Column('token_str', sa.Text),
            sa.Column('token_bytes', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('token_email', sa.Text),
            extend_existing=True,
        )
        metadata.drop_all(self.engine_raw, [specific_zone_id_table])
        metadata.create_all(self.engine_raw, [specific_zone_id_table])
        self.engine1.execute(specific_zone_id_table.delete())
        data = {
            'id': 1,
            'nullable_column': None,
            'empty': b'',
            'token_i32': random_int32(),
            'token_i64': random_int64(),
            'token_str': random_str(),
            'token_bytes': random_bytes(),
            'token_email': random_email(),
        }
        # insert data using connector client_id==keypair1
        self.insert_via_1(specific_zone_id_table.insert(), data)
        CORRECT_ZONE, INCORRECT_ZONE = range(2)
        columns = [
            sa.literal(zones[CORRECT_ZONE][ZONE_ID]),
            # mysql doesn't support query like `select 'string', * from table1`, only qualified StarExpr like `select 'string', t1.* from table1 as t1`
            sa.text('{}.*'.format(specific_zone_id_table.name))
        ]
        # expect that source data will be returned from all connectors with correct zone id
        source_data = self.fetch_from_2(
            sa.select(columns, from_obj=specific_zone_id_table)
            .where(specific_zone_id_table.c.id == data['id']))
        hidden_data = self.fetch_from_1(
            sa.select(columns, from_obj=specific_zone_id_table)
            .where(specific_zone_id_table.c.id == data['id']))
        # BUGFIX: same chained-comparison problem as above -- require exactly
        # one row in each result explicitly.
        if len(source_data) != 1 or len(hidden_data) != 1:
            self.fail('incorrect len of result data')
        token_fields = ('token_i32', 'token_i64', 'token_str', 'token_bytes', 'token_email')
        # with the correct zone id every connector sees the source values
        for k in token_fields:
            if isinstance(source_data[0][k], bytearray) and isinstance(data[k], str):
                self.assertEqual(utils.memoryview_to_bytes(source_data[0][k]), bytearray(data[k], encoding='utf-8'))
                self.assertEqual(utils.memoryview_to_bytes(hidden_data[0][k]), bytearray(data[k], encoding='utf-8'))
            else:
                self.assertEqual(utils.memoryview_to_bytes(source_data[0][k]), data[k])
                self.assertEqual(utils.memoryview_to_bytes(hidden_data[0][k]), data[k])
        # expect that source data will not be returned with an incorrect zone id
        columns = [
            sa.literal(zones[INCORRECT_ZONE][ZONE_ID]),
            sa.text('{}.*'.format(specific_zone_id_table.name))
        ]
        source_data = self.engine2.execute(
            sa.select(columns)
            .where(specific_zone_id_table.c.id == data['id']))
        source_data = source_data.fetchall()
        for i in token_fields:
            self.assertNotEqual(utils.memoryview_to_bytes(source_data[0][i]), data[i])
# Matrix of concrete test classes: each pairs one token-storage / protocol
# base (BoltDB, Redis, binary MySQL, text/binary PostgreSQL, binary-bind
# MySQL) with one test suite (with or without zones). Bodies are empty --
# all behavior comes from the two base classes.
class TestTokenizationWithoutZoneWithBoltDB(BaseTokenizationWithBoltDB, TestTokenizationWithoutZone):
    pass


class TestTokenizationWithZoneWithBoltDB(BaseTokenizationWithBoltDB, TestTokenizationWithZone):
    pass


class TestTokenizationWithoutZoneWithRedis(BaseTokenizationWithRedis, TestTokenizationWithoutZone):
    pass


class TestTokenizationWithZoneWithRedis(BaseTokenizationWithRedis, TestTokenizationWithZone):
    pass


class TestTokenizationWithoutZoneBinaryMySQL(BaseTokenizationWithBinaryMySQL, TestTokenizationWithoutZone):
    pass


class TestTokenizationWithZoneBinaryMySQL(BaseTokenizationWithBinaryMySQL, TestTokenizationWithZone):
    pass


class TestTokenizationWithoutZoneTextPostgreSQL(BaseTokenizationWithTextPostgreSQL, TestTokenizationWithoutZone):
    pass


class TestTokenizationWithZoneTextPostgreSQL(BaseTokenizationWithTextPostgreSQL, TestTokenizationWithZone):
    pass


class TestTokenizationWithoutZoneBinaryPostgreSQL(BaseTokenizationWithBinaryPostgreSQL, TestTokenizationWithoutZone):
    pass


class TestTokenizationWithZoneBinaryPostgreSQL(BaseTokenizationWithBinaryPostgreSQL, TestTokenizationWithZone):
    pass


class TestTokenizationWithoutZoneBinaryBindMySQL(BaseTokenizationWithBinaryBindMySQL, TestTokenizationWithoutZone):
    pass


class TestTokenizationWithZoneBinaryBindMySQL(BaseTokenizationWithBinaryBindMySQL, TestTokenizationWithZone):
    pass
class BaseMasking(BaseTokenization):
    """Base fixture for masking tests.

    Generates a masking encryptor config bound to a specific client id and
    zone, starts acra-server with it plus a token DB, and provides helpers
    used by the concrete masking test classes.
    """
    WHOLECELL_MODE = False
    ENCRYPTOR_CONFIG = get_encryptor_config('tests/ee_masking_config.yaml')

    def check_crypto_envelope(self, table, row_id):
        """Assert every masked column of the stored row holds an AcraStruct
        envelope (not plaintext), by checking for the envelope's begin tag."""
        # throwaway AcraStruct, used only to obtain the 8-byte begin tag
        temp_acrastruct = create_acrastruct_with_client_id(b'somedata', 'keypair1')
        # read the row directly from the DB, bypassing the connectors
        source_data = self.engine_raw.execute(
            sa.select([table])
            .where(table.c.id == row_id))
        source_data = source_data.fetchone()
        for i in ('masked_prefix', 'masked_suffix', 'masked_without_plaintext', 'exact_plaintext_length',
                  'shorter_plaintext'):
            # check that data contains AcraStruct tag begin
            self.assertIn(temp_acrastruct[:8], source_data[i])

    def get_specified_client_id(self):
        # client id written into the generated encryptor config
        return 'keypair2'

    def fork_acra(self, popen_kwargs: dict = None, **acra_kwargs: dict):
        prepare_encryptor_config(
            client_id=self.get_specified_client_id(), zone_id=zones[0][ZONE_ID], config_path=self.ENCRYPTOR_CONFIG)
        acra_kwargs.update(token_db='token1.db',
                           encryptor_config_file=get_test_encryptor_config(self.ENCRYPTOR_CONFIG))
        # NOTE(review): `super(BaseTokenization, self)` deliberately skips
        # BaseTokenization.fork_acra and calls its parent's implementation --
        # presumably to avoid the tokenization config overriding the masking
        # one; confirm before changing.
        return super(BaseTokenization, self).fork_acra(popen_kwargs, **acra_kwargs)

    def executeInsert(self, query, values):
        """Execute an Insert query with values via AcraConnector for "keypair1"."""
        return self.engine1.execute(query.values(values))

    def executeBulkInsert(self, query, values):
        """Execute a Bulk Insert query with list of values via AcraConnector for "keypair1"."""
        return self.engine1.execute(query.values(values))

    def tearDown(self):
        super().tearDown()
        # remove the token DB created for this test run (see fork_acra)
        os.remove('token1.db')
class BaseMaskingBinaryPostgreSQLMixin(BaseBinaryPostgreSQLTestCase, BaseTestCase):
    """Mixin that routes masking-test inserts through PostgreSQL binary
    protocol prepared statements instead of plain SQLAlchemy execution."""

    def executeInsert(self, query, values):
        """Execute an Insert query with values via AcraConnector for "keypair1"."""
        query, parameters = self.compileInsertQuery(query, values)
        return self.executor1.execute_prepared_statement(query, parameters)

    def executeBulkInsert(self, query, values):
        """Execute a Bulk Insert query with list of values via AcraConnector for "keypair1"."""
        query, parameters = self.compileBulkInsertQuery(query.values(values), values)
        return self.executor1.execute_prepared_statement(query, parameters)
class BaseMaskingBinaryMySQLMixin(BaseBinaryMySQLTestCase, BaseTestCase):
    """Mixin that routes masking-test inserts through MySQL binary protocol
    prepared statements (no-result variant) instead of plain execution."""

    def executeInsert(self, query, values):
        """Execute an Insert query with values via AcraConnector for "keypair1"."""
        query, parameters = self.compileInsertQuery(query, values)
        return self.executor1.execute_prepared_statement_no_result(query, parameters)

    def executeBulkInsert(self, query, values):
        """Execute a Bulk Insert query with list of values via AcraConnector for "keypair1"."""
        query, parameters = self.compileBulkInsertQuery(query.values(values), values)
        return self.executor1.execute_prepared_statement_no_result(query, parameters)
class TestMaskingWithoutZone(BaseMasking):
    def test_masking_default_client_id(self):
        """Masked columns decrypt to source values only for the client id the
        data was encrypted with (keypair1, the inserting connector); any other
        client sees the mask pattern in place of the encrypted part."""
        default_client_id_table = sa.Table(
            'test_masking_default_client_id', metadata,
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('nullable_column', sa.Text, nullable=True),
            sa.Column('empty', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('masked_prefix', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('masked_suffix', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('masked_without_plaintext', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('exact_plaintext_length', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            sa.Column('shorter_plaintext', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
            extend_existing=True
        )
        metadata.create_all(self.engine_raw, [default_client_id_table])
        self.engine_raw.execute(default_client_id_table.delete())
        data = {
            'id': 1,
            'nullable_column': None,
            'empty': b'',
            'masked_prefix': random_bytes(9),
            'masked_suffix': random_bytes(9),
            'masked_without_plaintext': random_bytes(),
            'exact_plaintext_length': random_bytes(10),
            'shorter_plaintext': random_bytes(9),
        }
        # insert via connector keypair1; data is encrypted under that client id
        self.executeInsert(default_client_id_table.insert(), data)
        self.check_crypto_envelope(default_client_id_table, data['id'])
        # expect that data was encrypted with client_id from connector which used to insert (client_id==keypair1)
        source_data = self.engine1.execute(
            sa.select([default_client_id_table])
            .where(default_client_id_table.c.id == data['id']))
        source_data = source_data.fetchall()
        hidden_data = self.engine2.execute(
            sa.select([default_client_id_table])
            .where(default_client_id_table.c.id == data['id']))
        hidden_data = hidden_data.fetchall()
        # BUGFIX: was `len(source_data) != len(hidden_data) != 1`, which chains
        # to `len(source_data) != len(hidden_data) and len(hidden_data) != 1`
        # and silently passed when both results had the same wrong length.
        if len(source_data) != 1 or len(hidden_data) != 1:
            self.fail('incorrect len of result data')
        for i in ('masked_prefix', 'masked_suffix', 'masked_without_plaintext', 'exact_plaintext_length', 'shorter_plaintext'):
            self.assertEqual(source_data[0][i], data[i])
        hidden_data = hidden_data[0]
        mask_pattern = 'xxxx'.encode('ascii')
        # check that mask at correct place
        self.assertEqual(hidden_data['masked_prefix'][:len(mask_pattern)], mask_pattern)
        # check that len of masked value not equal to source data because acrastruct always longer than plaintext
        self.assertNotEqual(len(hidden_data['masked_prefix']), len(data['masked_prefix']))
        # BUGFIX: compared the bytes slice against the whole `data` dict
        # (always unequal, so the assertion was vacuous); compare against the
        # source column value instead.
        self.assertNotEqual(hidden_data['masked_prefix'][len(mask_pattern):], data['masked_prefix'])
        # check that data after mask is not the same as source data with same offset as mask length
        self.assertNotEqual(hidden_data['masked_prefix'][len(mask_pattern):], data['masked_prefix'][len(mask_pattern):])
        # check that mask at correct place
        self.assertEqual(hidden_data['masked_suffix'][-len(mask_pattern):], mask_pattern)
        # check that len of masked value not equal to source data because acrastruct always longer than plaintext
        self.assertNotEqual(len(hidden_data['masked_suffix']), len(data['masked_suffix']))
        # BUGFIX: same vacuous dict comparison as above -- compare against the
        # source column value.
        self.assertNotEqual(hidden_data['masked_suffix'][:-len(mask_pattern)], data['masked_suffix'])
        # check that data after mask is not the same as source data with same offset as mask length
        self.assertNotEqual(hidden_data['masked_suffix'][:-len(mask_pattern)], data['masked_suffix'][:-len(mask_pattern)])
        self.assertEqual(mask_pattern, hidden_data['masked_without_plaintext'])
        # if plaintext length > data, then whole data will be encrypted
        self.assertEqual(mask_pattern, hidden_data['exact_plaintext_length'])
        self.assertEqual(mask_pattern, hidden_data['shorter_plaintext'])
def test_masking_specific_client_id(self):
specific_client_id_table = sa.Table(
'test_masking_specific_client_id', metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('nullable_column', sa.Text, nullable=True),
sa.Column('empty', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
sa.Column('masked_prefix', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
sa.Column('masked_suffix', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
sa.Column('masked_without_plaintext', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
sa.Column('exact_plaintext_length', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
sa.Column('shorter_plaintext', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
extend_existing=True
)
metadata.create_all(self.engine_raw, [specific_client_id_table])
self.engine_raw.execute(specific_client_id_table.delete())
data = {
'id': 1,
'nullable_column': None,
'empty': b'',
'masked_prefix': random_bytes(9),
'masked_suffix': random_bytes(9),
'masked_without_plaintext': random_bytes(),
'exact_plaintext_length': random_bytes(10),
'shorter_plaintext': random_bytes(9),
}
# insert data data with another client_id (keypair1) than should be encrypted (keypair2)
self.executeInsert(specific_client_id_table.insert(), data)
self.check_crypto_envelope(specific_client_id_table, data['id'])
# expect that data was encrypted with client_id from connector which used to insert (client_id==keypair2)
source_data = self.engine2.execute(
sa.select([specific_client_id_table])
.where(specific_client_id_table.c.id == data['id']))
source_data = source_data.fetchall()
hidden_data = self.engine1.execute(
sa.select([specific_client_id_table])
.where(specific_client_id_table.c.id == data['id']))
hidden_data = hidden_data.fetchall()
if len(source_data) != len(hidden_data) != 1:
self.fail('incorrect len of result data')
for i in ('masked_prefix', 'masked_suffix', 'masked_without_plaintext', 'exact_plaintext_length', 'shorter_plaintext'):
self.assertEqual(source_data[0][i], data[i])
hidden_data = hidden_data[0]
mask_pattern = 'xxxx'.encode('ascii')
# check that mask at correct place
self.assertEqual(hidden_data['masked_prefix'][:len(mask_pattern)], mask_pattern)
# check that len of masked value not equal to source data because acrastruct always longer than plaintext
self.assertNotEqual(len(hidden_data['masked_prefix']), len(data['masked_prefix']))
# check that data after mask is not the same as source data
self.assertNotEqual(hidden_data['masked_prefix'][len(mask_pattern):], | |
such as logic libraries and information model descriptions, as well as to
describe a collection of knowledge assets.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Library = ResourceTypeCode("Library")
"""
Identifies two or more records (resource instances) that refer to the same
real-world "occurrence".
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Linkage = ResourceTypeCode("Linkage")
"""
A list is a curated collection of resources.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
List_ = ResourceTypeCode("List")
"""
Details and position information for a physical place where services are
provided and resources and participants may be stored, found, contained, or
accommodated.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Location = ResourceTypeCode("Location")
"""
The Measure resource provides the definition of a quality measure.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Measure = ResourceTypeCode("Measure")
"""
The MeasureReport resource contains the results of the calculation of a
measure; and optionally a reference to the resources involved in that
calculation.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MeasureReport = ResourceTypeCode("MeasureReport")
"""
A photo, video, or audio recording acquired or used in healthcare. The actual
content may be inline or provided by direct reference.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Media = ResourceTypeCode("Media")
"""
This resource is primarily used for the identification and definition of a
medication for the purposes of prescribing, dispensing, and administering a
medication as well as for making statements about medication use.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Medication = ResourceTypeCode("Medication")
"""
Describes the event of a patient consuming or otherwise being administered a
medication. This may be as simple as swallowing a tablet or it may be a long
running infusion. Related resources tie this event to the authorizing
prescription, and the specific encounter between patient and health care
practitioner.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicationAdministration = ResourceTypeCode("MedicationAdministration")
"""
Indicates that a medication product is to be or has been dispensed for a named
person/patient. This includes a description of the medication product
(supply) provided and the instructions for administering the medication. The
medication dispense is the result of a pharmacy system responding to a
medication order.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicationDispense = ResourceTypeCode("MedicationDispense")
"""
Information about a medication that is used to support knowledge.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicationKnowledge = ResourceTypeCode("MedicationKnowledge")
"""
An order or request for both supply of the medication and the instructions for
administration of the medication to a patient. The resource is called
"MedicationRequest" rather than "MedicationPrescription" or "MedicationOrder"
to generalize the use across inpatient and outpatient settings, including care
plans, etc., and to harmonize with workflow patterns.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicationRequest = ResourceTypeCode("MedicationRequest")
"""
A record of a medication that is being consumed by a patient. A
MedicationStatement may indicate that the patient may be taking the medication
now or has taken the medication in the past or will be taking the medication
in the future. The source of this information can be the patient, significant
other (such as a family member or spouse), or a clinician. A common scenario
where this information is captured is during the history taking process during
a patient visit or stay. The medication information may come from sources
such as the patient's memory, from a prescription bottle, or from a list of
medications the patient, clinician or other party maintains.
The primary difference between a medication statement and a medication
administration is that the medication administration has complete
administration information and is based on actual administration information
from the person who administered the medication. A medication statement is
often, if not always, less specific. There is no required date/time when the
medication was administered, in fact we only know that a source has reported
the patient is taking this medication, where details such as time, quantity,
or rate or even medication product may be incomplete or missing or less
precise. As stated earlier, the medication statement information may come
from the patient's memory, from a prescription bottle or from a list of
medications the patient, clinician or other party maintains. Medication
administration is more formal and is not missing detailed information.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicationStatement = ResourceTypeCode("MedicationStatement")
"""
Detailed definition of a medicinal product, typically for uses other than
direct patient care (e.g. regulatory use).
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProduct = ResourceTypeCode("MedicinalProduct")
"""
The regulatory authorization of a medicinal product.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductAuthorization = ResourceTypeCode("MedicinalProductAuthorization")
"""
The clinical particulars - indications, contraindications etc. of a medicinal
product, including for regulatory purposes.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductContraindication = ResourceTypeCode(
"MedicinalProductContraindication"
)
"""
Indication for the Medicinal Product.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductIndication = ResourceTypeCode("MedicinalProductIndication")
"""
An ingredient of a manufactured item or pharmaceutical product.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductIngredient = ResourceTypeCode("MedicinalProductIngredient")
"""
The interactions of the medicinal product with other medicinal products, or
other forms of interactions.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductInteraction = ResourceTypeCode("MedicinalProductInteraction")
"""
The manufactured item as contained in the packaged medicinal product.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductManufactured = ResourceTypeCode("MedicinalProductManufactured")
"""
A medicinal product in a container or package.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductPackaged = ResourceTypeCode("MedicinalProductPackaged")
"""
A pharmaceutical product described in terms of its composition and dose form.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductPharmaceutical = ResourceTypeCode("MedicinalProductPharmaceutical")
"""
Describe the undesirable effects of the medicinal product.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MedicinalProductUndesirableEffect = ResourceTypeCode(
"MedicinalProductUndesirableEffect"
)
"""
Defines the characteristics of a message that can be shared between systems,
including the type of event that initiates the message, the content to be
transmitted and what response(s), if any, are permitted.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MessageDefinition = ResourceTypeCode("MessageDefinition")
"""
The header for a message exchange that is either requesting or responding to
an action. The reference(s) that are the subject of the action as well as
other information related to the action are typically transmitted in a bundle
in which the MessageHeader resource instance is the first resource in the
bundle.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MessageHeader = ResourceTypeCode("MessageHeader")
"""
Raw data describing a biological sequence.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
MolecularSequence = ResourceTypeCode("MolecularSequence")
"""
A curated namespace that issues unique symbols within that namespace for the
identification of concepts, people, devices, etc. Represents a "System" used
within the Identifier and Coding data types.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
NamingSystem = ResourceTypeCode("NamingSystem")
"""
A request to supply a diet, formula feeding (enteral) or oral nutritional
supplement to a patient/resident.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
NutritionOrder = ResourceTypeCode("NutritionOrder")
"""
Measurements and simple assertions made about a patient, device or other
subject.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Observation = ResourceTypeCode("Observation")
"""
Set of definitional characteristics for a kind of observation or measurement
produced or consumed by an orderable health care service.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
ObservationDefinition = ResourceTypeCode("ObservationDefinition")
"""
A formal computable definition of an operation (on the RESTful interface) or a
named query (using the search interaction).
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
OperationDefinition = ResourceTypeCode("OperationDefinition")
"""
A collection of error, warning, or information messages that result from a
system action.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
OperationOutcome = ResourceTypeCode("OperationOutcome")
"""
A formally or informally recognized grouping of people or organizations formed
for the purpose of achieving some form of collective action. Includes
companies, institutions, corporations, departments, community groups,
healthcare practice groups, payer/insurer, etc.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Organization = ResourceTypeCode("Organization")
"""
Defines an affiliation/assotiation/relationship between 2 distinct
oganizations, that is not a part-of relationship/sub-division relationship.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
OrganizationAffiliation = ResourceTypeCode("OrganizationAffiliation")
"""
This resource is a non-persisted resource used to pass information into and
back from an [operation](operations.html). It has no other use, and there is
no RESTful endpoint associated with it.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Parameters = ResourceTypeCode("Parameters")
"""
Demographics and other administrative information about an individual or
animal receiving care or other health-related services.
From: http://hl7.org/fhir/resource-types in valuesets.xml
"""
Patient = ResourceTypeCode("Patient")
"""
This | |
i in metadata_rows)]
command+=["--legend_file", metadata_legend_file]
if len(metadata_rows) > 10:
command+=["--metadata_height","0.8"]
elif len(metadata_rows) > 4:
command+=["--metadata_height","0.4"]
elif len(metadata_rows) > 1:
command+=["--metadata_height","0.1"]
# if more than the max samples, do not include sample labels on the heatmap
if len(sample_names) > self.max_labels:
command+=["--no_slabels"]
# if more than max labels, do not include the feature labels on the heatmap
if len(feature_names) > self.max_labels:
command+=["--no_flabels"]
try:
output=subprocess.check_output(command)
# read the heatmap png file
heatmap=pyplot.imread(heatmap_file)
except (subprocess.CalledProcessError, OSError):
print("Unable to generate heatmap.")
heatmap=[]
# if the output file is provided, then just print out a link to it in the doc
if outfilename:
if os.path.isfile(heatmap_file):
print("\n\n{#id .class width=540px height=405px}\n\n")
if metadata_rows:
print("\n\n{#id .class width=540px height=405px}\n\n")
else:
# create a subplot and remove the frame and axis labels
# set the figure and increase the dpi for small text
fig = pyplot.figure(figsize=(6,6),dpi=dpi)
if metadata_rows:
subplot1 = pyplot.subplot2grid((4,1),(0,0), rowspan=3, frame_on=False)
subplot1.xaxis.set_visible(False)
subplot1.yaxis.set_visible(False)
else:
subplot = fig.add_subplot(111, frame_on=False)
subplot.xaxis.set_visible(False)
subplot.yaxis.set_visible(False)
# show but do not interpolate (as this will make the text hard to read)
pyplot.imshow(heatmap, interpolation="none")
if metadata_rows:
heatmap_legend = pyplot.imread(metadata_legend_file)
# metadata legend subplot
subplot2 = pyplot.subplot2grid((4,1),(3,0), rowspan=1, frame_on=False)
subplot2.xaxis.set_visible(False)
subplot2.yaxis.set_visible(False)
pyplot.imshow(heatmap_legend, interpolation="none")
pyplot.draw()
# adjust the heatmap to fit in the figure area
# this is needed to increase the image size (to fit in the increased figure)
pyplot.tight_layout()
def _run_r(self, commands, args=None):
""" Run R on the commands providing the arguments """
if args is None:
args=[]
proc=subprocess.Popen(["R","--vanilla","--quiet","--args"]+args,
stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc.communicate(input=bytearray("\n".join(commands),'utf-8'))
def filter_zero_rows(self, row_names, data):
""" Filter the rows from the data set that sum to zero
:param row_names: The names of the rows
:type row_names: list
:param data: A list of lists containing the data
:type data: list
"""
new_names=[]
new_data=[]
for name, row in zip(row_names, data):
if sum(row) != 0:
new_names.append(name)
new_data.append(row)
return new_names, new_data
def filter_zero_columns(self, column_names, data):
""" Filter the columns from the data set that sum to zero
:param column_names: The names of the columns
:type column_names: list
:param data: A list of lists containing the data
:type data: list
"""
import numpy
new_names, new_data = self.filter_zero_rows(column_names, numpy.transpose(data))
data_temp = []
for row in numpy.transpose(new_data):
data_temp.append(list(row))
new_data = data_temp
return new_names, new_data
def compute_pcoa(self, sample_names, feature_names, data, apply_transform):
""" Use the vegan package in R to compute a PCoA.
Input data should be organized with samples as columns and features as rows.
Data should be scaled to [0-1] if transform is to be applied.
:param sample_names: The labels for the columns
:type sample_names: list
:param feature_names: The labels for the data rows
:type feature_names: list
:param data: A list of lists containing the data
:type data: list
:keyword apply_transform: Arcsin transform to be applied
:type apply_transform: bool
"""
r_vegan_pcoa=[
"library(vegan)",
"args<-commandArgs(TRUE)",
"data<-read.table(args[1],sep='\\t',header=TRUE, row.names=1)",
"data.t<-as.data.frame(t(data))"]
if apply_transform:
r_vegan_pcoa+=["pcoa<-capscale(asin(sqrt(data.t))~1,distance='bray')"]
else:
r_vegan_pcoa+=["pcoa<-capscale(data.t~1,distance='bray')"]
r_vegan_pcoa+=[
"write.table(head(eigenvals(pcoa)/sum(eigenvals(pcoa))),args[2],sep='\\t')",
"write.table(as.data.frame(scores(pcoa,display='sites')),args[3],sep='\\t')"]
# test that the data is scaled to [0-1]
if apply_transform:
for row in data:
out_of_range=list(filter(lambda x: x < 0 or x > 1, row))
if len(out_of_range) > 0:
raise ValueError("Provide data to the AnADAMA2 document.show_pcoa function in the range of [0-1].")
# test for duplicate feature names
feature_set=set(feature_names)
if len(list(feature_set)) < len(feature_names):
raise ValueError("Do not provide duplicate feature names to document.show_pcoa.")
# test samples are provided as the columns of the data
if len(data[0]) != len(sample_names):
raise ValueError("Provide data to the AnADAMA2 document.show_pcoa function in the form of samples as columns.")
# test features are provided as rows of the data
if len(data) != len(feature_names):
raise ValueError("Provide data to the AnADAMA2 document.show_pcoa function in the form of features as rows.")
# remove any samples from the data for which all features are zero
sample_names, data = self.filter_zero_columns(sample_names, data)
# remove any features from the data for which all samples have zero values
feature_names, data = self.filter_zero_rows(feature_names, data)
# write a file of the data
handle, vegan_input_file=tempfile.mkstemp(prefix="vegan_input",dir=os.getcwd())
eigenvalues_file=vegan_input_file+".eigen"
scores_file=vegan_input_file+".scores"
self.write_table(["# "]+sample_names,feature_names,data,vegan_input_file)
self._run_r(r_vegan_pcoa,[vegan_input_file,eigenvalues_file,scores_file])
# get the x and y labels
r_run_error=False
try:
columns, rows, data = self.read_table(eigenvalues_file)
except EnvironmentError:
print("No eigenvalues found")
data=[[0],[0]]
r_run_error=True
pcoa1_x_label=int(data[0][0]*100)
pcoa2_y_label=int(data[1][0]*100)
# get the scores to plot
try:
columns, rows, pcoa_data = self.read_table(scores_file)
except EnvironmentError:
print("No scores found")
r_run_error=True
columns=[]
rows=[]
pcoa_data=[]
# if there were no errors, remove the temp files
if not r_run_error:
try:
os.remove(vegan_input_file)
os.remove(eigenvalues_file)
os.remove(scores_file)
except EnvironmentError:
print("Warning: Unable to remove temp files")
return pcoa_data, pcoa1_x_label, pcoa2_y_label
def show_pcoa_multiple_plots(self, sample_names, feature_names, data, title, abundances, legend_title="% Abundance", sample_types="samples", feature_types="species", apply_transform=False):
""" Use the vegan package in R plus matplotlib to plot a PCoA.
Input data should be organized with samples as columns and features as rows.
Data should be scaled to [0-1] if transform is to be applied.
Show multiple PCoA plots as subplots each with coloring based on abundance.
:param sample_names: The labels for the columns
:type sample_names: list
:param feature_names: The labels for the data rows
:type feature_names: list
:param data: A list of lists containing the data
:type data: list
:param title: The title for the plot
:type title: str
:param abundances: The sets of abundance data and names for the subplots
:type abundances: dict
:keyword legend_title: The title for the legend
:type legend_title: str
:keyword sample_types: What type of data are the columns
:type sample_types: str
:keyword feature_types: What type of data are the rows
:type feature_types: str
:keyword apply_transform: Arcsin transform to be applied
:type apply_transform: bool
"""
import numpy
import matplotlib.pyplot as pyplot
from matplotlib import cm
pcoa_data, pcoa1_x_label, pcoa2_y_label=self.compute_pcoa(sample_names, feature_names, data, apply_transform)
# create a figure and subplots
nrows = len(abundances.keys())/2
figure, axis = pyplot.subplots(nrows=nrows,ncols=2)
# if needed, modify matrix of axis to list
reformatted_axis = []
if isinstance(axis[0],numpy.ndarray):
for axis_list in axis:
reformatted_axis+=axis_list.tolist()
axis=reformatted_axis
figure.suptitle(title,fontsize=12,y=1.002)
x_values = [x for x,y in pcoa_data]
y_values = [y for x,y in pcoa_data]
for subplot, abundance_name in zip(axis,sorted(abundances.keys())):
pcoa_plot=subplot.scatter(x_values,y_values,c=abundances[abundance_name],cmap=cm.jet)
figure.colorbar(pcoa_plot,ax=subplot,label=legend_title)
subplot.set_title(abundance_name)
subplot.set(xlabel="PCoA 1 ("+str(pcoa1_x_label)+" %)",ylabel="PCoA 2 ("+str(pcoa2_y_label)+" %)")
subplot.tick_params(axis="both",bottom="off",labelbottom="off",left="off",labelleft="off")
# adjust spacing between subplots
figure.tight_layout()
pyplot.draw()
def show_pcoa(self, sample_names, feature_names, data, title, sample_types="samples", feature_types="species",
metadata=None, apply_transform=False, sort_function=None, metadata_type=None, outfilename=None):
""" Use the vegan package in R plus matplotlib to plot a PCoA.
Input data should be organized with samples as columns and features as rows.
Data should be scaled to [0-1] if transform is to be applied.
:param sample_names: The labels for the columns
:type sample_names: list
:param feature_names: The labels for the data rows
:type feature_names: list
:param data: A list of lists containing the data
:type data: list
:param title: The title for the plot
:type title: str
:keyword sample_types: What type of data are the columns
:type sample_types: str
:keyword feature_types: What type of data are the rows
:type feature_types: str
:keyword metadata: Metadata for each sample
:type metadata: dict
:keyword metadata_type: Type of metadata (continuous or categorical)
:type metadata_type: str
:keyword apply_transform: Arcsin transform to be applied
:type apply_transform: bool
:keyword sort_function: The function to sort the plot data
:type sort_function: lambda
"""
import matplotlib.pyplot as pyplot
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.patches as mpatches
import numpy as np
pcoa_data, pcoa1_x_label, pcoa2_y_label = self.compute_pcoa(sample_names, feature_names, data, apply_transform)
# create a figure subplot to move the legend
figure = pyplot.figure(figsize=(10,6),dpi=150)
subplot = pyplot.subplot(111)
nancolor="grey"
# create a set of custom colors to prevent overlap
if metadata:
metadata_categories = list(set(metadata.values()))
custom_colors = self._custom_colors(total_colors=len(metadata_categories))
if metadata_type == 'con':
cleaned_array = [value for value in metadata_categories if ~np.isnan(value)]
normalize = mcolors.Normalize(vmin=min(cleaned_array), vmax=max(cleaned_array))
colormap = pyplot.get_cmap('jet')
scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=colormap)
scalarmappaple.set_array(cleaned_array)
custom_colors_cont = []
for value in metadata_categories:
if np.isnan(value):
custom_colors_cont.append(nancolor)
else:
custom_colors_cont.append(colormap(normalize(value)))
colors_by_metadata = dict((key, color) for key, color in zip(metadata_categories, custom_colors_cont))
else:
colors_by_metadata = | |
<gh_stars>1-10
import cPickle
import mxnet as mx
from symbols.symbol import Symbol
from operator_py.box_annotator_ohem import *
from operator_py.debug_data import *
import numpy as np
def checkpoint_callback(bbox_param_names, prefix, means, stds):
    """Build an epoch-end callback that writes a checkpoint to disk.

    ``bbox_param_names``, ``means`` and ``stds`` are accepted for interface
    compatibility but are unused: the bbox-prediction un-normalization step
    that would consume them is currently disabled.
    """
    def _callback(iter_no, sym, arg, aux):
        # Checkpoints are 1-indexed: epoch N is saved as prefix-000(N+1).params.
        mx.model.save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
    return _callback
class resnet_mx_101_e2e_3k(Symbol):
    def __init__(self, n_proposals=400, momentum=0.95, fix_bn=False, test_nbatch=1):
        """
        Use __init__ to define parameter network needs

        NOTE(review): n_proposals is stored nowhere and unused by the visible
        methods -- confirm whether any caller relies on it.
        """
        self.momentum = momentum  # BatchNorm momentum for layers whose statistics are not frozen
        self.use_global_stats = True  # NOTE(review): set but not read by the visible methods -- confirm use elsewhere
        self.workspace = 512  # convolution workspace limit (MB) passed to mx.sym.Convolution
        self.units = (3, 4, 23, 3)  # residual units per stage (use for 101-layer ResNet)
        self.filter_list = [64, 256, 512, 1024, 2048]  # channel widths: stem conv + the four residual stages
        self.fix_bn = fix_bn  # when True every BatchNorm uses frozen global statistics
        self.test_nbatch= test_nbatch  # batch size used when building the test-time symbol
def get_bbox_param_names(self):
return ['bbox_pred_weight', 'bbox_pred_bias']
def residual_unit(self, data, num_filter, stride, dim_match, name, bn_mom=0.9, workspace=512, memonger=False,
fix_bn=False):
if fix_bn or self.fix_bn:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn1')
else:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1),
pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv1')
if fix_bn or self.fix_bn:
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn2')
else:
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride,
pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
if fix_bn or self.fix_bn:
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn3')
else:
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
    def residual_unit_dilate(self, data, num_filter, stride, dim_match, name, bn_mom=0.9, workspace=512,
                             memonger=False):
        """Pre-activation bottleneck unit whose middle 3x3 convolution is dilated.

        Same layout as residual_unit (BN -> ReLU -> 1x1 / 3x3 / 1x1 convolutions
        with a projection shortcut when dim_match is False), except the 3x3
        convolution uses dilate=(2, 2) with pad=(2, 2) to enlarge the receptive
        field at unchanged resolution.  bn_mom is unused; the instance momentum
        is applied instead.
        """
        # BN 1: frozen global statistics when the whole network fixes BN.
        if self.fix_bn:
            bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn1')
        else:
            bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        # 1x1 reduction to a quarter of the output channels.
        conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1),
                                   pad=(0, 0),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        if self.fix_bn:
            bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn2')
        else:
            bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        # Dilated 3x3 convolution; pad=(2, 2) keeps the spatial size for stride 1.
        conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), dilate=(2, 2),
                                   stride=stride, pad=(2, 2),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        if self.fix_bn:
            bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn3')
        else:
            bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn3')
        act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
        # 1x1 expansion back to num_filter channels.
        conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                                   no_bias=True,
                                   workspace=workspace, name=name + '_conv3')
        if dim_match:
            shortcut = data
        else:
            # Projection shortcut from the post-activation input (pre-act ResNet).
            shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
                                          workspace=workspace, name=name + '_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv3 + shortcut
    def residual_unit_deform(self, data, num_filter, stride, dim_match, name, bn_mom=0.9, workspace=512,
                             memonger=False):
        """Pre-activation bottleneck unit whose middle 3x3 is a deformable convolution.

        Same layout as residual_unit_dilate, but the dilated 3x3 convolution is
        replaced by a DeformableConvolution driven by a learned per-position
        offset field.  bn_mom is unused; the instance momentum is applied
        instead.

        NOTE(review): the deformable conv's num_filter is hard-coded to 512
        (= num_filter * 0.25 only when num_filter == 2048, i.e. the res5
        stage) -- confirm before reusing this unit elsewhere.
        """
        if self.fix_bn:
            bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn1')
        else:
            bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        # 1x1 reduction to a quarter of the output channels.
        conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1),
                                   pad=(0, 0),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        if self.fix_bn:
            bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn2')
        else:
            bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        # Offset field: 72 channels = 2 (x,y) * 3*3 kernel * 4 deformable groups.
        offset = mx.symbol.Convolution(name=name + '_offset', data=act2,
                                       num_filter=72, pad=(2, 2), kernel=(3, 3), stride=(1, 1),
                                       dilate=(2, 2), cudnn_off=True)
        conv2 = mx.contrib.symbol.DeformableConvolution(name=name + '_conv2', data=act2,
                                                        offset=offset,
                                                        num_filter=512, pad=(2, 2), kernel=(3, 3),
                                                        num_deformable_group=4,
                                                        stride=(1, 1), dilate=(2, 2), no_bias=True)
        if self.fix_bn:
            bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, use_global_stats=True, name=name + '_bn3')
        else:
            bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=self.momentum, name=name + '_bn3')
        act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
        # 1x1 expansion back to num_filter channels.
        conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                                   no_bias=True,
                                   workspace=workspace, name=name + '_conv3')
        if dim_match:
            shortcut = data
        else:
            # Projection shortcut from the post-activation input (pre-act ResNet).
            shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
                                          workspace=workspace, name=name + '_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv3 + shortcut
def get_rpn(self, conv_feat, num_anchors):
conv_feat = mx.sym.Cast(data=conv_feat, dtype=np.float32)
rpn_conv = mx.sym.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.sym.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.sym.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
return rpn_cls_score, rpn_bbox_pred
    def get_symbol_rcnn(self, cfg, is_train=True):
        """Build the end-to-end RPN + R-FCN symbol for the 3k-class detector.

        :param cfg: experiment config; reads cfg.network.* (anchors, strides)
                    and cfg.TRAIN.* / cfg.TEST.* hyper-parameters
        :param is_train: when True attach the training losses, otherwise the
                         test-time outputs (rois, probabilities, box deltas)
        :returns: an mx.sym.Group of outputs (also stored on self.sym)
        """
        num_anchors = cfg.network.NUM_ANCHORS
        # 3131 = 3130 object sub-classes + background (see num_sub_classes below)
        num_classes = 3131
        # input init
        if is_train:
            data = mx.sym.Variable(name="data")
            rpn_label = mx.sym.Variable(name='label')
            rpn_bbox_target = mx.sym.Variable(name='bbox_target')
            rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
            gt_boxes = mx.sym.Variable(name='gt_boxes')
            valid_ranges = mx.sym.Variable(name='valid_ranges')
            im_info = mx.sym.Variable(name='im_info')
        else:
            data = mx.sym.Variable(name="data")
            im_info = mx.sym.Variable(name='im_info')
            im_ids = mx.sym.Variable(name='im_ids')
        # fp16 training scales the loss gradients to keep them representable
        if cfg.TRAIN.fp16 == True:
            grad_scale = float(cfg.TRAIN.scale)
        else:
            grad_scale = 1.0
        # shared convolutional layers
        conv_feat = self.resnetc4(data, fp16=cfg.TRAIN.fp16)
        # res5
        relut = self.resnetc5(conv_feat, deform=True)
        # concatenate c4 and c5 features; detection heads run on this in fp32
        relu1 = mx.symbol.Concat(*[conv_feat, relut], name='cat4')
        relu1 = mx.sym.Cast(data=relu1, dtype=np.float32)
        rpn_cls_score, rpn_bbox_pred = self.get_rpn(conv_feat, num_anchors)
        # reshape to (batch, 2, anchors*H, W) for per-anchor softmax
        rpn_cls_score_reshape = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0),
                                               name="rpn_cls_score_reshape")
        if is_train:
            rpn_cls_prob = mx.sym.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
                                                normalization='valid', use_ignore=True, ignore_label=-1,
                                                name="rpn_cls_prob", grad_scale=grad_scale)
            # propose rois and assign training targets in one fused op;
            # posrois/poslabels are the positive subset used by the sub-class head
            rois, label, bbox_target, bbox_weight, posrois, _, poslabels = mx.sym.MultiProposalTargetMask(cls_prob=rpn_cls_prob, bbox_pred=rpn_bbox_pred, im_info=im_info, gt_boxes=gt_boxes, valid_ranges=valid_ranges, batch_size=cfg.TRAIN.BATCH_IMAGES, scales=cfg.network.ANCHOR_SCALES, rfcn_3k=True, name='multi_proposal_target_mask')
            label = mx.symbol.Reshape(data=label, shape=(-1,), name='label_reshape')
            sublabel = mx.symbol.Reshape(data=poslabels, shape=(-1,), name='sublabel_reshape')
        else:
            rpn_cls_prob = mx.sym.SoftmaxActivation(
                data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
            rpn_cls_prob_reshape = mx.sym.Reshape(
                data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
            rois, _ = mx.sym.MultiProposal(cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info,
                                           name='rois', batch_size=self.test_nbatch,
                                           rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N,
                                           rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
                                           rpn_min_size=cfg.TEST.RPN_MIN_SIZE,
                                           threshold=cfg.TEST.RPN_NMS_THRESH,
                                           feature_stride=cfg.network.RPN_FEAT_STRIDE,
                                           ratios=tuple(cfg.network.ANCHOR_RATIOS),
                                           scales=tuple(cfg.network.ANCHOR_SCALES))
        # two parallel 1x1 necks: conv_new_1 feeds the R-FCN cls/bbox maps,
        # conv_new_2/3 feed the fine-grained sub-class head
        conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=1024, name="conv_new_1")
        conv_new_1_relu = mx.sym.Activation(data=conv_new_1, act_type='relu', name='conv_new_1_relu')
        conv_new_2 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=1024, name="conv_new_2")
        conv_new_2_relu = mx.sym.Activation(data=conv_new_2, act_type='relu', name='conv_new_2_relu')
        num_sub_classes = num_classes - 1
        conv_new_3 = mx.sym.Convolution(data=conv_new_2_relu, kernel=(1, 1), num_filter=num_sub_classes, name="conv_new_3")
        # sub-class head: pool per-roi score maps (positives only at train time)
        if is_train:
            roipooled_subcls_rois = mx.sym.ROIPooling(name='roipooled_subcls_rois', data=conv_new_3,
                                                      rois=posrois, pooled_size=(7, 7), spatial_scale=0.0625)
        else:
            roipooled_subcls_rois = mx.sym.ROIPooling(name='roipooled_subcls_rois', data=conv_new_3,
                                                      rois=rois, pooled_size=(7, 7), spatial_scale=0.0625)
        subcls_score = mx.sym.Pooling(name='ave_subcls_scors_rois', data=roipooled_subcls_rois, pool_type='avg',
                                      global_pool=True, kernel=(7, 7))
        subcls_score = mx.sym.Reshape(name='subcls_score_reshape', data=subcls_score, shape=(-1, num_sub_classes))
        # rfcn_cls/rfcn_bbox
        rfcn_cls = mx.sym.Convolution(data=conv_new_1, kernel=(1, 1), num_filter=7*7*2, name="rfcn_cls")
        rfcn_bbox = mx.sym.Convolution(data=conv_new_1, kernel=(1, 1), num_filter=7*7*4, name="rfcn_bbox")
        # position-sensitive roi pooling (no_trans=True: plain PSROIPooling path)
        psroipooled_cls_rois = mx.contrib.sym.DeformablePSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois,
                                                                     group_size=7, pooled_size=7, sample_per_part=4, no_trans=True, trans_std=0.1,
                                                                     output_dim=2, spatial_scale=0.0625, part_size=7)
        psroipooled_loc_rois = mx.contrib.sym.DeformablePSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois,
                                                                     group_size=7, pooled_size=7, sample_per_part=4, no_trans=True, trans_std=0.1,
                                                                     output_dim=4, spatial_scale=0.0625, part_size=7)
        cls_score = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
        bbox_pred = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
        cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, 2))
        bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4))
        # prepare rpn data
        if cfg.TRAIN.fp16 == True:
            grad_scale = float(cfg.TRAIN.scale)
        else:
            grad_scale = 1.0
        if is_train:
            # loss grad_scales carry per-head normalization constants
            cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, use_ignore=True, ignore_label=-1, grad_scale=grad_scale / (300.0*16.0))
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 2),
                                      name='cls_prob_reshape')
            subcls_prob = mx.sym.SoftmaxOutput(name='subcls_prob', data=subcls_score, label=sublabel, use_ignore=True, ignore_label=-1, grad_scale=grad_scale / (200.0*16.0))
            bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=grad_scale / (188.0*16.0))
            rpn_bbox_loss_ = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_', scalar=1.0, data=(rpn_bbox_pred - rpn_bbox_target))
            rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_,
                                            grad_scale=3 * grad_scale / float(cfg.TRAIN.BATCH_IMAGES * cfg.TRAIN.RPN_BATCH_SIZE))
            rcnn_label = label
            rcnn_sublabel = sublabel
            #bbox_loss = mx.sym.Custom(datai1=bbox_loss, datai2=rcnn_label, datai3=rcnn_sublabel, datai4=bbox_weight, op_type='debug_data')
            # labels are BlockGrad'ed so they flow to metrics without gradients
            group = mx.sym.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, subcls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label), mx.sym.BlockGrad(rcnn_sublabel)])
        else:
            cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(self.test_nbatch, -1, 2),
                                      name='cls_prob_reshape')
            subcls_prob = mx.sym.SoftmaxActivation(name='subcls_prob', data=subcls_score)
            subcls_prob = mx.sym.Reshape(data=subcls_prob, shape=(self.test_nbatch, -1, num_sub_classes),
                                         name='subcls_prob_reshape')
            bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(self.test_nbatch, -1, 4),
                                       name='bbox_pred_reshape')
            group = mx.sym.Group([rois, cls_prob, bbox_pred, im_ids, subcls_prob])
        self.sym = group
        return group
def resnetc4(self, data, fp16=False):
units = self.units
filter_list = self.filter_list
bn_mom = self.momentum
workspace = self.workspace
num_stage = len(units)
memonger = False
data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, use_global_stats=True, name='bn_data')
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2, 2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
if fp16:
body = mx.sym.Cast(data=body, dtype=np.float16)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, use_global_stats=True, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max')
for i in range(num_stage - 1):
body = self.residual_unit(body, filter_list[i + 1], (1 if i == 0 else 2, 1 if i == 0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), workspace=workspace,
memonger=memonger, fix_bn=(i == 0))
for j in range(units[i] - 1):
body = self.residual_unit(body, filter_list[i + 1], (1, 1), True,
name='stage%d_unit%d' % (i + 1, j + 2),
workspace=workspace, memonger=memonger, fix_bn=(i == | |
can be or-combined
sim_drawing_itemcolors =0x00020 # +3 values per item (each item has its own ambient color (rgb values)).
                                            # Mutually exclusive with sim_drawing_vertexcolors
sim_drawing_vertexcolors =0x00040 # +3 values per vertex (each vertex has its own ambient color (rgb values). Only for sim_drawing_lines (+6) and for sim_drawing_triangles(+9)). Mutually exclusive with sim_drawing_itemcolors
sim_drawing_itemsizes =0x00080 # +1 value per item (each item has its own size). Not for sim_drawing_triangles
sim_drawing_backfaceculling =0x00100 # back faces are not displayed for all items
sim_drawing_wireframe =0x00200 # all items displayed in wireframe
sim_drawing_painttag =0x00400 # all items are tagged as paint (for additional processing at a later stage)
sim_drawing_followparentvisibility =0x00800 # if the object is associated with a scene object then it follows that visibility otherwise it is always visible
sim_drawing_cyclic =0x01000 # if the max item count was reached then the first items are overwritten.
sim_drawing_50percenttransparency =0x02000 # the drawing object will be 50% transparent
sim_drawing_25percenttransparency =0x04000 # the drawing object will be 25% transparent
sim_drawing_12percenttransparency =0x08000 # the drawing object will be 12.5% transparent
sim_drawing_emissioncolor =0x10000 # When used in combination with sim_drawing_itemcolors or sim_drawing_vertexcolors then the specified colors will be for the emissive component
sim_drawing_facingcamera =0x20000 # Only for trianglepoints quadpoints discpoints and cubepoints. If specified the normal vector is calculated to face the camera (each item data requires 3 values less)
sim_drawing_overlay =0x40000 # When specified objects are always drawn on top of "regular objects"
sim_drawing_itemtransparency =0x80000 # +1 value per item (each item has its own transparency value (0-1)). Not compatible with sim_drawing_vertexcolors

# banner values
# following can be or-combined
sim_banner_left =0x00001 # Banners display on the left of the specified point
sim_banner_right =0x00002 # Banners display on the right of the specified point
sim_banner_nobackground =0x00004 # Banners have no background rectangle
sim_banner_overlay =0x00008 # When specified banners are always drawn on top of "regular objects"
sim_banner_followparentvisibility =0x00010 # if the object is associated with a scene object then it follows that visibility otherwise it is always visible
sim_banner_clickselectsparent =0x00020 # if the object is associated with a scene object then clicking the banner will select the scene object
sim_banner_clicktriggersevent =0x00040 # if the banner is clicked an event is triggered (sim_message_eventcallback_bannerclicked and sim_message_bannerclicked are generated)
sim_banner_facingcamera =0x00080 # If specified the banner will always face the camera by rotating around the banner's vertical axis (y-axis)
sim_banner_fullyfacingcamera =0x00100 # If specified the banner will always fully face the camera (the banner's orientation is same as the camera looking at it)
sim_banner_backfaceculling =0x00200 # If specified the banner will only be visible from one side
sim_banner_keepsamesize =0x00400 # If specified the banner will always appear in the same size. In that case size represents the character height in pixels
sim_banner_bitmapfont =0x00800 # If specified a fixed-size bitmap font is used. The text will also always fully face the camera and be right
                                            # to the specified position. Bitmap fonts are not clickable

# particle objects following are mutually exclusive
sim_particle_points1 =0 # 6 values per point (pt1 and pt2. Pt1 is start position pt2-pt1 is the initial velocity vector).
#Point is 1 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_points2 =1 # 6 values per point. Point is 2 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_points4 =2 # 6 values per point. Point is 4 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_roughspheres =3 # 6 values per sphere. Only appearance is rough. Internally a perfect sphere
sim_particle_spheres =4 # 6 values per sphere. Internally a perfect sphere

# following can be or-combined
sim_particle_respondable1to4 =0x0020 # the particles are respondable against shapes (against all objects that have at least one bit 1-4 activated in the global respondable mask)
sim_particle_respondable5to8 =0x0040 # the particles are respondable against shapes (against all objects that have at least one bit 5-8 activated in the global respondable mask)
sim_particle_particlerespondable =0x0080 # the particles are respondable against each other
sim_particle_ignoresgravity =0x0100 # the particles ignore the effect of gravity. Not compatible with sim_particle_water
sim_particle_invisible =0x0200 # the particles are invisible
sim_particle_itemsizes =0x0400 # +1 value per particle (each particle can have a different size)
sim_particle_itemdensities =0x0800 # +1 value per particle (each particle can have a different density)
sim_particle_itemcolors =0x1000 # +3 values per particle (each particle can have a different color)
sim_particle_cyclic =0x2000 # if the max item count was reached then the first items are overwritten.
sim_particle_emissioncolor =0x4000 # When used in combination with sim_particle_itemcolors then the specified colors will be for the emissive component
sim_particle_water =0x8000 # the particles are water particles (no weight in the water (i.e. when z<0)). Not compatible with sim_particle_ignoresgravity
sim_particle_painttag =0x10000 # The particles can be seen by vision sensors (sim_particle_invisible must not be set)

# custom user interface menu attributes
sim_ui_menu_title =1
sim_ui_menu_minimize =2
sim_ui_menu_close =4
sim_ui_menu_systemblock =8

# Boolean parameters
sim_boolparam_hierarchy_visible =0
sim_boolparam_console_visible =1
sim_boolparam_collision_handling_enabled =2
sim_boolparam_distance_handling_enabled =3
sim_boolparam_ik_handling_enabled =4
sim_boolparam_gcs_handling_enabled =5
sim_boolparam_dynamics_handling_enabled =6
sim_boolparam_joint_motion_handling_enabled =7
sim_boolparam_path_motion_handling_enabled =8
sim_boolparam_proximity_sensor_handling_enabled =9
sim_boolparam_vision_sensor_handling_enabled =10
sim_boolparam_mill_handling_enabled =11
sim_boolparam_browser_visible =12
sim_boolparam_scene_and_model_load_messages =13
sim_reserved0 =14
sim_boolparam_shape_textures_are_visible =15
sim_boolparam_display_enabled =16
sim_boolparam_infotext_visible =17
sim_boolparam_statustext_open =18
sim_boolparam_fog_enabled =19
sim_boolparam_rml2_available =20
sim_boolparam_rml4_available =21
sim_boolparam_mirrors_enabled =22
sim_boolparam_aux_clip_planes_enabled =23
sim_boolparam_full_model_copy_from_api =24
sim_boolparam_realtime_simulation =25
# NOTE(review): value 26 is absent here -- confirm against the V-REP header before relying on contiguity
sim_boolparam_force_show_wireless_emission =27
sim_boolparam_force_show_wireless_reception =28
sim_boolparam_video_recording_triggered =29
# NOTE(review): values 30-31 are absent here -- confirm against the V-REP header
sim_boolparam_threaded_rendering_enabled =32
sim_boolparam_fullscreen =33
sim_boolparam_headless =34
sim_boolparam_hierarchy_toolbarbutton_enabled =35
sim_boolparam_browser_toolbarbutton_enabled =36
sim_boolparam_objectshift_toolbarbutton_enabled =37
sim_boolparam_objectrotate_toolbarbutton_enabled=38
sim_boolparam_force_calcstruct_all_visible =39
sim_boolparam_force_calcstruct_all =40
sim_boolparam_exit_request =41
sim_boolparam_play_toolbarbutton_enabled =42
sim_boolparam_pause_toolbarbutton_enabled =43
sim_boolparam_stop_toolbarbutton_enabled =44
sim_boolparam_waiting_for_trigger =45

# Integer parameters
sim_intparam_error_report_mode =0 # Check sim_api_errormessage_... constants above for valid values
sim_intparam_program_version =1 # e.g Version 2.1.4 --> 20104. Can only be read
sim_intparam_instance_count =2 # do not use anymore (always returns 1 since V-REP 2.5.11)
sim_intparam_custom_cmd_start_id =3 # can only be read
sim_intparam_compilation_version =4 # 0=evaluation version 1=full version 2=player version. Can only be read
sim_intparam_current_page =5
sim_intparam_flymode_camera_handle =6 # can only be read
sim_intparam_dynamic_step_divider =7 # can only be read
sim_intparam_dynamic_engine =8 # 0=Bullet 1=ODE. 2=Vortex.
sim_intparam_server_port_start =9 # can only be read
sim_intparam_server_port_range =10 # can only be read
sim_intparam_visible_layers =11
sim_intparam_infotext_style =12
sim_intparam_settings =13
sim_intparam_edit_mode_type =14 # can only be read
sim_intparam_server_port_next =15 # is initialized at sim_intparam_server_port_start
sim_intparam_qt_version =16 # version of the used Qt framework
sim_intparam_event_flags_read =17 # can only be read
sim_intparam_event_flags_read_clear =18 # can only be read
sim_intparam_platform =19 # can only be read
sim_intparam_scene_unique_id =20 # can only be read
sim_intparam_work_thread_count =21
sim_intparam_mouse_x =22
sim_intparam_mouse_y =23
sim_intparam_core_count =24
sim_intparam_work_thread_calc_time_ms =25
sim_intparam_idle_fps =26
sim_intparam_prox_sensor_select_down =27
sim_intparam_prox_sensor_select_up =28
sim_intparam_stop_request_counter =29
sim_intparam_program_revision =30
sim_intparam_mouse_buttons =31
sim_intparam_dynamic_warning_disabled_mask =32
sim_intparam_simulation_warning_disabled_mask =33
sim_intparam_scene_index =34
sim_intparam_motionplanning_seed =35
sim_intparam_speedmodifier =36

# Float parameters
sim_floatparam_rand=0 # random value (0.0-1.0)
sim_floatparam_simulation_time_step =1
sim_floatparam_stereo_distance =2

# String parameters
sim_stringparam_application_path=0 # path of V-REP's executable
sim_stringparam_video_filename=1
sim_stringparam_app_arg1 =2
sim_stringparam_app_arg2 =3
sim_stringparam_app_arg3 =4
sim_stringparam_app_arg4 =5
sim_stringparam_app_arg5 =6
sim_stringparam_app_arg6 =7
sim_stringparam_app_arg7 =8
sim_stringparam_app_arg8 =9
sim_stringparam_app_arg9 =10
sim_stringparam_scene_path_and_name =13

# Array parameters
sim_arrayparam_gravity =0
sim_arrayparam_fog =1
sim_arrayparam_fog_color =2
sim_arrayparam_background_color1=3
sim_arrayparam_background_color2=4
sim_arrayparam_ambient_light =5
sim_arrayparam_random_euler =6

# per-object parameter IDs (sim_objintparam_... / sim_objfloatparam_...)
sim_objintparam_visibility_layer= 10
sim_objfloatparam_abs_x_velocity= 11
sim_objfloatparam_abs_y_velocity= 12
sim_objfloatparam_abs_z_velocity= 13
sim_objfloatparam_abs_rot_velocity= 14
sim_objfloatparam_objbbox_min_x= 15
sim_objfloatparam_objbbox_min_y= 16
sim_objfloatparam_objbbox_min_z= 17
sim_objfloatparam_objbbox_max_x= 18
sim_objfloatparam_objbbox_max_y= 19
sim_objfloatparam_objbbox_max_z= 20
sim_objfloatparam_modelbbox_min_x= 21
sim_objfloatparam_modelbbox_min_y= 22
sim_objfloatparam_modelbbox_min_z= 23
sim_objfloatparam_modelbbox_max_x= 24
sim_objfloatparam_modelbbox_max_y= 25
sim_objfloatparam_modelbbox_max_z= 26
sim_objintparam_collection_self_collision_indicator= 27
sim_objfloatparam_transparency_offset= 28
sim_objintparam_child_role= 29
sim_objintparam_parent_role= 30
sim_objintparam_manipulation_permissions= 31
sim_objintparam_illumination_handle= 32

# vision sensor parameter IDs
sim_visionfloatparam_near_clipping= 1000
sim_visionfloatparam_far_clipping= 1001
sim_visionintparam_resolution_x= 1002
sim_visionintparam_resolution_y= 1003
sim_visionfloatparam_perspective_angle= 1004
sim_visionfloatparam_ortho_size= 1005
sim_visionintparam_disabled_light_components= 1006
sim_visionintparam_rendering_attributes= 1007
sim_visionintparam_entity_to_render= 1008
sim_visionintparam_windowed_size_x= 1009
sim_visionintparam_windowed_size_y= 1010
sim_visionintparam_windowed_pos_x= 1011
sim_visionintparam_windowed_pos_y= 1012
sim_visionintparam_pov_focal_blur= 1013
sim_visionfloatparam_pov_blur_distance= 1014
sim_visionfloatparam_pov_aperture= 1015
sim_visionintparam_pov_blur_sampled= 1016
sim_visionintparam_render_mode= 1017

# joint parameter IDs
sim_jointintparam_motor_enabled= 2000
sim_jointintparam_ctrl_enabled= 2001
sim_jointfloatparam_pid_p= 2002
sim_jointfloatparam_pid_i= 2003
sim_jointfloatparam_pid_d= 2004
sim_jointfloatparam_intrinsic_x= 2005
sim_jointfloatparam_intrinsic_y= 2006
sim_jointfloatparam_intrinsic_z= 2007
sim_jointfloatparam_intrinsic_qx= 2008
sim_jointfloatparam_intrinsic_qy= 2009
sim_jointfloatparam_intrinsic_qz= 2010
sim_jointfloatparam_intrinsic_qw= 2011
sim_jointfloatparam_velocity= 2012
sim_jointfloatparam_spherical_qx= 2013
sim_jointfloatparam_spherical_qy= 2014
sim_jointfloatparam_spherical_qz= 2015
sim_jointfloatparam_spherical_qw= 2016
sim_jointfloatparam_upper_limit= 2017
sim_jointfloatparam_kc_k= 2018
sim_jointfloatparam_kc_c= 2019
# NOTE(review): value 2020 is absent here -- confirm against the V-REP header
sim_jointfloatparam_ik_weight= 2021
sim_jointfloatparam_error_x= 2022
sim_jointfloatparam_error_y= 2023
sim_jointfloatparam_error_z= 2024
sim_jointfloatparam_error_a= 2025
sim_jointfloatparam_error_b= 2026
sim_jointfloatparam_error_g= 2027
sim_jointfloatparam_error_pos= 2028
sim_jointfloatparam_error_angle= 2029
sim_jointintparam_velocity_lock= 2030
sim_jointintparam_vortex_dep_handle= 2031
sim_jointfloatparam_vortex_dep_multiplication= 2032
sim_jointfloatparam_vortex_dep_offset= 2033

# shape parameter IDs
sim_shapefloatparam_init_velocity_x= 3000
sim_shapefloatparam_init_velocity_y= 3001
sim_shapefloatparam_init_velocity_z= 3002
sim_shapeintparam_static= 3003
sim_shapeintparam_respondable= 3004
sim_shapefloatparam_mass= 3005
sim_shapefloatparam_texture_x= 3006
sim_shapefloatparam_texture_y= 3007
sim_shapefloatparam_texture_z= 3008
sim_shapefloatparam_texture_a= 3009
sim_shapefloatparam_texture_b= 3010
sim_shapefloatparam_texture_g= 3011
sim_shapefloatparam_texture_scaling_x= 3012
sim_shapefloatparam_texture_scaling_y= 3013
sim_shapeintparam_culling= 3014
sim_shapeintparam_wireframe= 3015
sim_shapeintparam_compound= 3016
sim_shapeintparam_convex= 3017
sim_shapeintparam_convex_check= 3018
sim_shapeintparam_respondable_mask= 3019
sim_shapefloatparam_init_velocity_a= 3020
sim_shapefloatparam_init_velocity_b= 3021
sim_shapefloatparam_init_velocity_g= 3022
sim_shapestringparam_color_name= 3023
sim_shapeintparam_edge_visibility= 3024
sim_shapefloatparam_shading_angle= 3025
sim_shapefloatparam_edge_angle= 3026
sim_shapeintparam_edge_borders_hidden= 3027

# proximity sensor parameter IDs
sim_proxintparam_ray_invisibility= 4000

# force sensor parameter IDs
sim_forcefloatparam_error_x= 5000
sim_forcefloatparam_error_y= 5001
sim_forcefloatparam_error_z= 5002
sim_forcefloatparam_error_a= 5003
sim_forcefloatparam_error_b= 5004
sim_forcefloatparam_error_g= 5005
sim_forcefloatparam_error_pos= 5006
sim_forcefloatparam_error_angle= 5007
sim_lightintparam_pov_casts_shadows= 8000
sim_cameraintparam_disabled_light_components= 9000
sim_camerafloatparam_perspective_angle= 9001
sim_camerafloatparam_ortho_size= 9002
sim_cameraintparam_rendering_attributes= 9003
sim_cameraintparam_pov_focal_blur= 9004
sim_camerafloatparam_pov_blur_distance= 9005
sim_camerafloatparam_pov_aperture= 9006
sim_cameraintparam_pov_blur_samples= 9007
sim_dummyintparam_link_type= 10000
sim_mirrorfloatparam_width= 12000
sim_mirrorfloatparam_height= 12001
sim_mirrorfloatparam_reflectance= 12002
sim_mirrorintparam_enable= 12003
sim_pplanfloatparam_x_min= 20000
sim_pplanfloatparam_x_range= 20001
sim_pplanfloatparam_y_min= 20002
sim_pplanfloatparam_y_range= 20003
sim_pplanfloatparam_z_min= 20004
sim_pplanfloatparam_z_range= 20005
sim_pplanfloatparam_delta_min= 20006
sim_pplanfloatparam_delta_range= 20007
sim_mplanintparam_nodes_computed= 25000
sim_mplanintparam_prepare_nodes= 25001
sim_mplanintparam_clear_nodes= 25002
# User interface elements
sim_gui_menubar =0x0001
sim_gui_popups =0x0002
sim_gui_toolbar1 =0x0004
sim_gui_toolbar2 =0x0008
sim_gui_hierarchy =0x0010
sim_gui_infobar =0x0020
sim_gui_statusbar =0x0040
sim_gui_scripteditor =0x0080
sim_gui_scriptsimulationparameters =0x0100
sim_gui_dialogs =0x0200
sim_gui_browser =0x0400
sim_gui_all =0xffff
# Joint modes
sim_jointmode_passive =0
sim_jointmode_motion =1
sim_jointmode_ik =2
sim_jointmode_ikdependent =3
sim_jointmode_dependent =4
sim_jointmode_force =5
# Navigation and selection modes with the mouse. Lower byte values are mutually exclusive upper byte bits can be combined
sim_navigation_passive =0x0000
sim_navigation_camerashift =0x0001
sim_navigation_camerarotate =0x0002
sim_navigation_camerazoom =0x0003
sim_navigation_cameratilt =0x0004
sim_navigation_cameraangle =0x0005
sim_navigation_camerafly =0x0006
sim_navigation_objectshift =0x0007
sim_navigation_objectrotate =0x0008
sim_navigation_reserved2 =0x0009
sim_navigation_reserved3 =0x000A
sim_navigation_jointpathtest =0x000B
sim_navigation_ikmanip =0x000C
sim_navigation_objectmultipleselection =0x000D
# Bit-combine following values and add them to one of above's values for a valid navigation mode
sim_navigation_reserved4 =0x0100
sim_navigation_clickselection =0x0200
sim_navigation_ctrlselection =0x0400
sim_navigation_shiftselection =0x0800
sim_navigation_camerazoomwheel =0x1000
sim_navigation_camerarotaterightbutton =0x2000
#Remote API constants
SIMX_VERSION =0
# Remote API message header structure
SIMX_HEADER_SIZE | |
"""author is an rfc2629 author element. Return the author initials,
fixed up according to current flavour and policy."""
initials = author.attrib.get('initials', '')
multiple = author.pis["multiple-initials"] == "yes"
initials_list = re.split("[. ]+", initials)
try:
initials_list.remove('')
except:
pass
if len(initials_list) > 0:
if multiple:
# preserve spacing, but make sure all parts have a trailing
# period
initials = initials.strip()
initials += '.' if not initials.endswith('.') else ''
initials = re.sub('([^.]) ', '\g<1>. ', initials)
else:
initials = initials_list[0] + "."
return initials
def parse_pi(self, pi):
    """Parse an XML processing instruction.

    Delegates to xml2rfc.utils.parse_pi, passing the writer's current PI
    dictionary so the parsed values are merged into ``self.pis``.
    """
    return xml2rfc.utils.parse_pi(pi, self.pis)
def get_numeric_pi(self, key, default):
    """Return the integer value of the processing instruction *key*.

    Falls back to *default* when the PI is absent; warns and falls back
    when the PI is present but not a decimal number.
    """
    value = self.pis.get(key, None)
    if value is not None:
        if value.isdigit():
            return int(value)
        xml2rfc.log.warn('Expected a numeric value for the %s PI, found "%s"' % (key, value))
    return default
def _getTocIndex(self):
    """Return only the index entries flagged for the table of contents."""
    return list(filter(lambda entry: entry.toc, self._index))
def _getItemByAnchor(self, anchor):
    """Return the first index entry whose auto or explicit anchor matches, else None."""
    matches = (entry for entry in self._index
               if anchor in (entry.autoAnchor, entry.anchor))
    return next(matches, None)
def _validate_ipr(self):
    """Raise RfcWriterError unless boilerplate exists for the document's ipr value."""
    ipr = self.r.attrib.get('ipr', self.defaults['ipr'])
    if ipr not in self.supported_ipr:
        raise RfcWriterError('No boilerplate text available for '
                             'ipr: \'%s\'. Acceptable values are: ' % ipr +
                             ', '.join(self.supported_ipr))
def is_numbered(self, node):
    """Return True unless the element's 'numbered' attribute turns numbering off.

    An absent attribute defaults to 'true'; only 'yes' and 'true' count as on.
    """
    return node.attrib.get('numbered', 'true') in ('yes', 'true')
def _format_date(self):
    """ Fix the date data.

    Normalizes the <front><date> element: fills in missing year/month/day
    (using today's date where sensible), stores the resulting
    datetime.date on self.date, and for drafts computes the expiration
    string (publication date + ~six months).
    """
    today = self.date
    date = self.r.find('front/date')
    assert date is not None, "Bug in schema validation: no date element in document"
    year = date.attrib.get('year')
    if not year:
        # No year given: default to the writer's current date's year.
        year = str(self.date.year)
        date.set('year', year)
    if not year.isdigit():
        # NOTE(review): after logging, int(year) below will still raise
        # ValueError for a non-numeric year — confirm intended behavior.
        xml2rfc.log.error("Expected a numeric year, found '%s'" % (year, ))
    year = int(year)
    #
    month = date.attrib.get('month')
    if not month:
        # No month given: only acceptable if the year is the current year,
        # in which case today's month is used.
        if year != today.year:
            xml2rfc.log.error("Cannot handle a <date> with year different than this year, and no month. Using today's date.")
            year = today.year
        month = today.month
        date.set('month', str(month))
    else:
        if not month.isdigit():
            # Accept month names/abbreviations and convert to a number.
            month = xml2rfc.util.date.normalize_month(month)
        month = int(month)
    #
    day = date.attrib.get('day')
    if day is None:
        temp_date = datetime.date(year=year, month=month, day=1)
        if today.year == year and today.month == month:
            day = today.day
        elif abs(today - temp_date) < datetime.timedelta(days=34):
            # Within roughly one month of today, but a different month.
            if datetime.date(year=year, month=month, day=1) < today:
                # wrong month, and the first day of that month is earlier
                # than today. Use the last day of the month
                day = calendar.monthrange(year, month)[1]
            else:
                # wrong month, later than this month. Use the first day.
                day = 1
        else:
            day = 1
    else:
        day = int(day)
    self.date = datetime.date(year=year, month=month, day=day)
    # Setup the expiration string for drafts as published date + six months
    if self.draft:
        # Write back any parts we had to infer, so later stages see them.
        if not date.get('day'):
            date.set('day', str(day))
        if not date.get('month'):
            date.set('month', str(month))
        # 185 days ~= six months, per draft expiry convention.
        expire_date = self.date + datetime.timedelta(185)
        self.expire_string = expire_date.strftime('%B %d, %Y').replace(' 0', ' ')
def _format_counter(self, text, count, list_length=1):
    """ Return a proper string for a formatted list bullet. Allowed types:
            %c: Lowercase chars
            %C: Uppercase chars
            %d: Digits
            %i: Lowercase roman numerals
            %I: Uppercase roman numerals
            %o: octal
            %x: Lowercase hex
            %X: Uppercase hex

        The result is left-justified so that every bullet of a list with
        `list_length` items lines up.  Only the first recognized format
        code in `text` is substituted.
    """
    import math
    # Printed width of the roman numeral for 1..29; longer lists fall back
    # to a width of 6.
    roman_widths = { 1:1, 2:2, 3:3, 4:2, 5:1, 6:2, 7:3, 8:4, 9:2,
                     10:1, 11:2, 12:3, 13:4, 14:3, 15:2, 16:3, 17:4, 18:5, 19:3,
                     20:2, 21:3, 22:4, 23:5, 24:4, 25:3, 26:4, 27:5, 28:6, 29:4, }
    #
    # Widest counter (minus one) in each numbering base for list_length
    # items; extra_width adds the surrounding text and one space of slack.
    decimal_width = int(math.log(list_length, 10))
    roman_width = roman_widths.get(list_length, 6)
    letter_width = int(math.log(list_length, 26))
    hex_width = int(math.log(list_length, 16))
    octal_width = int(math.log(list_length, 8))
    extra_width = len(text)+1
    if '%d' in text:
        text = text.replace(r'%d', str(count)).ljust(decimal_width+extra_width)
    elif '%c' in text:
        text = text.replace(r'%c', xml2rfc.util.num.int2letter(count)).ljust(letter_width+extra_width)
    elif '%C' in text:
        text = text.replace(r'%C', xml2rfc.util.num.int2letter(count).upper()).ljust(letter_width+extra_width)
    elif '%i' in text:
        text = text.replace(r'%i', xml2rfc.util.num.int2roman(count)).ljust(roman_width+extra_width)
    elif '%I' in text:
        text = text.replace(r'%I', xml2rfc.util.num.int2roman(count).upper()).ljust(roman_width+extra_width)
    elif '%o' in text:
        # Bug fix: the previous code chained .replace("o", "", 1) onto the
        # whole bullet text instead of onto oct()'s output, which deleted an
        # unrelated 'o' from the surrounding text (e.g. 'No. %o' -> 'N. o11').
        # '%o' % count yields the bare octal digits directly.
        text = text.replace(r'%o', '%o' % count).ljust(octal_width+extra_width)
    elif '%x' in text:
        text = text.replace(r'%x', '%x' % count).ljust(hex_width+extra_width)
    elif '%X' in text:
        text = text.replace(r'%X', '%X' % count).ljust(hex_width+extra_width)
    return text
def _format_author_string(self, authors):
    """ Given a list of <author> elements, return a readable string of names.

    Renders 'I. Surname' style names, separated by commas, with the last
    of several authors prefixed by 'and ' and rendered initials-first.
    Authors without a surname fall back to their organization name.
    """
    buf = []
    for i, author in enumerate(authors):
        organization = author.find('organization')
        initials, surname = short_author_name_parts(author)
        if i == len(authors) - 1 and len(authors) > 1:
            # Prefix the final author of a multi-author list with 'and '.
            buf.append('and ')
        if surname:
            initials = self.get_initials(author) or initials or ''
            if i == len(authors) - 1 and len(authors) > 1:
                # Last author is rendered in reverse
                if len(initials) > 0:
                    buf.append(initials + ' ' + surname)
                else:
                    buf.append(surname)
            elif len(initials) > 0:
                buf.append(surname + ', ' + initials)
            else:
                buf.append(surname)
            if author.attrib.get('role', '') == 'editor':
                buf.append(', Ed.')
        elif organization is not None and organization.text:
            # Use organization instead of name
            buf.append(organization.text.strip())
        else:
            # Neither a surname nor an organization: emit nothing for this
            # author (and skip the separator logic below).
            continue
        if len(authors) == 2 and i == 0:
            # Exactly two authors are separated by a space ('X and Y').
            buf.append(' ')
        elif i < len(authors) - 1:
            buf.append(', ')
    return ''.join(buf)
def _prepare_top_left(self):
    """ Returns a list of lines for the top left header """
    lines = []
    # Document stream / workgroup
    if not self.pis['private']:
        if self.draft:
            workgroup = self.r.find('front/workgroup')
            if workgroup is not None and workgroup.text:
                lines.append(workgroup.text)
            else:
                lines.append(self.boilerplate['draft_workgroup'])
        else:
            # Determine 'workgroup' from submissionType
            subtype = self.r.attrib.get('submissionType',
                                        self.defaults['submissionType'])
            docstream = self.boilerplate['document_stream'].get(subtype)
            lines.append(docstream)
    # RFC number
    if not self.draft:
        lines.append('Request for Comments: ' + self.rfcnumber)
    elif not self.pis['private']:
        lines.append('Internet-Draft')
    # Series number (e.g. BCP/STD), only when the category has a series name
    category = self.r.attrib.get('category', '')
    seriesNo = self.r.attrib.get('seriesNo')
    if seriesNo is not None and category in self.boilerplate['series_name']:
        lines.append('%s: %s' % (self.boilerplate['series_name'][category],
                                 seriesNo))
    # RFC relation notice; drafts qualify the relation with '(if approved)'
    approved_text = self.draft and '(if approved)' or ''
    obsoletes = self.r.attrib.get('obsoletes')
    if obsoletes:
        # Wrap long Obsoletes/Updates lists, indenting continuation lines
        # under the label.
        wrapper = textwrap.TextWrapper(width=40, subsequent_indent=' '*len('Obsoletes: '))
        line = 'Obsoletes: %s %s' % (obsoletes, approved_text)
        lines += wrapper.wrap(line)
    updates = self.r.attrib.get('updates')
    if updates:
        wrapper = textwrap.TextWrapper(width=40, subsequent_indent=' '*len('Updates: '))
        line = 'Updates: %s %s' % (updates, approved_text)
        lines += wrapper.wrap(line)
    # Category
    if category:
        cat_text = self.boilerplate[category]
        if self.draft:
            lines.append('Intended status: ' + cat_text)
        else:
            lines.append('Category: ' + cat_text)
    else:
        xml2rfc.log.warn('No category specified for document.')
    # Expiration notice for drafts
    if self.expire_string and not self.pis['private']:
        lines.append('Expires: ' + self.expire_string)
    # ISSN identifier
    if not self.draft:
        lines.append('ISSN: %s' % self.boilerplate['issn'])
    # Strip any whitespace from XML to make header as neat as possible
    lines = [ l.rstrip() for l in lines ]
    return lines
def _prepare_top_right(self):
    """ Returns a list of lines for the top right header """
    lines = []
    # Keep track of previous organization and remove if redundant.
    last_org = None
    last_pos = None
    authors = self.r.findall('front/author')
    authors = [ a for a in authors if a.get('role') != 'contributor' ]
    # NOTE(review): if `authors` is empty (or the org option is off),
    # last_pos stays None and lines[last_pos] below would raise — confirm
    # callers guarantee at least one non-contributor author.
    for author in authors:
        role = author.attrib.get('role', '')
        if role == 'editor':
            role = ', Ed.'
        initials = self.get_initials(author)
        lines.append(initials + ' ' + author.attrib.get('surname', '') + role)
        organization = author.find('organization')
        org_name = ''
        if self.options.first_page_author_org:
            # Prefer the abbreviated organization name when provided.
            if organization is not None:
                abbrev = organization.attrib.get("abbrev", None)
                if abbrev != None and abbrev.strip() != '':
                    org_name = abbrev.strip()
                elif organization.text and organization.text.strip() != '':
                    org_name = organization.text.strip()
            if org_name == '':
                lines.append('')
            else:
                if org_name == last_org:
                    # Remove redundant organization
                    del lines[last_pos]
                lines.append(org_name)
            last_org = org_name
            last_pos = len(lines)-1
    # remove blank lines between authors and date
    if lines[last_pos] == '':
        del lines[last_pos]
        last_pos = len(lines)-1
    date = self.r.find('front/date')
    if date is not None:
        year = date.attrib.get('year', '')
        month = date.attrib.get('month', '')
        day = date.attrib.get('day', '')
        if month:
            # Numeric months are rendered as their English name.
            if month.isdigit():
                month = calendar.month_name[int(month)]
            month = month + ' '
        if day:
            day = day + ', '
        lines.append(month + day + year)
    # Strip any whitespace from XML to make header as neat as possible
    lines = [ l.strip() for l in lines ]
    return lines
def write_figure(self, figure):
""" Writes <figure> elements """
figure_align = figure.attrib.get('align', self.defaults['figure_align'])
anchor = figure.attrib.get('anchor')
title = figure.attrib.get('title', self.defaults['figure_title'])
suppress_title = figure.attrib.get('suppress-title', 'false')
# Keep track of count if there is an anchor, or PI was enabled
if anchor or self.pis['figurecount'] == 'yes':
self.figure_count += 1
if anchor:
# Insert anchor(s) for the figure
self.insert_anchor('rfc.figure.' + str(self.figure_count))
self.insert_anchor(anchor)
if self.indexmode:
# Add figure to the index, inserting any anchors necessary
self._indexFigure(self.figure_count, anchor=anchor, title=title)
# Write preamble
preamble = figure.find('preamble')
if preamble is not None:
self.write_t_rec(preamble, align=figure_align)
# iref
| |
# tests/modules/voting/test_voting.py
"""
Tests donut/modules/voting
"""
from datetime import date, datetime, timedelta
import json
import re
import flask
import pytest
from donut.testing.fixtures import client
from donut import app
from donut.modules.groups.helpers import get_group_list_data
from donut.modules.voting import helpers, routes, ranked_pairs
# Ranked pairs
def test_ranked_pairs():
    """Check ranked-pairs (Tideman) tallying against a known worked example."""
    # Example taken from en.wikipedia.org/wiki/Ranked_pairs
    M = 'Memphis'
    N = 'Nashville'
    C = 'Chattanooga'
    K = 'Knoxville'
    responses = (((M, ), (N, ), (C, ), (K, )), ) * 42
    responses += (((N, ), (C, ), (K, ), (M, )), ) * 26
    responses += (((C, ), (K, ), (N, ), (M, )), ) * 15
    responses += (((K, ), (C, ), (N, ), (M, )), ) * 17
    results = ranked_pairs.results(responses)
    assert results.winners == [N, C, K, M]
    # Pairwise tallies: (preferred, other) -> number of ballots preferring
    assert results.tallies == {
        (C, K): 42 + 26 + 15,
        (C, M): 26 + 15 + 17,
        (C, N): 15 + 17,
        (K, C): 17,
        (K, M): 26 + 15 + 17,
        (K, N): 15 + 17,
        (M, C): 42,
        (M, K): 42,
        (M, N): 42,
        (N, C): 42 + 26,
        (N, K): 42 + 26,
        (N, M): 26 + 15 + 17,
    }
    # Test incomplete lists
    results = ranked_pairs.results([[['A']], [['B']], [['A']]])
    assert results.winners == ['A', 'B']
    # Test ties
    responses = [[['A'], ['B', 'C'], ['D']], [['A', 'C'], ['B', 'D']]]
    assert ranked_pairs.results(responses).winners == ['A', 'C', 'B', 'D']
# Helpers
def test_question_types(client):
    # The question-type name -> id mapping is fixed by the test database schema.
    assert helpers.get_question_types() == {
        'Dropdown': 1,
        'Checkboxes': 2,
        'Elected position': 3,
        'Short text': 4,
        'Long text': 5
    }
def test_public_surveys(client):
    """Create one open and one group-restricted survey; check visibility."""
    ruddock_id = get_group_list_data(
        ['group_id'], {'group_name': 'Ruddock House'})[0]['group_id']
    survey_params = [
        {
            'title': 'Unrestricted',
            'group': '',
            'end_hour': '12'
        },
        {
            'title': 'Ruddock only',
            'group': str(ruddock_id),
            'end_hour': '1'  # ends later
        }
    ]
    yesterday = date.today() + timedelta(days=-1)
    tomorrow = date.today() + timedelta(days=1)
    access_keys = {}
    with client.session_transaction() as sess:
        sess['username'] = 'csander'
    for params in survey_params:
        rv = client.post(
            flask.url_for('voting.make_survey'),
            data=dict(
                description='',
                start_date=yesterday.strftime(helpers.YYYY_MM_DD),
                start_hour='12',
                start_minute='00',
                start_period='P',
                end_date=tomorrow.strftime(helpers.YYYY_MM_DD),
                end_minute='00',
                end_period='P',
                public='on',
                **params),
            follow_redirects=False)
        assert rv.status_code == 302
        # The access key is the 64-character segment of the redirect URL.
        access_keys[params['title']] = [
            url_piece for url_piece in rv.location.split('/')
            if len(url_piece) == 64
        ][0]
    unrestricted = {
        'title': 'Unrestricted',
        'description': None,
        'end_time': datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12),
        'access_key': access_keys['Unrestricted'],
        'group_id': None
    }
    assert list(helpers.get_visible_surveys(helpers.get_user_id(
        'dqu'))) == [  # not a Rudd
            unrestricted
        ]
    assert list(
        helpers.get_visible_surveys(helpers.get_user_id('csander'))) == [
            unrestricted, {
                'title': 'Ruddock only',
                'description': None,
                'end_time': datetime(tomorrow.year, tomorrow.month,
                                     tomorrow.day, 13),
                'access_key': access_keys['Ruddock only'],
                'group_id': 2
            }
        ]
def test_closed_surveys(client):
    """Closed surveys show only to their creator until results are released."""
    yesterday = date.today() + timedelta(days=-1)
    tomorrow = date.today() + timedelta(days=1)
    survey_params = [{
        'title': 'Before',
        'start_date': yesterday.strftime(helpers.YYYY_MM_DD),
        'start_hour': '2',
        'end_date': yesterday.strftime(helpers.YYYY_MM_DD),
        'end_hour': '3'
    }, {
        'title': 'During',
        'start_date': yesterday.strftime(helpers.YYYY_MM_DD),
        'start_hour': '4',
        'end_date': tomorrow.strftime(helpers.YYYY_MM_DD),
        'end_hour': '5'
    }, {
        'title': 'After',
        'start_date': tomorrow.strftime(helpers.YYYY_MM_DD),
        'start_hour': '2',
        'end_date': tomorrow.strftime(helpers.YYYY_MM_DD),
        'end_hour': '3'
    }]
    access_keys = {}
    with client.session_transaction() as sess:
        sess['username'] = 'csander'
    for params in survey_params:
        rv = client.post(
            flask.url_for('voting.make_survey'),
            data=dict(
                description='',
                start_minute='00',
                start_period='P',
                end_minute='00',
                end_period='P',
                public='on',
                group='',
                **params),
            follow_redirects=False)
        assert rv.status_code == 302
        # The access key is the 64-character segment of the redirect URL.
        access_keys[params['title']] = [
            url_piece for url_piece in rv.location.split('/')
            if len(url_piece) == 64
        ][0]
    assert helpers.get_closed_surveys(helpers.get_user_id('reng')) == (
    )  # not the creator of 'Before'
    before = [{
        'title': 'Before',
        'description': None,
        'end_time': datetime(yesterday.year, yesterday.month, yesterday.day,
                             15),
        'access_key': access_keys['Before'],
        'results_shown': 0
    }]
    assert helpers.get_closed_surveys(helpers.get_user_id('csander')) == before
    # Releasing results redirects to the results page...
    rv = client.get(
        flask.url_for(
            'voting.release_results', access_key=access_keys['Before']))
    assert rv.status_code == 302
    assert rv.location == flask.url_for(
        'voting.show_results', access_key=access_keys['Before'])
    # ...after which the closed survey is visible to everyone.
    before[0]['results_shown'] = 1
    assert helpers.get_closed_surveys(helpers.get_user_id('reng')) == before
    assert helpers.get_closed_surveys(helpers.get_user_id('csander')) == before
    # NOTE(review): assumes the three surveys above received ids 3-5 —
    # depends on test execution order; confirm if tests are reordered.
    helpers.delete_survey(3)
    helpers.delete_survey(4)
    helpers.delete_survey(5)
def test_survey_data(client):
    # Fetch the 'Unrestricted' survey (created in test_public_surveys)
    # by its access key and check all stored fields.
    access_key = list(helpers.get_visible_surveys(1))[0]['access_key']
    yesterday = date.today() + timedelta(days=-1)
    tomorrow = date.today() + timedelta(days=1)
    assert helpers.get_survey_data(access_key) == {
        'survey_id': 1,
        'title': 'Unrestricted',
        'description': None,
        'group_id': None,
        'start_time': datetime(yesterday.year, yesterday.month,
                               yesterday.day, 12),
        'end_time': datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12),
        'creator': 3,
        'results_shown': 0
    }
def test_question_json(client):
    # Store one question of each type on survey 1, then check the JSON
    # serialization both without and with question/choice ids included.
    question_types = helpers.get_question_types()
    helpers.set_questions(1, [{
        'title': 'A',
        'description': '',
        'type': question_types['Dropdown'],
        'choices': ['1', '2', '3']
    }, {
        'title': 'B',
        'description': 'bbb',
        'type': question_types['Short text']
    }, {
        'title': 'C',
        'description': 'ccc',
        'type': question_types['Checkboxes'],
        'choices': ['a', 'b', 'c']
    }, {
        'title': 'D',
        'description': '',
        'type': question_types['Long text']
    }, {
        'title': 'E',
        'description': '',
        'type': question_types['Elected position'],
        'choices': ['do', 're', 'me']
    }])
    assert helpers.get_questions_json(
        1, False
    ) == '[{"title":"A","description":"","type":1,"choices":["1","2","3"]},{"title":"B","description":"bbb","type":4},{"title":"C","description":"ccc","type":2,"choices":["a","b","c"]},{"title":"D","description":"","type":5},{"title":"E","description":"","type":3,"choices":["do","re","me"]}]'
    assert helpers.get_questions_json(
        1, True
    ) == '[{"question_id":1,"title":"A","description":"","type":1,"choices":[{"id":1,"choice":"1"},{"id":2,"choice":"2"},{"id":3,"choice":"3"}]},{"question_id":2,"title":"B","description":"bbb","type":4},{"question_id":3,"title":"C","description":"ccc","type":2,"choices":[{"id":4,"choice":"a"},{"id":5,"choice":"b"},{"id":6,"choice":"c"}]},{"question_id":4,"title":"D","description":"","type":5},{"question_id":5,"title":"E","description":"","type":3,"choices":[{"id":7,"choice":"do"},{"id":8,"choice":"re"},{"id":9,"choice":"me"}]}]'
def test_question_ids(client):
    # Survey 1 has the five questions created in test_question_json;
    # survey 2 has none.
    assert helpers.get_question_ids(1) == [1, 2, 3, 4, 5]
    assert helpers.get_question_ids(2) == []
def test_question_type(client):
    # Question ids 1-5 were created with types
    # Dropdown, Short text, Checkboxes, Long text, Elected position.
    assert list(map(helpers.get_question_type, range(1, 6))) == [1, 4, 2, 5, 3]
def test_get_choice(client):
    # Choice ids 7-9 belong to question 5; non-ints and other ids are invalid.
    assert [
        helpers.invalid_choice_id(5, choice)
        for choice in ['abc', 7, 8, 9, 10]
    ] == [True, False, False, False, True]
def test_process_params_error(client):
    """Exercise every validation failure of the survey-creation form."""
    default_params = dict(
        title='New survey',
        description='',
        start_date='2018-05-08',
        start_hour='12',
        start_minute='00',
        start_period='P',
        end_date='2018-05-10',
        end_hour='12',
        end_minute='00',
        end_period='P',
        public='on',
        group='')

    def assert_message(message, params):
        # POST the form and check the expected error message is rendered.
        rv = client.post(
            flask.url_for('voting.make_survey'),
            data=params,
            follow_redirects=False)
        assert rv.status_code == 200
        assert message in rv.data

    # Unauthenticated requests are rejected outright.
    rv = client.post(
        flask.url_for('voting.make_survey'), follow_redirects=False)
    assert rv.status_code == 403
    with client.session_transaction() as sess:
        sess['username'] = 'csander'
    # Dropping any required field fails validation.
    for delete_param in default_params:
        if delete_param == 'public': continue  # this param is optional
        params = default_params.copy()
        del params[delete_param]
        assert_message(b'Invalid form data', params)
    for date_field in ['start_date', 'end_date']:
        assert_message(b'Invalid form data', {
            **default_params, date_field: '123'
        })
    # Hours must be 1-12 (12-hour clock).
    for hour_field in ['start_hour', 'end_hour']:
        assert_message(b'Invalid form data', {
            **default_params, hour_field: 'abc'
        })
        assert_message(b'Invalid form data', {
            **default_params, hour_field: '0'
        })
        assert_message(b'Invalid form data', {
            **default_params, hour_field: '13'
        })
    # Minutes must be 0-59.
    for minute_field in ['start_minute', 'end_minute']:
        assert_message(b'Invalid form data', {
            **default_params, minute_field: 'abc'
        })
        assert_message(b'Invalid form data', {
            **default_params, minute_field: '-1'
        })
        assert_message(b'Invalid form data', {
            **default_params, minute_field: '60'
        })
    # Periods must be A or P.
    for period_field in ['start_period', 'end_period']:
        assert_message(b'Invalid form data', {
            **default_params, period_field: 'a'
        })
        assert_message(b'Invalid form data', {
            **default_params, period_field: ''
        })
    assert_message(b'Invalid form data', {**default_params, 'group': 'a'})
    assert_message(b'Start must be before end', {
        **default_params, 'start_date': '2018-05-09',
        'end_date': '2018-05-08'
    })
    rv = client.post(
        flask.url_for('voting.make_survey'),
        data=default_params,
        follow_redirects=False)
    assert rv.status_code == 302  # successful
    # NOTE(review): assumes the survey just created received id 6 —
    # depends on test execution order; confirm if tests are reordered.
    helpers.delete_survey(6)
def test_survey_params(client):
    # Read survey 1's parameters, update two of them, and read back.
    yesterday = date.today() + timedelta(days=-1)
    tomorrow = date.today() + timedelta(days=1)
    assert helpers.get_survey_params(1) == {
        'title': 'Unrestricted',
        'description': None,
        'start_time': datetime(yesterday.year, yesterday.month,
                               yesterday.day, 12),
        'end_time': datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12),
        'group_id': None,
        'public': 1
    }
    helpers.update_survey_params(1, {'title': 'ABC', 'group_id': 2})
    assert helpers.get_survey_params(1) == {
        'title': 'ABC',
        'description': None,
        'start_time': datetime(yesterday.year, yesterday.month,
                               yesterday.day, 12),
        'end_time': datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12),
        'group_id': 2,
        'public': 1
    }
def test_my_surveys(client):
    # Surveys created by csander (renamed 'ABC' in test_survey_params)
    # are listed with open/closed flags; dqu has created none.
    yesterday = date.today() + timedelta(days=-1)
    tomorrow = date.today() + timedelta(days=1)
    assert helpers.get_my_surveys(helpers.get_user_id('dqu')) == ()
    csander = helpers.get_user_id('csander')
    assert helpers.get_my_surveys(csander) == [{
        'title': 'ABC',
        'description': None,
        'access_key': list(
            helpers.get_visible_surveys(csander))[0]['access_key'],
        'start_time': datetime(yesterday.year, yesterday.month,
                               yesterday.day, 12),
        'unopened': 0,
        'closed': 0,
        'end_time': datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12)
    }, {
        'title': 'Ruddock only',
        'description': None,
        'access_key': list(
            helpers.get_visible_surveys(csander))[1]['access_key'],
        'start_time': datetime(yesterday.year, yesterday.month,
                               yesterday.day, 12),
        'unopened': 0,
        'closed': 0,
        'end_time': datetime(tomorrow.year, tomorrow.month, tomorrow.day, 13)
    }]
def test_respond(client):
    """Submit answers to every question type and check the tallied results."""
    assert not helpers.some_responses_for_survey(1)
    with app.test_request_context():
        flask.session['username'] = 'csander'
        # One response per question id 1-5, stored as JSON strings.
        helpers.set_responses([1, 2, 3, 4, 5], [
            '2', '"asdf"', '[4, 6]', '"Lorem ipsum dolor sit amet"',
            '[[7], [-1], [9], [-2], [null]]'
        ])
    assert helpers.some_responses_for_survey(1)
    results = helpers.get_results(1)
    # The last entry is the elected-position question; compare it separately
    # because its 'results' value is a ranked-pairs results object.
    election_result = results.pop()
    assert results == [{
        'question_id': 1,
        'title': 'A',
        'description': None,
        'type': 1,
        'list_order': 0,
        'choices': {
            1: '1',
            2: '2',
            3: '3'
        },
        'responses': [2],
        'results': [(2, 1)]
    }, {
        'question_id': 2,
        'title': 'B',
        'description': 'bbb',
        'type': 4,
        'list_order': 1,
        'choices': 0,
        'responses': ['asdf'],
        'results': [('asdf', 1)]
    }, {
        'question_id': 3,
        'title': 'C',
        'description': 'ccc',
        'type': 2,
        'list_order': 2,
        'choices': {
            4: 'a',
            5: 'b',
            6: 'c'
        },
        'responses': [[4, 6]],
        'results': [(4, 1), (6, 1)]
    }, {
        'question_id': 4,
        'title': 'D',
        'description': None,
        'type': 5,
        'list_order': 3,
        'choices': 0,
        'responses': ['Lorem ipsum dolor sit amet'],
        'results': [('Lorem ipsum dolor sit amet', 1)]
    }]
    results = election_result.pop('results')
    # NOTE(review): the '<NAME>' strings below look like anonymization
    # placeholders left by data scrubbing — confirm the intended values.
    assert election_result == {
        'question_id': 5,
        'title': 'E',
        'description': None,
        'type': 3,
        'list_order': 4,
        'choices': {
            7: 'do',
            8: 're',
            9: 'me'
        },
        'responses': [[['do'], ['<NAME>'], ['me'], ['<NAME>'], ['NO']]]
    }
    assert results.winners == ['do', '<NAME>', 'me', '<NAME>', 'NO']
    with app.test_request_context():
        flask.session['username'] = 'dqu'
        # Invalid elected position response
        helpers.set_responses([5], ['[["abc"]]'])
    with pytest.raises(Exception) as e:
        helpers.get_results(1)
    assert e.value.args == ('Unrecognized elected position vote', )
def test_restrict_access(client):
assert helpers.restrict_take_access(None) == 'Invalid access key'
yesterday = datetime.now() + timedelta(days=-1)
tomorrow = datetime.now() + timedelta(days=1)
assert helpers.restrict_take_access({
'start_time': yesterday,
'end_time': yesterday
}) == 'Survey is not currently accepting responses'
assert helpers.restrict_take_access({
'start_time': tomorrow,
'end_time': tomorrow
}) == 'Survey is not currently accepting responses'
with app.test_request_context():
assert helpers.restrict_take_access({
'start_time': yesterday,
'end_time': tomorrow
}) == 'Must be logged in to take survey'
with app.test_request_context():
flask.session['username'] = | |
in self.papers.items():
self.papers[paper_id] = paper._replace(in_signatures=str(paper_id) in papers_from_signatures)
self.preprocess = preprocess
if name_tuples is None:
self.name_tuples = set()
with open(os.path.join(PROJECT_ROOT_PATH, "data", "s2and_name_tuples.txt"), "r") as f2: # type: ignore
for line in f2:
line_split = line.strip().split(",") # type: ignore
self.name_tuples.add((line_split[0], line_split[1]))
else:
self.name_tuples = name_tuples
logger.info("preprocessing papers")
self.papers = preprocess_papers_parallel(self.papers, self.n_jobs, self.preprocess)
logger.info("preprocessed papers")
logger.info("preprocessing signatures")
self.preprocess_signatures(name_counts_loaded)
logger.info("preprocessed signatures")
@staticmethod
def get_full_name_for_features(signature: Signature, include_last: bool = True, include_suffix: bool = True) -> str:
    """
    Creates the full name from the name parts.

    Parameters
    ----------
    signature: Signature
        the signature to create the full name for
    include_last: bool
        whether to include the last name
    include_suffix: bool
        whether to include the suffix

    Returns
    -------
    string: the full name
    """
    # Prefer the normalized variant of each part, falling back to the raw one.
    parts = [
        signature.author_info_first_normalized_without_apostrophe or signature.author_info_first,
        signature.author_info_middle_normalized_without_apostrophe or signature.author_info_middle,
    ]
    if include_last:
        parts.append(signature.author_info_last_normalized or signature.author_info_last)
    if include_suffix:
        parts.append(signature.author_info_suffix_normalized or signature.author_info_suffix)
    # Drop missing/empty parts and join the rest with single spaces.
    return " ".join(piece.strip() for piece in parts if piece)
    def preprocess_signatures(self, load_name_counts: bool):
        """
        Preprocess the signatures, doing lots of normalization and feature creation
        Parameters
        ----------
        load_name_counts: bool
            whether name counts were loaded (mostly just here so we can not load them when running tests)
        Returns
        -------
        nothing, modifies self.signatures
        """
        for signature_id, signature in tqdm(self.signatures.items(), desc="Preprocessing signatures"):
            # our normalization scheme is to normalize first and middle separately,
            # join them, then take the first token of the combined join
            first_normalized = normalize_text(signature.author_info_first or "")
            first_normalized_without_apostrophe = normalize_text(
                signature.author_info_first or "", special_case_apostrophes=True
            )
            middle_normalized = normalize_text(signature.author_info_middle or "")
            first_middle_normalized_split = (first_normalized + " " + middle_normalized).split(" ")
            # drop a leading name prefix (e.g. an honorific) so it cannot become the first-name token
            if first_middle_normalized_split[0] in NAME_PREFIXES:
                first_middle_normalized_split = first_middle_normalized_split[1:]
            first_middle_normalized_split_without_apostrophe = (
                first_normalized_without_apostrophe + " " + middle_normalized
            ).split(" ")
            if first_middle_normalized_split_without_apostrophe[0] in NAME_PREFIXES:
                first_middle_normalized_split_without_apostrophe = first_middle_normalized_split_without_apostrophe[1:]
            # coauthors of this signature = all other authors on the same paper (matched by position)
            coauthors: Optional[List[str]] = None
            if len(self.papers) != 0:
                paper = self.papers[str(signature.paper_id)]
                coauthors = [
                    author.author_name for author in paper.authors if author.position != signature.author_info_position
                ]
            signature = signature._replace(
                author_info_first_normalized=first_middle_normalized_split[0],
                author_info_first_normalized_without_apostrophe=first_middle_normalized_split_without_apostrophe[0],
                author_info_middle_normalized=" ".join(first_middle_normalized_split[1:]),
                author_info_middle_normalized_without_apostrophe=" ".join(
                    first_middle_normalized_split_without_apostrophe[1:]
                ),
                author_info_last_normalized=normalize_text(signature.author_info_last),
                author_info_suffix_normalized=normalize_text(signature.author_info_suffix or ""),
                author_info_coauthors=set(coauthors) if coauthors is not None else None,
                author_info_coauthor_blocks=set([compute_block(author) for author in coauthors])
                if coauthors is not None
                else None,
            )
            if self.preprocess:
                affiliations = [normalize_text(affiliation) for affiliation in signature.author_info_affiliations]
                affiliations_n_grams = get_text_ngrams_words(
                    " ".join(affiliations),
                    AFFILIATIONS_STOP_WORDS,
                )
                # text before the "@" of the email address; turned into ngrams below
                email_prefix = (
                    signature.author_info_email.split("@")[0]
                    if signature.author_info_email is not None and len(signature.author_info_email) > 0
                    else None
                )
                if load_name_counts:
                    first_last_for_count = (
                        signature.author_info_first_normalized + " " + signature.author_info_last_normalized
                    ).strip()
                    # NOTE(review): despite the name, this is the full normalized first
                    # name (or "" when empty), not just its initial — confirm intended
                    first_initial = (
                        signature.author_info_first_normalized
                        if len(signature.author_info_first_normalized) > 0
                        else ""
                    )
                    last_first_initial_for_count = (signature.author_info_last_normalized + " " + first_initial).strip()
                    # single-character first names are too ambiguous to count reliably,
                    # so their first / first_last counts are recorded as NaN
                    counts = NameCounts(
                        first=self.first_dict.get(signature.author_info_first_normalized, 1)
                        if len(signature.author_info_first_normalized) > 1
                        else np.nan,
                        last=self.last_dict.get(signature.author_info_last_normalized, 1),
                        first_last=self.first_last_dict.get(first_last_for_count, 1)
                        if len(signature.author_info_first_normalized) > 1
                        else np.nan,
                        last_first_initial=self.last_first_initial_dict.get(last_first_initial_for_count, 1),
                    )
                else:
                    counts = NameCounts(first=None, last=None, first_last=None, last_first_initial=None)
                signature = signature._replace(
                    author_info_full_name=ANDData.get_full_name_for_features(signature).strip(),
                    author_info_affiliations=affiliations,
                    author_info_affiliations_n_grams=affiliations_n_grams,
                    author_info_coauthor_n_grams=get_text_ngrams(" ".join(coauthors), stopwords=None, use_bigrams=True)
                    if coauthors is not None
                    else Counter(),
                    author_info_email_prefix_ngrams=get_text_ngrams(email_prefix, stopwords=None, use_bigrams=True),
                    author_info_name_counts=counts,
                )
            self.signatures[signature_id] = signature
@staticmethod
def maybe_load_json(path_or_json: Optional[Union[str, Union[List, Dict]]]) -> Any:
"""
Either loads a dictionary from a json file or passes through the object
Parameters
----------
path_or_json: string or Dict
the file path or the object
Returns
-------
either the loaded json, or the passed in object
"""
if isinstance(path_or_json, str):
with open(path_or_json) as _json_file:
output = json.load(_json_file)
return output
else:
return path_or_json
@staticmethod
def maybe_load_list(path_or_list: Optional[Union[str, list, Set]]) -> Optional[Union[list, Set]]:
"""
Either loads a list from a text file or passes through the object
Parameters
----------
path_or_list: string or list
the file path or the object
Returns
-------
either the loaded list, or the passed in object
"""
if isinstance(path_or_list, str):
with open(path_or_list, "r") as f:
return f.read().strip().split("\n")
else:
return path_or_list
@staticmethod
def maybe_load_dataframe(path_or_dataframe: Optional[Union[str, pd.DataFrame]]) -> Optional[pd.DataFrame]:
"""
Either loads a dataframe from a csv file or passes through the object
Parameters
----------
path_or_dataframe: string or dataframe
the file path or the object
Returns
-------
either the loaded dataframe, or the passed in object
"""
if type(path_or_dataframe) == str:
return pd.read_csv(path_or_dataframe, sep=",")
else:
return path_or_dataframe
@staticmethod
def maybe_load_specter(path_or_pickle: Optional[Union[str, Dict]]) -> Optional[Dict]:
"""
Either loads a dictionary from a pickle file or passes through the object
Parameters
----------
path_or_pickle: string or dictionary
the file path or the object
Returns
-------
either the loaded json, or the passed in object
"""
if isinstance(path_or_pickle, str):
with open(path_or_pickle, "rb") as _pickle_file:
X, keys = pickle.load(_pickle_file)
D = {}
for i, key in enumerate(keys):
D[key] = X[i, :]
return D
else:
return path_or_pickle
def get_original_blocks(self) -> Dict[str, List[str]]:
"""
Gets the block dict based on the blocks provided with the dataset
Returns
-------
Dict: mapping from block id to list of signatures in the block
"""
block = {}
for signature_id, signature in self.signatures.items():
block_id = signature.author_info_given_block
if block_id not in block:
block[block_id] = [signature_id]
else:
block[block_id].append(signature_id)
return block
def get_s2_blocks(self) -> Dict[str, List[str]]:
"""
Gets the block dict based on the blocks provided by Semantic Scholar data
Returns
-------
Dict: mapping from block id to list of signatures in the block
"""
block: Dict[str, List[str]] = {}
for signature_id, signature in self.signatures.items():
block_id = signature.author_info_block
if block_id not in block:
block[block_id] = [signature_id]
else:
block[block_id].append(signature_id)
return block
def get_blocks(self) -> Dict[str, List[str]]:
"""
Gets the block dict
Returns
-------
Dict: mapping from block id to list of signatures in the block
"""
if self.block_type == "s2":
return self.get_s2_blocks()
elif self.block_type == "original":
return self.get_original_blocks()
else:
raise Exception(f"Unknown block type: {self.block_type}")
def get_constraint(
self,
signature_id_1: str,
signature_id_2: str,
low_value: Union[float, int] = 0,
high_value: Union[float, int] = LARGE_DISTANCE,
dont_merge_cluster_seeds: bool = True,
incremental_dont_use_cluster_seeds: bool = False,
) -> Optional[float]:
"""Applies cluster_seeds and generates the default
constraints which are:
First we apply the passed-in cluster_seeds, then:
(1) if not a.prefix(b) or b.prefix(a) and (a, b) not in self.name_tuples:
distance(a, b) = high_value
(2) if len(a_middle) > 0 and len(b_middle) > 0 and
intersection(a_middle_chars, b_middle_chars) == 0:
distance(a, b) = high_value
There is currently no rule to assign low_value but it would be good
to potentially add an ORCID rule to use low_value
Parameters
----------
signature_id_1: string
one signature id in the pair
signature_id_2: string
the other signature id in the pair
low_value: float
value to assign to same person override
high_value: float
value to assign to different person overrid
dont_merge_cluster_seeds: bool
this flag controls whether to use cluster seeds to enforce "dont merge"
as well as "must merge" constraints
incremental_dont_use_cluster_seeds: bool
Are we clustering in incremental mode? If so, don't use the cluster seeds that came with the dataset
Returns
-------
float: the constraint value
"""
first_1 = self.signatures[signature_id_1].author_info_first_normalized_without_apostrophe
first_2 = self.signatures[signature_id_2].author_info_first_normalized_without_apostrophe
middle_1 = self.signatures[signature_id_1].author_info_middle_normalized_without_apostrophe.split()
paper_1 = self.papers[str(self.signatures[signature_id_1].paper_id)]
paper_2 = self.papers[str(self.signatures[signature_id_2].paper_id)]
# cluster seeds have precedence
if (signature_id_1, signature_id_2) in self.cluster_seeds_disallow or (
signature_id_2,
signature_id_1,
) in self.cluster_seeds_disallow:
return CLUSTER_SEEDS_LOOKUP["disallow"]
elif (
self.cluster_seeds_require.get(signature_id_1, -1) == self.cluster_seeds_require.get(signature_id_2, -2)
) and (not incremental_dont_use_cluster_seeds):
return CLUSTER_SEEDS_LOOKUP["require"]
elif (
dont_merge_cluster_seeds
and (signature_id_1 in self.cluster_seeds_require and signature_id_2 in self.cluster_seeds_require)
and (self.cluster_seeds_require[signature_id_1] != self.cluster_seeds_require[signature_id_2])
):
return CLUSTER_SEEDS_LOOKUP["disallow"]
# just-in-case last name constraint: if last names are different, then disallow
elif (
self.signatures[signature_id_1].author_info_last_normalized
!= self.signatures[signature_id_2].author_info_last_normalized
):
return high_value
# just-in-case first initial constraint: if first initials are different, then disallow
elif len(first_1) > 0 and len(first_2) > 0 and first_1[0] != first_2[0]:
return high_value
# and then language constraints
elif (paper_1.is_reliable and paper_2.is_reliable) and (
paper_1.predicted_language != paper_2.predicted_language
):
return high_value
# and then name based constraints
else:
signature_2 = self.signatures[signature_id_2]
prefix = first_1.startswith(first_2) or first_2.startswith(first_1)
known_alias = (first_1, first_2) in self.name_tuples
# dont cluster together if the two first names are not prefixes of each other, and the pair
# is not present in a provided list of known name pairs
if not prefix and not known_alias:
return high_value
# dont cluster together if there is no intersection between the sets of middle initials
# and both sets are not empty
elif len(middle_1) > 0:
middle_2 = signature_2.author_info_middle_normalized_without_apostrophe.split()
if len(middle_2) > 0:
overlapping_affixes = | |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------------------------
"""
Hill Climbing Meta-Heuristic
----------------------------
Content:
▶ class HillClimbing
─────────────────────────────────────────────────────────────────────────
CIFO - Computation Intelligence for Optimization
Author: <NAME> - <EMAIL> - (2019) version L4.0
"""
# -------------------------------------------------------------------------------------------------
from copy import deepcopy
from dssg_challenge.ga.problem.problem_template import ProblemTemplate
from dssg_challenge.ga.problem.objective import ProblemObjective
from dssg_challenge.ga.util.observer import LocalSearchMessage
class HillClimbing:
"""
Classic Implementation of Hill Climbing with some improvements.
Improvements:
------------
1. Stop-Conditions flexibility
Algorithm:
---------
1: Initialize
2: Repeat while exists a better neighbor
2.1: Get the best neighbor
2.2: Select the best, between the current best and best neighbor
2.3: Check stop conditions
3: Return the best solution
"""
# Constructor
#----------------------------------------------------------------------------------------------
def __init__(self, problem_instance, neighborhood_function, feedback=None, params={}):
"""
Hill Climbing Constructor
Parameters:
-----------
▶ problem_instance - the instance of the problem that the algorithm will search a solution
▶ neighborhood_function - it is expected a function that must follow the signature:
neighborhood_function( solution, problem, neighborhood_size = 0 )
▶ feedback
▶ params - dictionary with configurations for Hill Climbing
e.g.: params = { "Maximum-Iterations" : 100 , "Stop-Conditions" : "Classical", "Neighborhood-Size": -1}
A. "Maximum-Iterations" - the number of maximum iterations (used to stop the search, even there are neighbors better than the current solution)
B. "Stop-Condition" - The approach used to stop conditions
Possible "Search-Methods" : ["Classical", "Alternative-01"]
1. Classical - Stops, when there is no better neighbor or the number of max iterations was achieved.
2. Alternative 1 - Stops when the number of max iterations was achieved. It can be good when the neighborhood can be different for the same solution
C. "Neighborhood-Size" the size of the neighborhood, the default is -1, which means the neighborhood will return all neighbors found
"""
# set
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self._name = "<NAME>"
self._description = ""
self._problem_instance = problem_instance
self._get_neighbors = neighborhood_function
self._feedback = feedback
self._observers = []
self._iteration = 0
self._solution = None
self._neighbor = None
# parse params
# Default params:
# {"Maximum-Iterations": 1000, "Stop-Conditions":"Classical", "Target-Fitness" : None, "Neighborhood-Size": 0,
# "N-Changes": 1}
# Motivation: Enables the user to change some Hill Climbing Behaviors
# (Flexibility)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# max iterations
self._max_iterations = 50
if "Maximum-Iterations" in params:
self._max_iterations = params["Maximum-Iterations"]
# stop condition approach
self._stop_condition_approach = "Classical"
self._check_stop_conditions = self._check_classical_stop_conditions
if "Stop-Conditions" in params:
self._stop_condition_approach = params["Stop-Conditions"]
if self._stop_condition_approach == "Alternative-01":
self._check_stop_conditions = self._check_alternative1_stop_conditions
self._target_fitness = None
if "Target-Fitness" in params: self._target_fitness = params["Target-Fitness"]
self._description = f"Maximum-Iterations: {self._max_iterations} | Stop-Condition: {self._stop_condition_approach} "
# neighborhood size
self._neighborhood_size = 0
if "Neighborhood-Size" in params:
self._neighborhood_size = params["Neighborhood-Size"]
# number of changes (n_changes): number of iterations of the neighborhood defining function
self._n_changes = 1
if "N-Changes" in params:
self._n_changes = params["N-Changes"]
# Prepare the internal methods for multi-objective / single-objective:
# Motivation: Avoid in each selection step check if it is multi-single or min/max
# (Optimization)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if self._problem_instance.objective == ProblemObjective.MultiObjective:
# Multi-objective
print("NOT IMPLEMENTED.")
else:
# Single-Objective
if self._problem_instance.objective == ProblemObjective.Maximization:
self._get_best_neighbor = self._get_best_neighbor_maximization
self._select = self._select_maximization
else:
self._get_best_neighbor = self._get_best_neighbor_minimization
self._select = self._select_minimization
# Search Method
#----------------------------------------------------------------------------------------------
def search(self):
"""
Hill ClimbingSearch Method
--------------------------
Algorithm:
---------
1: Initialize
2: Repeat while exists a better neighbor
2.1: Get the best neighbor
2.2: Select the best, between the current best and best neighbor
2.3: Check stop conditions
3: Return the best solution
"""
self._notify(message=LocalSearchMessage.Started)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
searching = True
self._solution = None
self._neighbor = None
# Search
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 1. Initialize
self._initialize()
self._notify(message=LocalSearchMessage.Initialized)
# 2. Repeat while exists a better neighbor
self._iteration = 0
while searching:
self._iteration += 1
#print('iteration: ' + str(self._iteration))
# 2.1: Get the best neighbor
self._get_best_neighbor()
# 2.2: Select the best, between the current best and best neighbor
changed = self._select()
# 2.3: Check stop conditions
searching = self._check_stop_conditions(changed)
#3: Return the best solution
self._notify(message="FINISHED")
return self._solution
# Initialize: create an initial solution
#----------------------------------------------------------------------------------------------
def _initialize(self):
"""
Create a feasible initial solution
"""
self._solution = self._problem_instance.build_solution(method="Greedy")
while not self._problem_instance.is_admissible(self._solution):
self._solution = self._problem_instance.build_solution(method="Greedy")
self._problem_instance.evaluate_solution(self._solution, feedback=self._feedback)
# _get_best_neighbor for maximization
#----------------------------------------------------------------------------------------------
def _get_best_neighbor_maximization(self):
"""
Get the best neighbor of the neighborhood : MAXIMIZATION
"""
# Get Neighbors of the current solution
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
neighborhood = self._get_neighbors(
solution = self._solution,
problem = self._problem_instance,
neighborhood_size = self._neighborhood_size,
n_changes = self._n_changes)
best_neighbor = None
# Find the best neighbor in neighborhood of the current solution
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for neighbor in neighborhood:
if self._problem_instance.is_admissible(neighbor):
self._problem_instance.evaluate_solution(
solution = neighbor,
feedback = self._feedback
)
if best_neighbor == None:
best_neighbor = neighbor
else:
if neighbor.fitness >= best_neighbor.fitness:
best_neighbor = neighbor
self._neighbor = best_neighbor
# _get_best_neighbor for minimization
#----------------------------------------------------------------------------------------------
def _get_best_neighbor_minimization(self):
"""
Get the best neighbor of the neighborhood : MINIMIZATION
"""
# Get Neighbors of the current solution
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
neighborhood = self._get_neighbors(
solution = self._solution,
problem = self._problem_instance,
neighborhood_size = self._neighborhood_size,
n_changes = self._n_changes)
best_neighbor = None
# Find the best neighbor in neighborhood of the current solution
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for neighbor in neighborhood:
self._problem_instance.evaluate_solution(solution=neighbor, feedback=self._feedback)
if best_neighbor is None:
best_neighbor = deepcopy(neighbor)
else:
if neighbor.fitness <= best_neighbor.fitness:
best_neighbor = deepcopy(neighbor)
self._neighbor = best_neighbor
# _select for minimization
#----------------------------------------------------------------------------------------------
def _select_minimization(self):
"""
Select the better solution : MINIMIZATION
Returns:
- solution: The best between them (solution and neighbor)
- boolean : If changed the current, returns True else returns False
"""
if self._neighbor.fitness <= self._solution.fitness:
self._solution = self._neighbor
self._notify(message=LocalSearchMessage.ReplacementAccepted)
return True
self._notify(message=LocalSearchMessage.ReplacementRejected)
| |
<reponame>lara-martin/ASTER-X
from copy import deepcopy
from collections import defaultdict
from aster_utils import *
# vehicle and vehicle_part were combined into vehicle in the json
# edited to make "+comestible" only refer to dead things & added +broken
# Mutual-exclusion table for selectional restrictions: maps each restriction to
# every restriction that may NOT co-occur with it on the same entity.
# "+x" asserts a feature, "-x" asserts its absence.
# NOTE(review): hand-curated; do not assume the table is perfectly symmetric
# without checking both directions.
selMutualExclusion = {
    "+abstract": ["+animal", "+animate", "-animate", "+biotic", "+body_part", "+broken", "+comestible", "+concrete", "+elongated",
                  "+force", "+garment", "+human", "+int_control", "+location", "-location", "+machine", "+nonrigid",
                  "+pointy", "+refl", "-region", "+solid", "-solid", "+substance", "+vehicle"],
    "+animal": ["+abstract", "+broken", "+comestible", "+communication", "+currency", "+eventive", "-location", "+machine", "+nonrigid",
                "+organization", "+pointy", "-solid", "+sound", "+substance", "+time", "+vehicle"],
    "+animate": ["+abstract", "-animate", "+broken", "+comestible", "+communication", "+currency", "+eventive", "-location", "+organization",
                 "+sound", "+time"],
    "-animate": ["+abstract", "+animate", "+communication", "+currency", "+eventive", "+organization", "+sound",
                 "+time"],
    "+biotic": ["+abstract", "+communication", "+currency", "+eventive", "+garment", "+machine", "+organization",
                "+sound", "+time", "+vehicle"],  # I'm assuming clothes can't be eaten
    "+body_part": ["+abstract", "+communication", "+currency", "+eventive", "+garment", "+machine", "+organization",
                   "+refl", "-solid", "+sound", "+substance", "+time", "+vehicle"],
    # assuming there aren't robotic arms
    "+broken": ["+abstract", "+animal", "+animate", "+concrete", "+currency", "+eventive", "+force", "+human", "+int_control", "+location",
                "-location", "+machine", "+nonrigid", "+organization", "+refl", "-region", "-solid", "+sound", "+substance", "+time", "+vehicle"],
    # added in concrete so that it works
    "+comestible": ["+abstract", "+communication", "+currency", "+eventive", "+garment", "+machine", "+organization", "+refl", "+sound", "+time",
                    "+vehicle", "+human", "+animate", "+animal", "+int_control"],
    "+communication": ["+animal", "+animate", "-animate", "+biotic", "+body_part", "+comestible", "+concrete",
                       "+currency", "+elongated", "+force", "+garment", "+human", "+int_control", "+location",
                       "-location", "+machine", "+nonrigid", "+organization", "+pointy", "+refl", "-region", "+solid",
                       "-solid", "+substance", "+time", "+vehicle"],
    "+concrete": ["+abstract", "+broken", "+communication", "+eventive", "+sound", "+time"],
    "+currency": ["+animal", "+animate", "+biotic", "+body_part", "+broken", "+comestible", "+communication", "+eventive",
                  "+force", "+garment", "+human", "+int_control", "+location", "-location", "+machine", "+organization",
                  "+refl", "-region", "-solid", "+substance", "+time", "+vehicle"],
    "+elongated": ["+abstract", "+communication", "+eventive", "+organization", "-solid", "+sound", "+substance",
                   "+time"],
    "+eventive": ["+animal", "+animate", "-animate", "+biotic", "+body_part", "+broken", "+comestible", "+concrete", "+currency",
                  "+elongated", "+force", "+garment", "+human", "+int_control", "+location", "+machine", "+nonrigid",
                  "+organization", "+pointy", "-region", "+solid", "-solid", "+substance", "+vehicle"],
    "+force": ["+abstract", "-animate", "+broken", "+communication", "+eventive", "+garment", "+location", "-location",
               "+nonrigid", "+organization", "-region", "+time"],
    "+garment": ["+abstract", "+animate", "+communication", "+currency", "+eventive", "+force", "+human",
                 "+int_control", "+machine", "+organization", "+pointy", "+refl", "-solid", "+sound", "+substance",
                 "+time", "+vehicle"],
    "+human": ["+abstract", "+body_part", "+broken", "+comestible", "+communication", "+currency", "+eventive", "+garment", "+machine",
               "+organization", "+pointy", "-solid", "+sound", "+substance", "+time", "+vehicle"],
    "+int_control": ["+abstract", "-animate", "+broken", "+comestible", "communication", "+currency", "+eventive", "+garment", "+organization",
                     "+sound", "+time"],
    "+location": ["+abstract", "+broken", "+communication", "+currency", "+elongated", "+eventive", "+force", "-location", "+refl",
                  "-solid", "+sound", "+substance", "+time"],
    "-location": ["+abstract", "+animal", "+animate", "+biotic", "+body_part", "+broken", "+comestible", "+communication",
                  "+currency", "+elongated", "+force", "+garment", "+human", "+int_control", "+location", "+machine",
                  "+nonrigid", "+pointy", "+refl", "+solid", "-solid", "+sound", "+substance", "+time", "+vehicle"],
    "+machine": ["+abstract", "+animal", "+biotic", "+body_part", "+broken", "+comestible", "+communication", "+currency",
                 "+eventive", "+garment", "+human", "+organization", "-solid", "+sound", "+substance", "+time"],
    "+nonrigid": ["+abstract", "+broken", "+communication", "+eventive", "+organization", "+pointy", "+sound", "+time",
                  "+vehicle"],
    "+organization": ["+animal", "+animate", "-animate", "+biotic", "+body_part", "+broken", "+comestible", "+communication",
                      "+concrete", "+currency", "+elongated", "+eventive", "+force", "+garment", "+int_control",
                      "+machine", "+nonrigid", "+pointy", "+solid", "-solid", "+sound", "+substance", "+time",
                      "+vehicle"],
    "+plural": ["+substance", "+time"],
    "+pointy": ["+abstract", "+animal", "+communication", "+currency", "+eventive", "+garment", "+human", "+nonrigid",
                "+organization", "-solid", "+sound", "+substance", "+time", "+vehicle"],
    "+refl": ["+abstract", "+body_part", "+broken", "+comestible", "+communication", "+currency", "+eventive", "+garment",
              "+location", "-location", "-region"],
    "-region": ["+abstract", "+broken", "+communication", "+currency", "+eventive", "+force", "+refl", "+sound", "+substance",
                "+time"],
    "+solid": ["+abstract", "+communication", "+eventive", "-location", "+organization", "-solid", "+sound",
               "+substance"],
    "-solid": ["+abstract", "+animal", "+body_part","+broken", "+communication", "+currency", "+elongated", "+eventive",
               "+garment", "+human", "-location", "+machine", "+organization", "+pointy", "+solid", "+sound", "+time",
               "+vehicle"],  # liquids
    "+sound": ["+animal", "+animate", "-animate", "+biotic", "+body_part","+broken", "+comestible", "+concrete", "+currency",
               "+elongated", "+garment", "+human", "+int_control", "+location", "-location", "+machine", "+nonrigid",
               "+organization", "+pointy", "-region", "+solid", "-solid", "+substance", "+time", "+vehicle"],
    "+substance": ["+abstract", "+animal", "+body_part", "+broken", "+communication", "+currency", "+elongated", "+eventive",
                   "+garment", "+human", "+location", "-location", "+machine", "+organization", "+plural", "+pointy",
                   "+solid", "+sound", "+time", "+vehicle"],  # also liquids
    "+time": ["+animal", "+animate", "-animate", "+biotic","+broken", "+body_part", "+comestible", "+communication", "+concrete",
              "+currency", "+elongated", "+force", "+garment", "+human", "+int_control", "+location", "-location",
              "+machine", "+nonrigid", "+organization", "+pointy", "-region", "+solid", "-solid", "+sound",
              "+substance", "+vehicle"],
    "+vehicle": ["+abstract", "+animal", "-animate", "+biotic", "+body_part", "+broken", "+comestible", "+communication",
                 "+currency", "+eventive", "+garment", "+human", "-location", "+nonrigid", "+organization", "-solid",
                 "+sound", "+substance", "+time"],
    "+question": ["+animal", "+animate", "-animate", "+biotic", "+body_part", "+broken", "+comestible", "+concrete",
                  "+currency", "+elongated", "+eventive", "+force", "+garment", "+human", "+int_control", "+location",
                  "-location", "+machine", "+nonrigid", "+organization", "+pointy", "+refl", "-region", "+solid", "-solid",
                  "+substance", "+time", "+vehicle"]
}
def changePredicate(predicate):
    """Canonicalize a predicate's name in place.

    Some extracted predicate names are the negative of a canonical predicate
    (e.g. "disappear" is "not visible"); for those, the rename also flips the
    predicate's negation flag. Unknown names are left untouched.

    Returns the (mutated) predicate for convenience.
    """
    # original name -> (canonical name, whether the rename inverts polarity)
    renames = {
        "disappear": ("visible", True),
        "appear": ("visible", False),
        "location": ("has_location", False),
        "be": ("exist", False),
        "suffocate": ("alive", True),
        "suffocated": ("alive", True),
        "property": ("has_property", False),
        "neglect": ("take_care_of", True),
        "confined": ("free", True),
        "destroyed": ("function", True),
        "degradation_material_integrity": ("function", True),
        "linger": ("delay", False),
        "harmed": ("healthy", True),
    }
    rename = renames.get(predicate.predicate)
    if rename is not None:
        canonical_name, flips_polarity = rename
        predicate.predicate = canonical_name
        if flips_polarity:
            predicate.negated = not predicate.negated
    return predicate
def actorOnlyAllowedOne(predicate_name):
    """True for predicates of which a subject (actor) may hold only one at a time."""
    single_valued = {
        "has_location", "capacity", "has_configuration", "has_orientation",
        "has_position", "has_state", "has_val",
    }
    return predicate_name in single_valued
def patientOnlyAllowedOne(predicate_name):
    """True for predicates where, across all subjects, only one may hold it for a given patient."""
    return predicate_name == "has_possession"
def transitivePreds(predicate):
    """For symmetric relations, return a mirrored copy with subject and object swapped.

    Returns None when the predicate is not one of the known symmetric relations.
    The input predicate is not modified.
    """
    symmetric = {
        "together", "social_interaction", "correlated", "contact", "conflict",
        "attached", "cooperate", "different", "group", "mingled", "relate",
    }
    if predicate.predicate not in symmetric:
        return None
    mirrored = deepcopy(predicate)
    # swap: the first filler becomes the subject, the old subject the filler
    mirrored.subject = predicate.roles_to_fill[0]
    mirrored.roles_to_fill = [predicate.subject]
    return mirrored
"""
def predDoesNotAgreeWithSelRes(predicate, sel):
#TODO: finish this function
exclusions = {
"visible": ["+sound", "+eventive", "+time", "+abstract"],
}
if sel in exclusions[predicate]:
return True
return False
#rush(agent,theme)
"""
"""
def defaultPredicates(selrestr):
defaults = {
"+human": ["alive", "visible", "healthy", "free", "exist"],
"+concrete": ["exist", "visible"]
}
if selrestr in defaults: return defaults[selrestr]
else: return None
"""
def predLeadsToSel(pred, negated):
    """Return the selectional restriction triggered by a predicate's activation, if any.

    e.g. something that is no longer alive becomes edible; something whose
    "function" predicate changes becomes concrete or broken.
    Returns None when no restriction is triggered.
    """
    # (predicate, polarity) -> newly-acquired selectional restriction
    triggered = {
        ("alive", True): "+comestible",
        ("function", False): "+concrete",
        ("function", True): "+broken",
    }
    return triggered.get((pred, bool(negated)))
def replaceSels(newSel, oldSels):
    """Add a new selectional restriction, dropping old ones it excludes.

    Keeps every old restriction that is not mutually exclusive with ``newSel``
    (per selMutualExclusion) and returns the merged set.
    """
    excluded = selMutualExclusion[newSel]
    merged = {newSel}
    merged.update(old for old in oldSels if old not in excluded)
    return merged
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def shouldRemovePred(predicate_name):
    """True for predicates dropped entirely: redundant with other predicates or too verb-specific."""
    dropped = {
        "cause", "co-temporal", "do", "body_motion",
        "intrinsic_motion", "motion", "rotational_motion", "overlaps", "adv", "direction",
        "in_reaction_to", "apply_heat", "meets", "repeated_sequence", "transfer", "transfer_info",
        "change_value", "fictive_motion", "continue",
    }
    return predicate_name in dropped
#convert, irrealis
def shouldBeReplaced(verb):
    """Return True when ``verb`` is one of the aspectual VerbNet classes whose
    predicates should be replaced rather than kept as-is.

    Idiom fix: return the membership test directly instead of the
    "if x: return True / return False" pattern.
    """
    return verb in ("continue-55.3", "begin-55.1", "begin-55.1-1", "complete-55.2-1")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class State:
    """Holds the current set of conditions and selectional restrictions per entity.

    Attributes
    ----------
    conditions : defaultdict(set)
        conditions[entity] = set of predicate tuples; the first element of each
        tuple is whether the predicate is negated:
        (True/False, predicate, param1, param2, ...)
    selRestrictions : defaultdict(set)
        selRestrictions[entity] = set of selectional-restriction strings
    """
    def __init__(self):
        self.conditions = defaultdict(set)
        self.selRestrictions = defaultdict(set)
    def update(self, conds, sels):
        """Replace both maps wholesale."""
        self.conditions = conds
        self.selRestrictions = sels
    def returnDictionary(self):
        """Merge conditions and selectional restrictions into one mapping.

        Returns
        -------
        defaultdict(set): entity -> union of its conditions and restrictions
        """
        final_dict = defaultdict(set)
        for key in self.conditions.keys():
            # Bug fix: compare against None with "is", not "==".
            if key not in self.selRestrictions or self.selRestrictions[key] is None:
                final_dict[key] = self.conditions[key]
            else:
                final_dict[key] = self.conditions[key] | self.selRestrictions[key]
        # entities that only carry selectional restrictions, no conditions
        theRest = self.selRestrictions.keys() - self.conditions.keys()
        for key in theRest:
            final_dict[key] = self.selRestrictions[key]
        return final_dict
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Predicate:
    """A single (possibly negated, time-stamped) semantic predicate.

    Attributes:
        time: event/time label taken from ``eventType``.
        negated: whether the predicate is negated.
        subject: first argument of the predicate.
        roles_to_fill: remaining argument role names.
        predicate: the predicate's name.
    """

    def __init__(self, predicate, subject, roles, eventType, negated=False):
        # Removed legacy commented-out string-parsing code; the constructor
        # now only records the already-parsed pieces.
        self.time = eventType
        self.negated = negated
        self.subject = subject
        self.roles_to_fill = roles
        self.predicate = predicate

    def isSameAs(self, pred2):
        """Return True when every field of this predicate matches *pred2*."""
        return ((self.time, self.negated, self.subject,
                 self.roles_to_fill, self.predicate) ==
                (pred2.time, pred2.negated, pred2.subject,
                 pred2.roles_to_fill, pred2.predicate))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def checkSel_SubCall(source_ls, role):
# checks to see that the the new restrictions in role work with current source_ls restrictions
# or if they don't match make sure there are no mutual exclusions
finalRoles = set()
if not source_ls: # if the old is empty, it passes automatically
return True, role
if not role:
return True, source_ls
if "" in source_ls:
source_ls.remove("")
if "" in role:
role.remove("")
# print("OLD",source_ls)
# print("NEW",role)
if not source_ls: # if the old is empty, it passes automatically
return True, role
if not role:
return True, source_ls
for new_role in role:
if new_role in source_ls:
finalRoles = deepcopy(source_ls)
continue
if "|" in new_role:
sel1, sel2 = new_role.split("|")
for old_role in source_ls:
if sel1 in old_role: # a match
finalRoles.add(sel1)
if sel2 in old_role: # a match
finalRoles.add(sel2)
if "|" in old_role:
o1, o2 = | |
'''
Send nack to accept response
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove(self.txPacket.index)
return
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.nack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(ex + '\n')
self.stack.incStat("packing_error")
self.remove(self.txPacket.index)
return
self.transmit(packet)
self.remove(self.txPacket.index)
console.terse("Joiner Do Reject at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
class Joinent(Correspondent):
    '''
    RAET protocol Joinent transaction class, dual of Joiner.

    Runs the correspondent side of the join handshake: receives a join
    request, vets the initiator estate's keys, and replies with ack
    (pending), accept (response), or nack (reject).
    '''
    RedoTimeoutMin = 0.1 # initial timeout
    RedoTimeoutMax = 2.0 # max timeout

    def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
        '''
        Setup Transaction instance
        '''
        kwa['kind'] = raeting.trnsKinds.join
        super(Joinent, self).__init__(**kwa)
        self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
        self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
        self.redoTimer = aiding.StoreTimer(self.stack.store, duration=0.0)
        self.prep()
        # Since corresponding bootstrap transaction use packet.index not self.index
        self.add(self.rxPacket.index)

    def receive(self, packet):
        """
        Process received packet belonging to this transaction.
        Dispatch on packet kind: ack -> joined(), nack -> rejected().
        """
        super(Joinent, self).receive(packet) # self.rxPacket = packet
        if packet.data['tk'] == raeting.trnsKinds.join:
            if packet.data['pk'] == raeting.pcktKinds.ack: #accepted by joiner
                self.joined()
            elif packet.data['pk'] == raeting.pcktKinds.nack: #rejected
                self.rejected()

    def process(self):
        '''
        Perform time based processing of transaction.
        On overall timeout, nack. On redo timeout, either retransmit the
        pending accept response or re-check whether the remote's acceptance
        status has changed to accepted.
        '''
        if self.timeout > 0.0 and self.timer.expired:
            self.nackJoin()
            console.concise("Joinent timed out at {0}\n".format(self.stack.store.stamp))
            return
        # need to perform the check for accepted status and then send accept
        if self.redoTimer.expired:
            # exponential backoff: double last duration, clamp to max
            duration = min(
                         max(self.redoTimeoutMin,
                             self.redoTimer.duration) * 2.0,
                         self.redoTimeoutMax)
            self.redoTimer.restart(duration=duration)
            if (self.txPacket and
                    self.txPacket.data['pk'] == raeting.pcktKinds.response):
                self.transmit(self.txPacket) #redo
                console.concise("Joinent Redo Accept at {0}\n".format(self.stack.store.stamp))
            else: #check to see if status has changed to accept
                remote = self.stack.estates[self.reid]
                if remote:
                    data = self.stack.safe.loadRemoteEstate(remote)
                    if data:
                        status = self.stack.safe.statusRemoteEstate(remote,
                                                                    data['verhex'],
                                                                    data['pubhex'])
                        if status == raeting.acceptances.accepted:
                            self.accept()

    def prep(self):
        '''
        Prepare .txData header fields used by every outgoing packet.
        '''
        #since bootstrap transaction use the reversed seid and deid from packet
        self.txData.update( sh=self.stack.estate.host,
                            sp=self.stack.estate.port,
                            se=self.rxPacket.data['de'],
                            de=self.rxPacket.data['se'],
                            tk=self.kind,
                            cf=self.rmt,
                            bf=self.bcst,
                            si=self.sid,
                            ti=self.tid,
                            ck=raeting.coatKinds.nada,
                            fk=raeting.footKinds.nada,)

    def join(self):
        '''
        Process join packet
        Respond based on acceptance status of remote estate.

        Rules for Colliding Estates
        Apply the rules to ensure no colliding estates on (host, port)
        If matching name estate found then return
        Rules:
            Only one estate with given eid is allowed on road
            Only one estate with given name is allowed on road.
            Only one estate with given ha on road is allowed on road.

        Are multiple estates with same keys but different name (ha) allowed?
        Current logic ignores same keys or not

        Since creating new estate assigns unique eid,
        we are looking for preexisting estates with any eid.

        Processing steps:
        I) Search remote estates for matching name
            A) Found remote
                1) HA not match
                    Search remotes for other matching HA but different name
                    If found other delete
                Reuse found remote to be updated and joined
            B) Not found
                Search remotes for other matching HA
                If found delete for now
                Create new remote and update
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        data = self.rxPacket.data
        body = self.rxPacket.body.data
        # Validate required fields; on any missing field count the stat,
        # drop the transaction, and bail without raising.
        name = body.get('name')
        if not name:
            emsg = "Missing remote name in join packet"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_join')
            self.remove(self.rxPacket.index)
            return
            #raise raeting.TransactionError(emsg)
        verhex = body.get('verhex')
        if not verhex:
            emsg = "Missing remote verifier key in join packet"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_join')
            self.remove(self.rxPacket.index)
            return
            #raise raeting.TransactionError(emsg)
        pubhex = body.get('pubhex')
        if not pubhex:
            emsg = "Missing remote crypt key in join packet"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_join')
            self.remove(self.rxPacket.index)
            return
            #raise raeting.TransactionError(emsg)
        host = data['sh']
        port = data['sp']
        self.txData.update( dh=host, dp=port,) # responses use received host port
        remote = self.stack.fetchRemoteByName(name)
        if remote:
            if not (host == remote.host and port == remote.port):
                # name matched but address changed; evict any other estate
                # already occupying the new (host, port)
                other = self.stack.fetchRemoteByHostPort(host, port)
                if other and other is not remote: #may need to terminate transactions
                    try:
                        self.stack.removeRemote(other.eid)
                    except raeting.StackError as ex:
                        # str(ex): exception instances do not support + with str
                        console.terse(str(ex) + '\n')
                        self.stack.incStat(self.statKey())
                        self.remove(self.rxPacket.index)
                        return
                remote.host = host
                remote.port = port
            remote.rsid = self.sid
            remote.rtid = self.tid
            status = self.stack.safe.statusRemoteEstate(remote,
                                                        verhex=verhex,
                                                        pubhex=pubhex)
        else:
            # no name match; evict any estate squatting on (host, port)
            other = self.stack.fetchRemoteByHostPort(host, port)
            if other: #may need to terminate transactions
                try:
                    self.stack.removeRemote(other.eid)
                except raeting.StackError as ex:
                    console.terse(str(ex) + '\n')
                    self.stack.incStat(self.statKey())
                    self.remove(self.rxPacket.index)
                    return
            remote = estating.RemoteEstate( stack=self.stack,
                                            name=name,
                                            host=host,
                                            port=port,
                                            acceptance=None,
                                            verkey=verhex,
                                            pubkey=pubhex,
                                            rsid=self.sid,
                                            rtid=self.tid, )
            try:
                self.stack.addRemote(remote) #provisionally add .accepted is None
            except raeting.StackError as ex:
                console.terse(str(ex) + '\n')
                self.stack.incStat(self.statKey())
                self.remove(self.rxPacket.index)
                return
            status = self.stack.safe.statusRemoteEstate(remote,
                                                        verhex=verhex,
                                                        pubhex=pubhex)
        self.stack.dumpRemote(remote)
        self.reid = remote.eid # auto generated at instance creation above
        # was `status == None`; `is None` per PEP 8
        if status is None or status == raeting.acceptances.pending:
            self.ackJoin()
        elif status == raeting.acceptances.accepted:
            duration = min(
                            max(self.redoTimeoutMin,
                              self.redoTimer.duration) * 2.0,
                            self.redoTimeoutMax)
            self.redoTimer.restart(duration=duration)
            self.accept()
        else:
            self.nackJoin()
            emsg = "Estate {0} eid {1} keys rejected\n".format(
                            remote.name, remote.eid)
            console.terse(emsg)

    def ackJoin(self):
        '''
        Send ack to join request (acceptance still pending).
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove(self.rxPacket.index)
            return
        #since bootstrap transaction use updated self.reid
        #self.txData.update( dh=self.stack.estates[self.reid].host,
                            #dp=self.stack.estates[self.reid].port,)
        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.ack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove(self.rxPacket.index)
            return
        self.transmit(packet)
        console.concise("Joinent Pending Accept at {0}\n".format(self.stack.store.stamp))

    def accept(self):
        '''
        Send accept response to join request, carrying both estates' ids,
        our name, and our verification and encryption public keys.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove(self.rxPacket.index)
            return
        remote = self.stack.estates[self.reid]
        body = odict([ ('leid', self.reid),
                       ('reid', self.stack.estate.eid),
                       ('name', self.stack.estate.name),
                       ('verhex', self.stack.estate.signer.verhex),
                       ('pubhex', self.stack.estate.priver.pubhex)])
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.response,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove(self.rxPacket.index)
            return
        self.transmit(packet)
        console.concise("Joinent Do Accept at {0}\n".format(self.stack.store.stamp))

    def joined(self):
        '''
        Process ack to accept response: mark the remote joined, roll its
        session id, persist it, and finish the transaction.
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        remote = self.stack.estates[self.reid]
        remote.joined = True # accepted
        remote.nextSid()
        self.stack.dumpRemote(remote)
        self.remove(self.rxPacket.index)
        self.stack.incStat("join_correspond_complete")

    def rejected(self):
        '''
        Process nack to accept response or stale
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        # lookup kept for its KeyError side effect on a missing reid
        remote = self.stack.estates[self.reid]
        # use presence to remove remote
        self.remove(self.rxPacket.index)
        console.terse("Joinent Rejected at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())

    def nackJoin(self):
        '''
        Send nack to join request
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove(self.rxPacket.index)
            return
        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.nack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove(self.rxPacket.index)
            return
        self.transmit(packet)
        self.remove(self.rxPacket.index)
        console.terse("Joinent Reject at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())
class Allower(Initiator):
'''
RAET protocol Allower Initiator class Dual of Allowent
CurveCP handshake
'''
Timeout = 4.0
RedoTimeoutMin = 0.25 # initial timeout
RedoTimeoutMax = 1.0 # max timeout
    def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
        '''
        Setup instance.

        redoTimeoutMin/redoTimeoutMax bound the retransmit backoff timer;
        remaining keyword arguments pass through to Initiator/Transaction.
        Requires the remote estate to be joined already; otherwise the
        transaction is abandoned before being registered.
        '''
        kwa['kind'] = raeting.trnsKinds.allow
        super(Allower, self).__init__(**kwa)
        self.oreo = None # cookie from correspondent needed until handshake completed
        self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
        self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
        self.redoTimer = aiding.StoreTimer(self.stack.store,
                                           duration=self.redoTimeoutMin)
        if self.reid is None:
            # NOTE(review): .values()[0] assumes a list-returning mapping
            # (Python 2 / odict); confirm ordering guarantee for "zeroth".
            self.reid = self.stack.estates.values()[0].eid # zeroth is channel master
        remote = self.stack.estates[self.reid]
        if not remote.joined:
            emsg = "Must be joined first"
            console.terse(emsg + '\n')
            self.stack.incStat('unjoined_allow_attempt')
            # bail before prep/add: the transaction is never registered
            return
            #raise raeting.TransactionError(emsg)
        remote.refresh() # refresh short term keys and .allowed
        self.sid = remote.sid
        self.tid = remote.nextTid()
        self.prep() # prepare .txData
        self.add(self.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
"""
super(Allower, self).receive(packet) # self.rxPacket = packet
if packet.data['tk'] == raeting.trnsKinds.allow:
if packet.data['pk'] == raeting.pcktKinds.cookie:
self.cookie()
elif packet.data['pk'] == raeting.pcktKinds.ack:
self.allow()
elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
self.rejected()
def process(self):
'''
Perform time based processing of transaction
'''
if self.timeout > 0.0 and self.timer.expired:
self.remove()
console.concise("Allower timed out at {0}\n".format(self.stack.store.stamp))
return
# need keep sending join until accepted or timed out
if self.redoTimer.expired:
duration = min(
max(self.redoTimeoutMin,
self.redoTimer.duration) * 2.0,
self.redoTimeoutMin)
self.redoTimer.restart(duration=duration)
if self.txPacket:
if self.txPacket.data['pk'] == raeting.pcktKinds.hello:
self.transmit(self.txPacket) # redo
console.concise("Allower Redo Hello at {0}\n".format(self.stack.store.stamp))
if self.txPacket.data['pk'] == raeting.pcktKinds.initiate:
self.transmit(self.txPacket) # redo
console.concise("Allower Redo Initiate at {0}\n".format(self.stack.store.stamp))
if self.txPacket.data['pk'] == raeting.pcktKinds.ack:
self.transmit(self.txPacket) # redo
console.concise("Allower Redo Ack Final at {0}\n".format(self.stack.store.stamp))
    def prep(self):
        '''
        Prepare .txData with the header fields used by every outgoing packet
        of this transaction: source/destination host, port and estate ids,
        transaction kind and flags, session id and transaction id.
        '''
        remote = self.stack.estates[self.reid]
        self.txData.update( sh=self.stack.estate.host,
                            sp=self.stack.estate.port,
                            dh=remote.host,
                            dp=remote.port,
                            se=self.stack.estate.eid,
                            de=self.reid,
                            tk=self.kind,
                            cf=self.rmt,
                            bf=self.bcst,
                            si=self.sid,
                            ti=self.tid, )
    def hello(self):
        '''
        Send hello request: first message of the CurveCP-style handshake.
        Packs a hexlified all-zero plaintext, our short-term public key, and
        the ciphertext+nonce of that plaintext encrypted to the remote's key.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove()
            return
        remote = self.stack.estates[self.reid]
        # NOTE(review): hexlify of a str of NULs is a Python 2 idiom; under
        # Python 3 this would need bytes (b"".rjust(32, b'\x00')) — confirm
        # target runtime.
        plain = binascii.hexlify("".rjust(32, '\x00'))
        cipher, nonce = remote.privee.encrypt(plain, remote.pubber.key)
        body = raeting.HELLO_PACKER.pack(plain, remote.privee.pubraw, cipher, nonce)
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.hello,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # NOTE(review): `ex + '\n'` raises TypeError if PacketError is a
            # plain Exception subclass; likely should be str(ex) + '\n'.
            console.terse(ex + '\n')
            self.stack.incStat("packing_error")
            self.remove()
            return
        self.transmit(packet)
        console.concise("Allower Do Hello at {0}\n".format(self.stack.store.stamp))
def cookie(self):
'''
Process cookie packet
| |
and will provide a dict
with the following contents:
* ``x`` data-space x-coordinate of the mouse
* ``y`` data-space y-coordinate of the mouse
* ``sx`` screen-space x-coordinate of the mouse
* ``sy`` screen-space y-coordinate of the mouse
* ``data_x`` data-space x-coordinate of the hovered glyph
* ``data_y`` data-space y-coordinate of the hovered glyph
* ``indices`` column indices of all currently hovered glyphs
If the hover is over a "multi" glyph such as ``Patches`` or ``MultiLine``
then a ``segment_index`` key will also be present.
Finally, the value of the format passed in the tooltip specification is
available as the ``format`` variable.
The snippet will be made into the body of a function and therefore requires
a return statement.
Example:
.. code-block:: javascript
code = '''
return value + " total"
'''
""")
class HoverTool(Inspection):
''' *toolbar icon*: |hover_icon|
The hover tool is a passive inspector tool. It is generally on at all
times, but can be configured in the inspector's menu associated with the
*toolbar icon* shown above.
By default, the hover tool displays informational tooltips whenever the
cursor is directly over a glyph. The data to show comes from the glyph's
data source, and what to display is configurable with the ``tooltips``
property that maps display names to columns in the data source, or to
special known variables.
Here is an example of how to configure and use the hover tool::
# Add tooltip (name, field) pairs to the tool. See below for a
# description of possible field values.
hover.tooltips = [
("index", "$index"),
("(x,y)", "($x, $y)"),
("radius", "@radius"),
("fill color", "$color[hex, swatch]:fill_color"),
("foo", "@foo"),
("bar", "@bar"),
("baz", "@baz{safe}"),
        ("total", "@total{$0,0.00}"),
]
You can also supply a ``Callback`` to the ``HoverTool``, to build custom
interactions on hover. In this case you may want to turn the tooltips
off by setting ``tooltips=None``.
.. warning::
When supplying a callback or custom template, the explicit intent
of this Bokeh Model is to embed *raw HTML and JavaScript code* for
a browser to execute. If any part of the code is derived from untrusted
user inputs, then you must take appropriate care to sanitize the user
input prior to passing to Bokeh.
Hover tool does not currently work with the following glyphs:
.. hlist::
:columns: 3
* annulus
* arc
* bezier
* image_url
* oval
* patch
* quadratic
* ray
* step
* text
.. |hover_icon| image:: /_images/icons/Hover.png
:height: 24px
'''
names = List(String, help="""
A list of names to query for. If set, only renderers that have a matching
value for their ``name`` attribute will be used.
""")
renderers = Either(Auto, List(Instance(Renderer)), default="auto", help="""
An explicit list of renderers to hit test against. If unset, defaults to
all renderers on a plot.
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the input's value changes. The
``cb_data`` parameter that is available to the Callback code will contain two
``HoverTool`` specific fields:
:index: object containing the indices of the hovered points in the data source
:geometry: object containing the coordinates of the hover cursor
""")
tooltips = Either(String, List(Tuple(String, String)),
default=[
("index","$index"),
("data (x, y)","($x, $y)"),
("screen (x, y)","($sx, $sy)"),
], help="""
The (name, field) pairs describing what the hover tool should
display when there is a hit.
Field names starting with "@" are interpreted as columns on the
data source. For instance, "@temp" would look up values to display
from the "temp" column of the data source.
Field names starting with "$" are special, known fields:
:$index: index of hovered point in the data source
:$name: value of the ``name`` property of the hovered glyph renderer
:$x: x-coordinate under the cursor in data space
:$y: y-coordinate under the cursor in data space
:$sx: x-coordinate under the cursor in screen (canvas) space
:$sy: y-coordinate under the cursor in screen (canvas) space
:$color: color data from data source, with the syntax:
``$color[options]:field_name``. The available options
are: 'hex' (to display the color as a hex value), and
'swatch' to also display a small color swatch.
Field names that begin with ``@`` are associated with columns in a
``ColumnDataSource``. For instance the field name ``"@price"`` will
display values from the ``"price"`` column whenever a hover is triggered.
If the hover is for the 17th glyph, then the hover tooltip will
correspondingly display the 17th price value.
    Note that if a column name contains spaces, then it must be supplied by
surrounding it in curly braces, e.g. ``@{adjusted close}`` will display
values from a column named ``"adjusted close"``.
Sometimes (especially with stacked charts) it is desirable to allow the
name of the column be specified indirectly. The field name ``@$name`` is
distinguished in that it will look up the ``name`` field on the hovered
glyph renderer, and use that value as the column name. For instance, if
a user hovers with the name ``"US East"``, then ``@$name`` is equivalent to
``@{US East}``.
By default, values for fields (e.g. ``@foo``) are displayed in a basic
numeric format. However it is possible to control the formatting of values
    more precisely. Fields can be modified by appending a format specifier to
the end in curly braces. Some examples are below.
.. code-block:: python
"@foo{0,0.000}" # formats 10000.1234 as: 10,000.123
"@foo{(.00)}" # formats -10000.1234 as: (10000.123)
"@foo{($ 0.00 a)}" # formats 1230974 as: $ 1.23 m
Specifying a format ``{safe}`` after a field name will override automatic
escaping of the tooltip data source. Any HTML tags in the data tags will
be rendered as HTML in the resulting HoverTool output. See
:ref:`custom_hover_tooltip` for a more detailed example.
``None`` is also a valid value for tooltips. This turns off the
rendering of tooltips. This is mostly useful when supplying other
actions on hover via the callback property.
.. note::
The tooltips attribute can also be configured with a mapping type,
e.g. ``dict`` or ``OrderedDict``. However, if a ``dict`` is used,
the visual presentation order is unspecified.
""").accepts(Dict(String, String), lambda d: list(d.items()))
formatters = Dict(String, Either(Enum(TooltipFieldFormatter), Instance(CustomJSHover)), default=lambda: dict(), help="""
Specify the formatting scheme for data source columns, e.g.
.. code-block:: python
tool.formatters = {"@date": "datetime"}
will cause format specifications for the "date" column to be interpreted
according to the "datetime" formatting scheme. The following schemes are
available:
:``"numeral"``:
Provides a wide variety of formats for numbers, currency, bytes, times,
and percentages. The full set of formats can be found in the
|NumeralTickFormatter| reference documentation.
:``"datetime"``:
Provides formats for date and time values. The full set of formats is
listed in the |DatetimeTickFormatter| reference documentation.
:``"printf"``:
Provides formats similar to C-style "printf" type specifiers. See the
|PrintfTickFormatter| reference documentation for complete details.
If no formatter is specified for a column name, the default ``"numeral"``
formatter is assumed.
.. |NumeralTickFormatter| replace:: :class:`~bokeh.models.formatters.NumeralTickFormatter`
.. |DatetimeTickFormatter| replace:: :class:`~bokeh.models.formatters.DatetimeTickFormatter`
.. |PrintfTickFormatter| replace:: :class:`~bokeh.models.formatters.PrintfTickFormatter`
""")
mode = Enum("mouse", "hline", "vline", help="""
Whether to consider hover pointer as a point (x/y values), or a
span on h or v directions.
""")
muted_policy = Enum("show", "ignore",
default="show", help="""
Whether to avoid showing tooltips on muted glyphs.
""")
point_policy = Enum("snap_to_data", "follow_mouse", "none", help="""
Whether the tooltip position should snap to the "center" (or other anchor)
position of the associated glyph, or always follow the current mouse cursor
position.
""")
line_policy = Enum("prev", "next", "nearest", "interp", "none",
default="nearest", help="""
When showing tooltips for lines, designates whether the tooltip position
should be the "previous" or "next" points on the line, the "nearest" point
to the current mouse position, or "interpolate" along the line to the
current mouse position.
""")
anchor = Enum(Anchor, default="center", help="""
If point policy is set to `"snap_to_data"`, `anchor` defines the attachment
point of a tooltip. The default is to attach to the center of a glyph.
""")
attachment = Enum(TooltipAttachment, help="""
Whether the | |
selector requirements. The requirements are ANDed.
:param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
    @property
    @pulumi.getter(name="matchExpressions")
    # Auto-generated accessor: returns the value stored by pulumi.set in __init__.
    def match_expressions(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchLabels")
    # Auto-generated accessor: returns the value stored by pulumi.set in __init__.
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python names; `or` falls back
        # to the original name when no table entry exists (also when the
        # mapped value is falsy).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
# Auto-generated Pulumi output type mirroring the Kubernetes CRD schema;
# values are stored/read via pulumi.set/pulumi.get on the underlying dict.
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        # required fields always stored; optional `values` stored only when
        # supplied so the rendered output omits absent keys
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    def _translate_property(self, prop):
        # camelCase -> snake_case name translation with fallback to the input
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
# Auto-generated placeholder output type: the CRD declares this field without
# properties, so the class carries no data of its own.
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsTargetAverageValue(dict):
    def __init__(__self__):
        pass
    def _translate_property(self, prop):
        # camelCase -> snake_case name translation with fallback to the input
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
# Auto-generated Pulumi output type mirroring the Kubernetes CRD schema;
# values are stored/read via pulumi.set/pulumi.get on the underlying dict.
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResource(dict):
    """
    resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
    """
    def __init__(__self__, *,
                 name: str,
                 target_average_utilization: Optional[int] = None,
                 target_average_value: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValue'] = None):
        """
        resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
        :param str name: name is the name of the resource in question.
        :param int target_average_utilization: targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.
        :param 'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValueArgs' target_average_value: targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type.
        """
        # required `name` always stored; optional targets stored only when
        # supplied so the rendered output omits absent keys
        pulumi.set(__self__, "name", name)
        if target_average_utilization is not None:
            pulumi.set(__self__, "target_average_utilization", target_average_utilization)
        if target_average_value is not None:
            pulumi.set(__self__, "target_average_value", target_average_value)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        name is the name of the resource in question.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="targetAverageUtilization")
    def target_average_utilization(self) -> Optional[int]:
        """
        targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.
        """
        return pulumi.get(self, "target_average_utilization")
    @property
    @pulumi.getter(name="targetAverageValue")
    def target_average_value(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValue']:
        """
        targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type.
        """
        return pulumi.get(self, "target_average_value")
    def _translate_property(self, prop):
        # camelCase -> snake_case name translation with fallback to the input
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
# Auto-generated placeholder output type: the CRD declares this field without
# properties, so the class carries no data of its own.
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValue(dict):
    def __init__(__self__):
        pass
    def _translate_property(self, prop):
        # camelCase -> snake_case name translation with fallback to the input
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
# Auto-generated Pulumi output type mirroring the Kubernetes CRD schema;
# values are stored/read via pulumi.set/pulumi.get on the underlying dict.
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpec(dict):
    """
    SeldonScaledObjectSpec is the spec for a KEDA ScaledObject resource
    """
    def __init__(__self__, *,
                 triggers: Sequence['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggers'],
                 advanced: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvanced'] = None,
                 cooldown_period: Optional[int] = None,
                 max_replica_count: Optional[int] = None,
                 min_replica_count: Optional[int] = None,
                 polling_interval: Optional[int] = None):
        """
        SeldonScaledObjectSpec is the spec for a KEDA ScaledObject resource
        :param 'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedArgs' advanced: AdvancedConfig specifies advance scaling options
        """
        # required `triggers` always stored; optional scaling knobs stored
        # only when supplied so the rendered output omits absent keys
        pulumi.set(__self__, "triggers", triggers)
        if advanced is not None:
            pulumi.set(__self__, "advanced", advanced)
        if cooldown_period is not None:
            pulumi.set(__self__, "cooldown_period", cooldown_period)
        if max_replica_count is not None:
            pulumi.set(__self__, "max_replica_count", max_replica_count)
        if min_replica_count is not None:
            pulumi.set(__self__, "min_replica_count", min_replica_count)
        if polling_interval is not None:
            pulumi.set(__self__, "polling_interval", polling_interval)
    @property
    @pulumi.getter
    def triggers(self) -> Sequence['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggers']:
        return pulumi.get(self, "triggers")
    @property
    @pulumi.getter
    def advanced(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvanced']:
        """
        AdvancedConfig specifies advance scaling options
        """
        return pulumi.get(self, "advanced")
    @property
    @pulumi.getter(name="cooldownPeriod")
    def cooldown_period(self) -> Optional[int]:
        return pulumi.get(self, "cooldown_period")
    @property
    @pulumi.getter(name="maxReplicaCount")
    def max_replica_count(self) -> Optional[int]:
        return pulumi.get(self, "max_replica_count")
    @property
    @pulumi.getter(name="minReplicaCount")
    def min_replica_count(self) -> Optional[int]:
        return pulumi.get(self, "min_replica_count")
    @property
    @pulumi.getter(name="pollingInterval")
    def polling_interval(self) -> Optional[int]:
        return pulumi.get(self, "polling_interval")
    def _translate_property(self, prop):
        # camelCase -> snake_case name translation with fallback to the input
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvanced(dict):
    """
    AdvancedConfig specifies advance scaling options
    """
    def __init__(__self__, *,
                 horizontal_pod_autoscaler_config: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfig'] = None,
                 restore_to_original_replica_count: Optional[bool] = None):
        """
        AdvancedConfig specifies advance scaling options
        :param 'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigArgs' horizontal_pod_autoscaler_config: HorizontalPodAutoscalerConfig specifies horizontal scale config
        """
        # Both fields are optional; only keys that were supplied are stored.
        if horizontal_pod_autoscaler_config is not None:
            pulumi.set(__self__, "horizontal_pod_autoscaler_config", horizontal_pod_autoscaler_config)
        if restore_to_original_replica_count is not None:
            pulumi.set(__self__, "restore_to_original_replica_count", restore_to_original_replica_count)

    @property
    @pulumi.getter(name="horizontalPodAutoscalerConfig")
    def horizontal_pod_autoscaler_config(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfig']:
        """
        HorizontalPodAutoscalerConfig specifies horizontal scale config
        """
        return pulumi.get(self, "horizontal_pod_autoscaler_config")

    @property
    @pulumi.getter(name="restoreToOriginalReplicaCount")
    def restore_to_original_replica_count(self) -> Optional[bool]:
        return pulumi.get(self, "restore_to_original_replica_count")

    def _translate_property(self, prop):
        # camelCase -> snake_case key translation; unknown names unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfig(dict):
    """
    HorizontalPodAutoscalerConfig specifies horizontal scale config
    """
    def __init__(__self__, *,
                 behavior: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehavior'] = None,
                 resource_metrics: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetrics']] = None):
        """
        HorizontalPodAutoscalerConfig specifies horizontal scale config
        :param 'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorArgs' behavior: HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).
        """
        # Both fields are optional; only keys that were supplied are stored.
        if behavior is not None:
            pulumi.set(__self__, "behavior", behavior)
        if resource_metrics is not None:
            pulumi.set(__self__, "resource_metrics", resource_metrics)

    @property
    @pulumi.getter
    def behavior(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehavior']:
        """
        HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).
        """
        return pulumi.get(self, "behavior")

    @property
    @pulumi.getter(name="resourceMetrics")
    def resource_metrics(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetrics']]:
        return pulumi.get(self, "resource_metrics")

    def _translate_property(self, prop):
        # camelCase -> snake_case key translation; unknown names unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehavior(dict):
"""
HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).
"""
def __init__(__self__, *,
scale_down: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDown'] = None,
scale_up: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUp'] = None):
"""
HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).
:param 'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownArgs' scale_down: scaleDown is scaling policy for scaling Down. If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used).
:param 'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpArgs' scale_up: scaleUp is scaling policy for scaling Up. If not set, the default value is the higher of: * increase no more than 4 pods per 60 seconds * double the number of pods per 60 seconds No stabilization is used.
"""
if scale_down is not None:
pulumi.set(__self__, "scale_down", | |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from subprocess import Popen, PIPE
emojis="""⛑🏻 Helmet With White Cross, Type-1-2
💏🏻 Kiss, Type-1-2
💑🏻 Couple With Heart, Type-1-2
⛷🏻 Skier, Type-1-2
😀 Grinning Face
😁 Beaming Face With Smiling Eyes
😂 Face With Tears of Joy
🤣 Rolling on the Floor Laughing
😃 Grinning Face With Big Eyes
😄 Grinning Face With Smiling Eyes
😅 Grinning Face With Sweat
😆 Grinning Squinting Face
😉 Winking Face
😊 Smiling Face With Smiling Eyes
😋 Face Savoring Food
😎 Smiling Face With Sunglasses
😍 Smiling Face With Heart-Eyes
😘 Face Blowing a Kiss
🥰 Smiling Face With 3 Hearts
😗 Kissing Face
😙 Kissing Face With Smiling Eyes
😚 Kissing Face With Closed Eyes
☺️ Smiling Face
🙂 Slightly Smiling Face
🤗 Hugging Face
🤩 Star-Struck
🤔 Thinking Face
🤨 Face With Raised Eyebrow
😐 Neutral Face
😑 Expressionless Face
😶 Face Without Mouth
🙄 Face With Rolling Eyes
😏 Smirking Face
😣 Persevering Face
😥 Sad but Relieved Face
😮 Face With Open Mouth
🤐 Zipper-Mouth Face
😯 Hushed Face
😪 Sleepy Face
😫 Tired Face
😴 Sleeping Face
😌 Relieved Face
😛 Face With Tongue
😜 Winking Face With Tongue
😝 Squinting Face With Tongue
🤤 Drooling Face
😒 Unamused Face
😓 Downcast Face With Sweat
😔 Pensive Face
😕 Confused Face
🙃 Upside-Down Face
🤑 Money-Mouth Face
😲 Astonished Face
☹️ Frowning Face
🙁 Slightly Frowning Face
😖 Confounded Face
😞 Disappointed Face
😟 Worried Face
😤 Face With Steam From Nose
😢 Crying Face
😭 Loudly Crying Face
😦 Frowning Face With Open Mouth
😧 Anguished Face
😨 Fearful Face
😩 Weary Face
🤯 Exploding Head
😬 Grimacing Face
😰 Anxious Face With Sweat
😱 Face Screaming in Fear
🥵 Hot Face
🥶 Cold Face
😳 Flushed Face
🤪 Zany Face
😵 Dizzy Face
😡 Pouting Face
😠 Angry Face
🤬 Face With Symbols on Mouth
😷 Face With Medical Mask
🤒 Face With Thermometer
🤕 Face With Head-Bandage
🤢 Nauseated Face
🤮 Face Vomiting
🤧 Sneezing Face
😇 Smiling Face With Halo
🤠 Cowboy Hat Face
🤡 Clown Face
🥳 Partying Face
🥴 Woozy Face
🥺 Pleading Face
🤥 Lying Face
🤫 Shushing Face
🤭 Face With Hand Over Mouth
🧐 Face With Monocle
🤓 Nerd Face
😈 Smiling Face With Horns
👿 Angry Face With Horns
👹 Ogre
👺 Goblin
💀 Skull
☠️ Skull and Crossbones
👻 Ghost
👽 Alien
👾 Alien Monster
🤖 Robot Face
💩 Pile of Poo
😺 Grinning Cat Face
😸 Grinning Cat Face With Smiling Eyes
😹 Cat Face With Tears of Joy
😻 Smiling Cat Face With Heart-Eyes
😼 Cat Face With Wry Smile
😽 Kissing Cat Face
🙀 Weary Cat Face
😿 Crying Cat Face
😾 Pouting Cat Face
🙈 See-No-Evil Monkey
🙉 Hear-No-Evil Monkey
🙊 Speak-No-Evil Monkey
👶 Baby
🧒 Child
👦 Boy
👧 Girl
🧑 Adult
👨 Man
👩 Woman
🧓 Older Adult
👴 Old Man
👵 Old Woman
👨⚕️ Man Health Worker
👩⚕️ Woman Health Worker
👨🎓 Man Student
👩🎓 Woman Student
👨🏫 Man Teacher
👩🏫 Woman Teacher
👨⚖️ Man Judge
👩⚖️ Woman Judge
👨🌾 Man Farmer
👩🌾 Woman Farmer
👨🍳 Man Cook
👩🍳 Woman Cook
👨🔧 Man Mechanic
👩🔧 Woman Mechanic
👨🏭 Man Factory Worker
👩🏭 Woman Factory Worker
👨💼 Man Office Worker
👩💼 Woman Office Worker
👨🔬 Man Scientist
👩🔬 Woman Scientist
👨💻 Man Technologist
👩💻 Woman Technologist
👨🎤 Man Singer
👩🎤 Woman Singer
👨🎨 Man Artist
👩🎨 Woman Artist
👨✈️ Man Pilot
👩✈️ Woman Pilot
👨🚀 Man Astronaut
👩🚀 Woman Astronaut
👨🚒 Man Firefighter
👩🚒 Woman Firefighter
👮 Police Officer
👮♂️ Man Police Officer
👮♀️ Woman Police Officer
🕵️ Detective
🕵️♂️ Man Detective
🕵️♀️ Woman Detective
💂 Guard
💂♂️ Man Guard
💂♀️ Woman Guard
👷 Construction Worker
👷♂️ Man Construction Worker
👷♀️ Woman Construction Worker
🤴 Prince
👸 Princess
👳 Person Wearing Turban
👳♂️ Man Wearing Turban
👳♀️ Woman Wearing Turban
👲 Man With Chinese Cap
🧕 Woman With Headscarf
🧔 Bearded Person
👱 Blond-Haired Person
👱♂️ Blond-Haired Man
👱♀️ Blond-Haired Woman
👨🦰 Man, Red Haired
👩🦰 Woman, Red Haired
👨🦱 Man, Curly Haired
👩🦱 Woman, Curly Haired
👨🦲 Man, Bald
👩🦲 Woman, Bald
👨🦳 Man, White Haired
👩🦳 Woman, White Haired
🤵 Man in Tuxedo
👰 Bride With Veil
🤰 Pregnant Woman
🤱 Breast-Feeding
👼 Baby Angel
🎅 Santa Claus
🤶 Mrs. Claus
🦸 Superhero
🦸♀️ Woman Superhero
🦸♂️ Man Superhero
👯🏻 Woman With Bunny Ears, Type-1-2
🦹 Supervillain
👯🏻♂️ Men With Bunny Ears Partying, Type-1-2
🦹♀️ Woman Supervillain
👯🏻♀️ Women With Bunny Ears Partying, Type-1-2
🦹♂️ Man Supervillain
👫🏻 Man and Woman Holding Hands, Type-1-2
🧙 Mage
👬🏻 Two Men Holding Hands, Type-1-2
🧙♀️ Woman Mage
👭🏻 Two Women Holding Hands, Type-1-2
🧙♂️ Man Mage
👪🏻 Family, Type-1-2
🧚 Fairy
🧚♀️ Woman Fairy
🧚♂️ Man Fairy
🧛 Vampire
🧛♀️ Woman Vampire
🧛♂️ Man Vampire
🧜 Merperson
🧜♀️ Mermaid
🧜♂️ Merman
🧝 Elf
🧝♀️ Woman Elf
🧝♂️ Man Elf
🧞 Genie
🧞♀️ Woman Genie
🧞♂️ Man Genie
🧟 Zombie
🧟♀️ Woman Zombie
🧟♂️ Man Zombie
🙍 Person Frowning
🙍♂️ Man Frowning
🙍♀️ Woman Frowning
🙎 Person Pouting
🙎♂️ Man Pouting
🙎♀️ Woman Pouting
🙅 Person Gesturing No
🙅♂️ Man Gesturing No
🤝🏻 Handshake, Type-1-2
🙅♀️ Woman Gesturing No
🙆 Person Gesturing OK
🙆♂️ Man Gesturing OK
🙆♀️ Woman Gesturing OK
💁 Person Tipping Hand
💁♂️ Man Tipping Hand
💁♀️ Woman Tipping Hand
🙋 Person Raising Hand
🙋♂️ Man Raising Hand
🙋♀️ Woman Raising Hand
🙇 Person Bowing
🙇♂️ Man Bowing
🙇♀️ Woman Bowing
🤦 Person Facepalming
🤦♂️ Man Facepalming
🤦♀️ Woman Facepalming
🤷 Person Shrugging
🤷♂️ Man Shrugging
🤷♀️ Woman Shrugging
💆 Person Getting Massage
💆♂️ Man Getting Massage
💆♀️ Woman Getting Massage
💇 Person Getting Haircut
💇♂️ Man Getting Haircut
💇♀️ Woman Getting Haircut
🚶 Person Walking
🚶♂️ Man Walking
🚶♀️ Woman Walking
🏃 Person Running
🏃♂️ Man Running
🏃♀️ Woman Running
💃 Woman Dancing
🕺 Man Dancing
👯 People With Bunny Ears
👯♂️ Men With Bunny Ears
👯♀️ Women With Bunny Ears
🧖 Person in Steamy Room
🧖♀️ Woman in Steamy Room
🧖♂️ Man in Steamy Room
🧗 Person Climbing
🧗♀️ Woman Climbing
🧗♂️ Man Climbing
🧘 Person in Lotus Position
🧘♀️ Woman in Lotus Position
🧘♂️ Man in Lotus Position
🛀 Person Taking Bath
🛌 Person in Bed
🕴️ Man in Suit Levitating
🗣️ Speaking Head
👤 Bust in Silhouette
👥 Busts in Silhouette
🤺 Person Fencing
🏇 Horse Racing
⛷️ Skier
🏂 Snowboarder
🏌️ Person Golfing
🏌️♂️ Man Golfing
🏌️♀️ Woman Golfing
🏄 Person Surfing
🏄♂️ Man Surfing
🏄♀️ Woman Surfing
🚣 Person Rowing Boat
🚣♂️ Man Rowing Boat
🚣♀️ Woman Rowing Boat
🏊 Person Swimming
🏊♂️ Man Swimming
🏊♀️ Woman Swimming
⛹️ Person Bouncing Ball
⛹️♂️ Man Bouncing Ball
⛹️♀️ Woman Bouncing Ball
🏋️ Person Lifting Weights
🏋️♂️ Man Lifting Weights
🏋️♀️ Woman Lifting Weights
🚴 Person Biking
🚴♂️ Man Biking
🚴♀️ Woman Biking
🚵 Person Mountain Biking
🚵♂️ Man Mountain Biking
🚵♀️ Woman Mountain Biking
🏎️ Racing Car
🏍️ Motorcycle
🤸 Person Cartwheeling
🤼🏻 Wrestlers, Type-1-2
🤸♂️ Man Cartwheeling
🤼🏻♂️ Men Wrestling, Type-1-2
🤼🏻♀️ Women Wrestling, Type-1-2
🤸♀️ Woman Cartwheeling
🤼 People Wrestling
🤼♂️ Men Wrestling
🤼♀️ Women Wrestling
🤽 Person Playing Water Polo
🤽♂️ Man Playing Water Polo
🤽♀️ Woman Playing Water Polo
🤾 Person Playing Handball
🤾♂️ Man Playing Handball
🤾♀️ Woman Playing Handball
🤹 Person Juggling
🤹♂️ Man Juggling
🤹♀️ Woman Juggling
👫 Man and Woman Holding Hands
👬 Two Men Holding Hands
👭 Two Women Holding Hands
💏 Kiss
👩❤️💋👨 Kiss: Woman, Man
👨❤️💋👨 Kiss: Man, Man
👩❤️💋👩 Kiss: Woman, Woman
💑 Couple With Heart
👩❤️👨 Couple With Heart: Woman, Man
👨❤️👨 Couple With Heart: Man, Man
👩❤️👩 Couple With Heart: Woman, Woman
👪 Family
👨👩👦 Family: Man, Woman, Boy
👨👩👧 Family: Man, Woman, Girl
👨👩👧👦 Family: Man, Woman, Girl, Boy
👨👩👦👦 Family: Man, Woman, Boy, Boy
👨👩👧👧 Family: Man, Woman, Girl, Girl
👨👨👦 Family: Man, Man, Boy
👨👨👧 Family: Man, Man, Girl
👨👨👧👦 Family: Man, Man, Girl, Boy
👨👨👦👦 Family: Man, Man, Boy, Boy
👨👨👧👧 Family: Man, Man, Girl, Girl
👩👩👦 Family: Woman, Woman, Boy
👩👩👧 Family: Woman, Woman, Girl
👩👩👧👦 Family: Woman, Woman, Girl, Boy
👩👩👦👦 Family: Woman, Woman, Boy, Boy
👩👩👧👧 Family: Woman, Woman, Girl, Girl
👨👦 Family: Man, Boy
👨👦👦 Family: Man, Boy, Boy
👨👧 Family: Man, Girl
👨👧👦 Family: Man, Girl, Boy
👨👧👧 Family: Man, Girl, Girl
👩👦 Family: Woman, Boy
👩👦👦 Family: Woman, Boy, Boy
👩👧 Family: Woman, Girl
👩👧👦 Family: Woman, Girl, Boy
👩👧👧 Family: Woman, Girl, Girl
🤳 Selfie
💪 Flexed Biceps
🦵 Leg
🦶 Foot
👈 Backhand Index Pointing Left
👉 Backhand Index Pointing Right
☝️ Index Pointing Up
👆 Backhand Index Pointing Up
🖕 Middle Finger
👇 Backhand Index Pointing Down
✌️ Victory Hand
🤞 Crossed Fingers
🖖 Vulcan Salute
🤘 Sign of the Horns
🤙 Call Me Hand
🖐️ Hand With Fingers Splayed
✋ Raised Hand
👌 OK Hand
👍 Thumbs Up
👎 Thumbs Down
✊ Raised Fist
👊 Oncoming Fist
🤛 Left-Facing Fist
🤜 Right-Facing Fist
🤚 Raised Back of Hand
👋 Waving Hand
🤟 Love-You Gesture
✍️ Writing Hand
👏 Clapping Hands
👐 Open Hands
🙌 Raising Hands
🤲 Palms Up Together
🙏 Folded Hands
🤝 Handshake
💅 Nail Polish
👂 Ear
👃 Nose
🦰 Emoji Component Red Hair
🦱 Emoji Component Curly Hair
🦲 Emoji Component Bald
🦳 Emoji Component White Hair
👣 Footprints
👀 Eyes
👁️ Eye
👁️🗨️ Eye in Speech Bubble
🧠 Brain
🦴 Bone
🦷 Tooth
👅 Tongue
👄 Mouth
💋 Kiss Mark
💘 Heart With Arrow
❤️ Red Heart
💓 Beating Heart
💔 Broken Heart
💕 Two Hearts
💖 Sparkling Heart
💗 Growing Heart
💙 Blue Heart
💚 Green Heart
💛 Yellow Heart
🧡 Orange Heart
💜 Purple Heart
🖤 Black Heart
💝 Heart With Ribbon
💞 Revolving Hearts
💟 Heart Decoration
❣️ Heavy Heart Exclamation
💌 Love Letter
💤 Zzz
💢 Anger Symbol
💣 Bomb
💥 Collision
💦 Sweat Droplets
💨 Dashing Away
💫 Dizzy
💬 Speech Balloon
🗨️ Left Speech Bubble
🗯️ Right Anger Bubble
💭 Thought Balloon
🕳️ Hole
👓 Glasses
🕶️ Sunglasses
🥽 Goggles
🥼 Lab Coat
👔 Necktie
👕 T-Shirt
👖 Jeans
🧣 Scarf
🧤 Gloves
🧥 Coat
🧦 Socks
👗 Dress
👘 Kimono
👙 Bikini
👚 Woman’s Clothes
👛 Purse
👜 Handbag
👝 Clutch Bag
🛍️ Shopping Bags
🎒 School Backpack
👞 Man’s Shoe
👟 Running Shoe
🥾 Hiking Boot
🥿 Flat Shoe
👠 High-Heeled Shoe
👡 Woman’s Sandal
👢 Woman’s Boot
👑 Crown
👒 Woman’s Hat
🎩 Top Hat
🎓 Graduation Cap
🧢 Billed Cap
⛑️ Rescue Worker’s Helmet
📿 Prayer Beads
💄 Lipstick
💍 Ring
💎 Gem Stone
🐵 Monkey Face
🐒 Monkey
🦍 Gorilla
🐶 Dog Face
🐕 Dog
🐩 Poodle
🐺 Wolf Face
🦊 Fox Face
🦝 Raccoon
🐱 Cat Face
🐈 Cat
🦁 Lion Face
🐯 Tiger Face
🐅 Tiger
🐆 Leopard
🐴 Horse Face
🐎 Horse
🦄 Unicorn Face
🦓 | |
self.ckt.checkConstant(bn, 1)
# Helper routines to build up formula encoding all Brent constraints
# Declare (subset of) variables
# Optional symmetry map gives mapping between symmetric levels
# (mapping a self-symmetric level to itself)
def declareVariables(self, fixedList = None, symmetryMap = None):
    """Declare the (subset of) Brent variables for every auxiliary term.

    Args:
        fixedList: variables already fixed elsewhere; these are excluded
            from declaration.  Defaults to no fixed variables.  (The
            previous default was a mutable ``[]``; ``None`` avoids the
            shared-mutable-default pitfall and is backward compatible.)
        symmetryMap: optional mapping between symmetric levels (mapping a
            self-symmetric level to itself).  When given, a variable whose
            symmetric image is fixed or precedes it is not declared; it is
            instead defined as a copy of its image.
    """
    if fixedList is None:
        fixedList = []
    # For generating symmetries: alpha and beta swap roles, gamma is fixed.
    if symmetryMap is None:
        pset = None
    else:
        varPermuter = {'alpha':'beta', 'beta':'alpha', 'gamma':'gamma'}
        pset = {'level' : symmetryMap, 'variable' : varPermuter}
    for level in unitRange(self.auxCount):
        # Enumerate every gamma/alpha/beta variable of this level in
        # row-major order.
        allVars = []
        for cat in ['gamma', 'alpha', 'beta']:
            nrow = self.nrow(cat)
            ncol = self.ncol(cat)
            allVars += [BrentVariable(cat, i//ncol+1, (i%ncol)+1, level) for i in range(nrow*ncol)]
        candidateVars = [v for v in allVars if v not in fixedList]
        if symmetryMap is None:
            trueVars = candidateVars
            replicatedPairs = []
        else:
            trueVars = []
            replicatedPairs = []
            for v in candidateVars:
                mv = v.permute(pset)
                # A variable whose symmetric image is fixed, or sorts before
                # it, is derived from that image instead of being declared.
                if mv in fixedList or mv < v:
                    replicatedPairs.append((v, mv))
                else:
                    trueVars.append(v)
        if len(trueVars) > 0:
            self.ckt.comment("Variables for auxilliary term %d" % level)
            vec = circuit.Vec(trueVars)
            self.ckt.declare(vec)
        if len(replicatedPairs) > 0:
            self.ckt.comment("Variables defined by symmetry for auxilliary term %d" % level)
            for v, mv in replicatedPairs:
                self.ckt.comment("Symmetry dependency %s %s" % (str(v), str(mv)))
                self.ckt.andN(v, [mv])
def dfGenerator(self, streamlineNode = None, check = False, prefix = None):
    """Depth-first combination of Brent terms.

    Recursively expands the six index levels below ``prefix``, then ANDs
    the children into the node named by ``prefix``.

    Args:
        streamlineNode: optional extra conjunct folded in at
            ``self.streamlineLevel`` and dereferenced at top level.
        check: when True, verify each combined node is the constant 1
            instead of reporting sizes.
        prefix: index prefix identifying the subtree being generated; the
            empty prefix (default) is the root.  (The previous default was
            a mutable ``[]``; ``None`` avoids the shared-mutable-default
            pitfall and is backward compatible.)
    """
    if prefix is None:
        prefix = []
    level = 6 - len(prefix)
    if level == 0:
        # We're done. Reached the Brent term
        return
    ranges = self.fullRanges()
    gcount = ranges[len(prefix)]
    tlist = []
    for x in unitRange(gcount):
        nprefix = prefix + [x]
        # Recursively generate next level term
        self.dfGenerator(streamlineNode, check, prefix = nprefix)
        tlist.append(BrentTerm(nprefix))
    terms = self.ckt.addVec(circuit.Vec(tlist))
    args = terms
    if level == self.streamlineLevel and streamlineNode is not None:
        tlist = [streamlineNode] + tlist
        args = circuit.Vec(tlist)
    bn = BrentTerm(prefix)
    self.ckt.andN(bn, args)
    self.ckt.decRefs([terms])
    if check:
        self.ckt.checkConstant(bn, 1)
    if level == 6:
        # Top level cleanup
        if streamlineNode is not None:
            self.ckt.decRefs([streamlineNode])
        if not check:
            names = circuit.Vec([bn])
            self.ckt.comment("Find combined size for terms at level %d" % level)
            self.ckt.information(names)
def oldBfGenerator(self, streamlineNode = None, check = False):
    """Breadth-first combination of Brent terms, one index level per pass.

    NOTE(review): the ``old`` prefix suggests this is superseded by
    ``bfGenerator`` (which can collapse several levels at once) -- confirm
    before removing.
    """
    ranges = self.fullRanges()
    for level in unitRange(6):
        self.ckt.comment("Combining terms at level %d" % level)
        # Peel off the innermost remaining index range for this level.
        gcount = ranges[-1]
        ranges = ranges[:-1]
        indices = indexExpand(ranges)
        first = True
        for idx in indices:
            tlist = [BrentTerm(idx + [x]) for x in unitRange(gcount)]
            terms = self.ckt.addVec(circuit.Vec(tlist))
            args = terms
            if level == self.streamlineLevel and streamlineNode is not None:
                # Fold the streamline constraint in at its designated level.
                tlist = [streamlineNode] + tlist
                args = circuit.Vec(tlist)
            bn = BrentTerm(idx)
            self.ckt.andN(bn, args)
            self.ckt.decRefs([terms])
            if check:
                self.ckt.checkConstant(bn, 1)
            if first and not check:
                # Report the first node at this level as a representative sample.
                first = False
                name = circuit.Vec([BrentTerm(idx)])
                self.ckt.comment("Find size of typical function at level %d" % level)
                self.ckt.information(name)
        if streamlineNode is not None and level == self.streamlineLevel:
            self.ckt.decRefs([streamlineNode])
        if not check:
            names = circuit.Vec([BrentTerm(idx) for idx in indices])
            self.ckt.comment("Find combined size for terms at level %d" % level)
            self.ckt.information(names)
def bfGenerator(self, streamlineNode = None, check = False, levelList = None):
    """Breadth-first combination of Brent terms, possibly collapsing
    several index levels per step.

    Args:
        streamlineNode: optional extra conjunct applied once, at (or at the
            nearest listed level not above) ``self.streamlineLevel``.
        check: when True, verify each combined node is the constant 1
            instead of reporting sizes.
        levelList: which of levels 1..6 to combine at; defaults to all six.
    """
    if levelList is None:
        levelList = unitRange(6)
    ranges = self.fullRanges()
    lastLevel = 0
    # Pick the level at which the streamline node is folded in: the
    # streamline level itself when listed, otherwise the largest listed
    # level not exceeding it (0 if there is none).
    if streamlineNode is not None and self.streamlineLevel not in levelList:
        slevel = 0
        for level in levelList:
            if level > self.streamlineLevel:
                break
            slevel = level
    else:
        slevel = self.streamlineLevel
    for level in levelList:
        self.ckt.comment("Combining terms at level %d" % level)
        # gcounts covers every index range collapsed since the previous
        # processed level (possibly more than one at a time).
        gcounts = ranges[6-level:6-lastLevel]
        ranges = ranges[:6-level]
        indices = indexExpand(ranges)
        for idx in indices:
            tlist = [BrentTerm(idx + ls) for ls in indexExpand(gcounts)]
            terms = self.ckt.addVec(circuit.Vec(tlist))
            args = terms
            if streamlineNode is not None and level == slevel:
                tlist = [streamlineNode] + tlist
                args = circuit.Vec(tlist)
            bn = BrentTerm(idx)
            # conjunctLevels selects which levels use the conjunct
            # operation rather than plain AND.
            if level in self.conjunctLevels:
                self.ckt.conjunctN(bn, args)
            else:
                self.ckt.andN(bn, args)
            self.ckt.decRefs([terms])
            if check:
                self.ckt.checkConstant(bn, 1)
        if streamlineNode is not None and level == slevel:
            self.ckt.decRefs([streamlineNode])
        if not check:
            names = circuit.Vec([BrentTerm(idx) for idx in indices])
            self.ckt.comment("Find combined size for terms at level %d" % level)
            self.ckt.information(names)
        lastLevel = level
# Generate Brent equations
def generateBrentConstraints(self, kset = None, streamlineNode = None, check = False, breadthFirst = False, levelList = None, useZdd = False, fixKV = False, boundNonKernels = False):
    """Generate every Brent equation, then combine them into one formula.

    Args:
        kset: forwarded to ``generateBrent`` -- presumably a kernel set;
            TODO confirm against generateBrent.
        streamlineNode: optional node conjoined at the streamline level.
        check: verify terms are the constant 1 instead of reporting sizes.
        breadthFirst: combine with ``bfGenerator`` instead of
            ``dfGenerator``.
        levelList: levels to combine at (breadth-first mode only).
        useZdd, fixKV, boundNonKernels: forwarded to ``generateBrent``.
    """
    ranges = self.fullRanges()
    indices = indexExpand(ranges)
    self.ckt.comment("Generate all Brent equations")
    first = True
    for idx in indices:
        self.generateBrent(idx, kset = kset, check = check, useZdd = useZdd, fixKV = fixKV, boundNonKernels = boundNonKernels)
        if first and not check:
            # Report the first term's size as a representative sample.
            first = False
            name = circuit.Vec([BrentTerm(idx)])
            self.ckt.comment("Find size of typical Brent term")
            self.ckt.information(name)
    if not check:
        names = circuit.Vec([BrentTerm(idx) for idx in indices])
        self.ckt.comment("Find combined size of all Brent terms")
        self.ckt.information(names)
    if breadthFirst:
        self.bfGenerator(streamlineNode, check, levelList)
    else:
        self.dfGenerator(streamlineNode, check)
# Define kernel terms symbolically.
# Return vector for later dereferencing
def generateKernels(self):
    """Symbolically define every kernel term as the AND of its alpha,
    beta, and gamma variables.

    Returns:
        The circuit vector of kernel-term symbols, so the caller can
        dereference them later.
    """
    self.ckt.comment("Define kernel terms")
    ijkTuples = indexExpand(self.dim)
    kernels = [KernelTerm(i, j, k, level)
               for level in unitRange(self.auxCount)
               for (i, j, k) in ijkTuples]
    for kt in kernels:
        operands = [str(kt.alpha()), str(kt.beta()), str(kt.gamma())]
        self.ckt.andN(kt.symbol(), operands)
    return self.ckt.vec([kt.symbol() for kt in kernels])
def generateUniqueUsage(self, dest):
    """Constrain each kernel term (i,j,k) to occur in exactly one of the
    auxCount products, AND-ing the per-term constraints into ``dest``."""
    self.ckt.comment("Ensure that each kernel term appears in only one product")
    triples = indexExpand(self.dim)
    uniqueName = { (i, j, k) : "unique-i%d.j%d.k%d" % (i, j, k) for (i, j, k) in triples }
    uniqueVec = self.ckt.vec([uniqueName[(i, j, k)] for (i, j, k) in triples])
    for (i, j, k) in triples:
        symbols = [KernelTerm(i, j, k, level).symbol()
                   for level in unitRange(self.auxCount)]
        negated = ["!" + s for s in symbols]
        self.ckt.exactly1(uniqueName[(i, j, k)], circuit.Vec(symbols), circuit.Vec(negated))
    self.ckt.andN(dest, uniqueVec)
    self.ckt.decRefs([uniqueVec])
# Define terms that are NOT kernels symbolically.
# Return list of terms
def generateNonKernels(self):
    """Symbolically define every general term that is NOT a kernel term
    and return the list of those terms."""
    self.ckt.comment("Generate nonkernel terms")
    allIndices = indexExpand(self.fullRanges())
    nonKernels = []
    for level in unitRange(self.auxCount):
        for i1, i2, j1, j2, k1, k2 in allIndices:
            candidate = GeneralTerm(i1, i2, j1, j2, k1, k2, level)
            if not candidate.isKernel():
                nonKernels.append(candidate)
    for term in nonKernels:
        operands = [str(term.alpha()), str(term.beta()), str(term.gamma())]
        self.ckt.andN(term.symbol(), operands)
    return nonKernels
# Limit usage of nonkernel terms to 0 or 2
def generateBoundNonKernels(self, dest):
    """Constrain every non-kernel term to appear in either 0 or 2 products.

    Builds one 0-or-2 constraint per distinct index tuple, aggregates the
    constraints into groups sharing their first three indices, and then
    conjuncts the group nodes into ``dest``.
    """
    nklist = self.generateNonKernels()
    slist = [nk.symbol() for nk in nklist]
    nkvec = self.ckt.vec(slist)
    self.ckt.comment("Ensure that each nonkernel term appears in either 0 or 2 products")
    # One constraint per index tuple; level-1 terms enumerate the tuples.
    indexList = [nk.indices() for nk in nklist if nk.level == 1]
    cnodes = { idx : "limit0or2-%d%d%d%d%d%d" % idx for idx in indexList }
    # Pass a concrete list (not a dict view) to the circuit.
    cvec = self.ckt.vec(list(cnodes.values()))
    for idx in indexList:
        tlist = [nk for nk in nklist if nk.indices() == idx]
        tlist.sort(key = lambda nk : nk.level)
        slist = [nk.symbol() for nk in tlist]
        nslist = ['!' + s for s in slist]
        # Permitted occurrence counts: 0 and 2 (True at positions 0 and 2).
        self.ckt.okList(cnodes[idx], circuit.Vec(slist), circuit.Vec(nslist), [True, False, True])
    self.ckt.decRefs([nkvec])
    # Aggregate into smaller groups keyed by the first three indices.
    # (Local renamed from ``range`` to avoid shadowing the builtin.)
    fullRange = self.fullRanges()
    subrange = fullRange[:3]
    indices = [tuple(idx) for idx in indexExpand(subrange)]
    agnodes = { idx : "limit0or2-%d%d%d***" % idx for idx in indices }
    agvec = self.ckt.vec(list(agnodes.values()))
    for idx in indices:
        subnodes = [cnodes[sidx] for sidx in indexList if sidx[:3] == idx]
        self.ckt.andN(agnodes[idx], circuit.Vec(subnodes))
    self.ckt.decRefs([cvec])
    # Conjunct across these nodes
    self.ckt.conjunctN(dest, agvec)
    self.ckt.decRefs([agvec])
def generateMaxDouble(self, dest):
    """Require the first ``dcount`` products to contain exactly two kernel
    terms and every remaining product exactly one, AND-ing the per-product
    constraints into ``dest``."""
    doubleCount = self.dim[0] * self.dim[1] * self.dim[2] - self.auxCount
    self.ckt.comment("Ensure that first %d products have two kernel terms, and the remaining have one" % doubleCount)
    ijkTuples = indexExpand(self.dim)
    doubleLevels = unitRange(doubleCount)
    singleLevels = [doubleCount + offset + 1 for offset in range(self.auxCount - doubleCount)]
    doubleName = { level : "double-%.2d" % (level) for level in doubleLevels }
    singleName = { level : "single-%.2d" % level for level in singleLevels }
    allNames = self.ckt.vec([doubleName[level] for level in doubleLevels]
                            + [singleName[level] for level in singleLevels])
    for level in doubleLevels:
        symbols = [KernelTerm(i, j, k, level).symbol() for (i, j, k) in ijkTuples]
        negated = ["!" + s for s in symbols]
        self.ckt.exactlyK(doubleName[level], circuit.Vec(symbols), circuit.Vec(negated), 2)
    for level in singleLevels:
        symbols = [KernelTerm(i, j, k, level).symbol() for (i, j, k) in ijkTuples]
        negated = ["!" + s for s in symbols]
        self.ckt.exactly1(singleName[level], circuit.Vec(symbols), circuit.Vec(negated))
    self.ckt.andN(dest, allNames)
    self.ckt.decRefs([allNames])
def generateSingletonExclusion(self, dest):
self.ckt.comment("Enforce singleton exclusion property")
dcount = self.dim[0] * self.dim[1] * self.dim[2] - self.auxCount
srange = [l+dcount+1 for l in range(self.auxCount - dcount)]
ijkList = indexExpand(self.dim)
xNodes = { l : "exclude-%.2d" % (l) for l in srange}
xvec = self.ckt.vec([xNodes[l] for l in srange])
| |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function wrappers for the TensorBox API"""
# pylint:disable=abstract-class-instantiated,unexpected-keyword-arg
import warnings
import numpy as np
from .tensorbox import TensorBox
def _get_multi_tensorbox(values):
    """Select the framework to dispatch to for a sequence of tensor-like
    objects.

    Args:
        values (Sequence[tensor_like]): tensor-like objects to inspect

    Returns:
        .TensorBox: a box wrapping the first element of ``values`` that
        belongs to the highest-priority framework, with priority
        TensorFlow > PyTorch > Autograd > NumPy.

    Raises:
        ValueError: if two incompatible frameworks (e.g. Torch and
        TensorFlow tensors) are both present.
    """
    detected = [get_interface(v) for v in values]
    if len(set(detected) - {"numpy", "autograd"}) > 1:
        # Two or more incompatible auto-diff frameworks cannot be mixed.
        raise ValueError("Tensors contain mixed types; cannot determine dispatch library")
    non_numpy = set(detected) - {"numpy"}
    if len(non_numpy) > 1:
        # Autograd alongside TF/Torch: the autograd arrays will be treated
        # as non-differentiable constants, so suggest plain NumPy instead.
        warnings.warn(
            f"Contains tensors of types {non_numpy}; dispatch will prioritize "
            "TensorFlow and PyTorch over autograd. Consider replacing Autograd with vanilla NumPy.",
            UserWarning,
        )
    for framework in ("tf", "torch", "autograd"):
        if framework in detected:
            return TensorBox(values[detected.index(framework)])
    return TensorBox(values[detected.index("numpy")])
def abs_(tensor):
    """Compute the element-wise absolute value of ``tensor``.

    Args:
        tensor (tensor_like): input tensor

    Returns:
        tensor_like: tensor of the same shape with absolute values

    **Example**

    >>> a = torch.tensor([1., -2.], requires_grad=True)
    >>> abs_(a)
    tensor([1., 2.], grad_fn=<AbsBackward>)
    """
    box = TensorBox(tensor)
    return box.abs(wrap_output=False)
def allequal(tensor1, tensor2, **kwargs):
    """Check whether two tensors are element-wise equal, allowing the two
    arguments to come from different frameworks.

    Equivalent to ``np.all(tensor1 == tensor2, **kwargs)`` after converting
    both inputs to NumPy arrays.

    Args:
        tensor1 (tensor_like): tensor to compare
        tensor2 (tensor_like): tensor to compare
        **kwargs: forwarded to ``np.all`` (``axis``, ``out``, ``keepdims``, ...)

    Returns:
        ndarray, bool: a single boolean when ``axis=None`` (the default),
        otherwise a boolean NumPy array

    **Example**

    >>> a = torch.tensor([1, 2])
    >>> b = np.array([1, 2])
    >>> allequal(a, b)
    True
    """
    arr1 = toarray(tensor1)
    arr2 = toarray(tensor2)
    return np.all(arr1 == arr2, **kwargs)
def allclose(a, b, rtol=1e-05, atol=1e-08, **kwargs):
    """Wrapper around ``np.allclose`` that first converts ``a`` and ``b``
    to NumPy arrays, so the two tensors may differ in type."""
    return np.allclose(toarray(a), toarray(b), rtol=rtol, atol=atol, **kwargs)


# Expose NumPy's own documentation for the wrapped function.
allclose.__doc__ = np.allclose.__doc__
def angle(tensor):
    """Compute the element-wise angle (argument) of a complex tensor.

    Args:
        tensor (tensor_like): input tensor

    Returns:
        tensor_like: tensor of element-wise angles

    **Example**

    >>> a = torch.tensor([1.0, 1.0j, 1+1j], requires_grad=True)
    >>> angle(a)
    tensor([0.0000, 1.5708, 0.7854], grad_fn=<AngleBackward>)
    """
    box = TensorBox(tensor)
    return box.angle(wrap_output=False)
def arcsin(tensor):
    """Compute the element-wise inverse sine of ``tensor``."""
    box = TensorBox(tensor)
    return box.arcsin(wrap_output=False)
def cast(tensor, dtype):
    """Cast ``tensor`` to the given dtype.

    Args:
        tensor (tensor_like): tensor to cast
        dtype (str, np.dtype): Any supported NumPy dtype representation: a
            string (``"float64"``), a ``np.dtype`` object
            (``np.dtype("float64")``), or a dtype class (``np.float64``).
            For non-NumPy tensors, the **equivalent** dtype of the
            dispatched framework is used.

    Returns:
        tensor_like: a tensor with the same shape and values as ``tensor``
        and the dtype given by ``dtype``

    **Example**

    Using a NumPy dtype specifier:

    >>> x = torch.tensor([1, 2])
    >>> cast(x, np.float64)
    tensor([1., 2.], dtype=torch.float64)

    Using a string:

    >>> x = tf.Variable([1, 2])
    >>> cast(x, "complex128")
    <tf.Tensor: shape=(2,), dtype=complex128, numpy=array([1.+0.j, 2.+0.j])>
    """
    box = TensorBox(tensor)
    return box.cast(dtype, wrap_output=False)
def cast_like(tensor1, tensor2):
    """Cast one tensor to the dtype of another.

    Args:
        tensor1 (tensor_like): tensor to cast
        tensor2 (tensor_like): tensor whose dtype should be matched

    Returns:
        tensor_like: a tensor with the same shape and values as ``tensor1``
        and the dtype of ``tensor2``

    **Example**

    >>> x = torch.tensor([1, 2])
    >>> y = torch.tensor([3., 4.])
    >>> cast_like(x, y)
    tensor([1., 2.])
    """
    target_dtype = toarray(tensor2).dtype.type
    return cast(tensor1, target_dtype)
def concatenate(values, axis=0):
    """Concatenate a sequence of tensors along the given axis.

    .. warning::

        Tensors that are incompatible (such as Torch and TensorFlow
        tensors) cannot both be present.

    Args:
        values (Sequence[tensor_like]): Sequence of tensor-like objects to
            concatenate.  The objects must have the same shape except in
            the dimension corresponding to ``axis`` (the first, by default).
        axis (int): The axis along which the input tensors are
            concatenated.  If ``None``, tensors are flattened before use.
            Default is 0.

    Returns:
        tensor_like: The concatenated tensor.

    **Example**

    >>> x = tf.constant([0.6, 0.1, 0.6])
    >>> y = tf.Variable([0.1, 0.2, 0.3])
    >>> z = np.array([5., 8., 101.])
    >>> concatenate([x, y, z])
    TensorBox: <tf.Tensor: shape=(3, 3), dtype=float32, numpy=
    array([6.00e-01, 1.00e-01, 6.00e-01, 1.00e-01, 2.00e-01, 3.00e-01, 5.00e+00, 8.00e+00, 1.01e+02], dtype=float32)>
    """
    box = _get_multi_tensorbox(values)
    return box.concatenate(values, axis=axis, wrap_output=False)
def convert_like(tensor1, tensor2):
    """Convert a tensor to the same type as another.

    Args:
        tensor1 (tensor_like): tensor to convert
        tensor2 (tensor_like): tensor whose framework/type should be matched

    Returns:
        tensor_like: a tensor with the same shape, values, and dtype as
        ``tensor1``, converted into the same tensor type as ``tensor2``

    **Example**

    >>> x = np.array([1, 2])
    >>> y = tf.Variable([3, 4])
    >>> convert_like(x, y)
    <tf.Tensor: shape=(2,), dtype=int64, numpy=array([1, 2])>
    """
    target_box = TensorBox(tensor2)
    return target_box.astensor(tensor1)
def dot(tensor1, tensor2):
    """Returns the matrix or dot product of two tensors.

    * 0-D x 0-D: elementwise multiplication; a 0-dimensional scalar is returned.
    * 1-D x 1-D: the dot product.
    * 2-D x 1-D: the matrix-vector product.
    * 2-D x 2-D: the matrix product.
    * N-D x M-D: a sum product over the last dimension of the first tensor
      and the second-to-last dimension of the second tensor.

    Args:
        tensor1 (tensor_like): input tensor
        tensor2 (tensor_like): input tensor
    """
    box = _get_multi_tensorbox([tensor1, tensor2])
    return box.dot(tensor1, tensor2, wrap_output=False)
def expand_dims(tensor, axis):
    """Expand the shape of a tensor by inserting a new dimension of size 1
    at the specified axis location.

    .. warning::

        This function differs from ``np.expand_dims``.

    Args:
        tensor (tensor_like): tensor to expand
        axis (int): location in the axes to place the new dimension

    Returns:
        tensor_like: a tensor with the expanded shape

    **Example**

    >>> x = tf.Variable([3, 4])
    >>> expand_dims(x, axis=1)
    <tf.Tensor: shape=(2, 1), dtype=int32, numpy=
    array([[3],
           [4]], dtype=int32)>
    """
    boxed = TensorBox(tensor)
    return boxed.expand_dims(axis, wrap_output=False)
def get_interface(tensor):
    """Returns the name of the package that any array/tensor manipulations
    will dispatch to. The returned strings correspond to those used for
    PennyLane :doc:`interfaces </introduction/interfaces>`.

    Args:
        tensor (tensor_like): tensor input

    Returns:
        str: name of the interface

    **Example**

    >>> x = torch.tensor([1., 2.])
    >>> get_interface(x)
    'torch'
    >>> from pennylane import numpy as np
    >>> x = np.array([4, 5], requires_grad=True)
    >>> get_interface(x)
    'autograd'
    """
    box = TensorBox(tensor)
    return box.interface
def toarray(tensor):
    """Returns the tensor as a NumPy ``ndarray``. No copying is performed;
    the tensor and the returned array share the same storage.

    Args:
        tensor (tensor_like): input tensor

    Returns:
        array: an ``ndarray`` view into the same data

    **Example**

    >>> x = torch.tensor([1., 2.])
    >>> toarray(x)
    array([1., 2.])
    """
    boxed = TensorBox(tensor)
    return boxed.numpy()
def ones_like(tensor, dtype=None):
"""Returns a tensor of all ones with the same shape and dtype
as the input tensor.
Args:
tensor (tensor_like): input tensor
dtype (str, np.dtype): The desired output datatype of the array. If not provided, the dtype of
``tensor`` is used. This argument can | |
<reponame>InsDev/steam.py<filename>steam/abc.py
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015 <NAME> <<EMAIL>>
Copyright (c) 2020 James
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This contains a copy of
https://github.com/ValvePython/steam/blob/master/steam/steamid.py
"""
import abc
import asyncio
import re
from datetime import datetime
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, SupportsInt, Tuple, Union, overload
from typing_extensions import Final, Literal
from .badge import UserBadges
from .comment import Comment
from .enums import (
ECommunityVisibilityState,
EInstanceFlag,
EPersonaState,
EPersonaStateFlag,
EResult,
EType,
ETypeChar,
EUniverse,
)
from .errors import WSException
from .game import Game
from .iterators import CommentsIterator
from .models import Ban, community_route
from .trade import Inventory
from .utils import (
_INVITE_HEX,
_INVITE_MAPPING,
ETypeType,
EUniverseType,
InstanceType,
IntOrStr,
id64_from_url,
make_id64,
)
if TYPE_CHECKING:
from aiohttp import ClientSession
from .clan import Clan
from .group import Group
from .http import StrOrURL
from .image import Image
from .state import ConnectionState
from .user import User
__all__ = (
    "SteamID",
    "Message",
)

# fmt: off
# NOTE: these aliases shadow the identically named imports from .utils;
# some IDEs (e.g. PyCharm) fail to resolve the imported versions here.
IntOrStr = Union[int, str]
ETypeType = Union[
    EType,
    Literal[
        "Invalid", "Individual",
        "Multiseat", "GameServer",
        "AnonGameServer", "Pending",
        "ContentServer", "Clan",
        "Chat", "ConsoleUser",
        "AnonUser", "Max",
        0, 1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11,
    ],
]
# "Invalid" previously had a trailing space ("Invalid "), which a caller
# could never match with the real universe name.
EUniverseType = Union[EUniverse, Literal["Invalid", "Public", "Beta", "Internal", "Dev", "Max", 0, 1, 2, 3, 4, 5, 6,]]
InstanceType = Literal[0, 1]
# fmt: on
class SteamID(metaclass=abc.ABCMeta):
    """Convert a Steam ID between its various representations.

    Only the packed 64-bit ID (``__BASE``) is stored; every other
    representation (id2, id3, invite code, URLs) is derived from it.
    """

    __slots__ = (
        "__BASE",
        "__weakref__",
    )

    # Typed overloads for documentation/IDE purposes; the real implementation
    # simply forwards everything to make_id64.
    @overload
    def __init__(self):
        ...

    @overload
    def __init__(self, id: IntOrStr):
        ...

    @overload
    def __init__(self, id: IntOrStr, type: ETypeType):
        ...

    @overload
    def __init__(self, id: IntOrStr, type: ETypeType, universe: EUniverseType):
        ...

    @overload
    def __init__(self, id: IntOrStr, type: ETypeType, universe: EUniverseType, instance: InstanceType):
        ...

    def __init__(self, *args, **kwargs):
        # make_id64 packs id/type/universe/instance into the canonical
        # 64-bit integer layout.
        self.__BASE: Final[int] = make_id64(*args, **kwargs)

    def __int__(self):
        return self.__BASE

    def __eq__(self, other: SupportsInt):
        try:
            return int(self) == int(other)
        except (TypeError, ValueError):
            # ``other`` is not int-convertible; let the other operand's
            # __eq__ have a go.
            return NotImplemented

    def __str__(self):
        return str(int(self))

    def __hash__(self):
        return hash(self.__BASE)

    def __repr__(self):
        return f"SteamID(id={self.id}, type={self.type}, universe={self.universe}, instance={self.instance})"

    # 64-bit layout, high to low (derived from the shifts/masks below):
    # universe (8 bits) | type (4 bits) | instance (20 bits) | account id (32 bits).

    @property
    def instance(self) -> int:
        """:class:`int`: The instance of the SteamID."""
        return (int(self) >> 32) & 0xFFFFF

    @property
    def type(self) -> EType:
        """:class:`~steam.EType`: The Steam type of the SteamID."""
        return EType((int(self) >> 52) & 0xF)

    @property
    def universe(self) -> EUniverse:
        """:class:`~steam.EUniverse`: The Steam universe of the SteamID."""
        return EUniverse((int(self) >> 56) & 0xFF)

    @property
    def id64(self) -> int:
        """:class:`int`: The SteamID's 64 bit ID."""
        return int(self)

    @property
    def id(self) -> int:
        """:class:`int`: The SteamID's 32 bit ID."""
        return int(self) & 0xFFFFFFFF

    @property
    def id2(self) -> str:
        """:class:`str`: The SteamID's ID 2.

        e.g ``STEAM_1:0:1234``.
        """
        # STEAM_X:Y:Z, where Y is the account id's lowest bit and Z the
        # remaining upper 31 bits.
        return f"STEAM_{int(self.universe)}:{self.id % 2}:{self.id >> 1}"

    @property
    def id2_zero(self) -> str:
        """:class:`str`: The SteamID's ID 2 accounted for bugged GoldSrc and Orange Box games. In these games the
        accounts :attr:`universe`, ``1`` for :class:`.EType.Public`, should be the ``X`` component of ``STEAM_X:0:1234``
        however, this was bugged and the value of ``X`` was ``0``.

        e.g ``STEAM_0:0:1234``.
        """
        return self.id2.replace("_1", "_0")

    @property
    def id3(self) -> str:
        """:class:`str`: The SteamID's ID 3.

        e.g ``[U:1:1234]``.
        """
        type_char = ETypeChar(self.type).name
        instance = None

        # Only certain types render their instance in the ID 3 form.
        if self.type in (EType.AnonGameServer, EType.Multiseat):
            instance = self.instance
        elif self.type == EType.Individual:
            # An instance of 1 is omitted for individual IDs.
            if self.instance != 1:
                instance = self.instance
        elif self.type == EType.Chat:
            # Chat IDs encode their flavour in the type character instead.
            if self.instance & EInstanceFlag.Clan:
                type_char = "c"
            elif self.instance & EInstanceFlag.Lobby:
                type_char = "L"
            else:
                type_char = "T"

        parts = [type_char, int(self.universe), self.id]
        if instance is not None:
            parts.append(instance)

        return f'[{":".join(map(str, parts))}]'

    @property
    def invite_code(self) -> Optional[str]:
        """Optional[:class:`str`]: The SteamID's invite code in the s.team invite code format.

        e.g. ``cv-dgb``. Returns ``None`` for non-individual or invalid IDs.
        """
        if self.type == EType.Individual and self.is_valid():
            # Map each hex digit of the 32-bit account id through the invite
            # alphabet, then hyphenate the result at its midpoint.
            def repl_mapper(x: re.Match):
                return _INVITE_MAPPING[x.group()]

            invite_code = re.sub(f"[{_INVITE_HEX}]", repl_mapper, f"{self.id:x}")
            split_idx = len(invite_code) // 2
            if split_idx:
                invite_code = f"{invite_code[:split_idx]}-{invite_code[split_idx:]}"
            return invite_code

    @property
    def invite_url(self) -> Optional[str]:
        """Optional[:class:`str`]: The SteamID's full invite code URL.

        e.g ``https://s.team/p/cv-dgb``. Returns ``None`` when no invite
        code is available.
        """
        code = self.invite_code
        if code:
            return f"https://s.team/p/{code}"

    @property
    def community_url(self) -> Optional[str]:
        """Optional[:class:`str`]: The SteamID's community url.

        e.g https://steamcommunity.com/profiles/123456789. Returns ``None``
        for types without a community page.
        """
        # Only individuals and clans have community pages.
        suffix = {
            EType.Individual: "profiles",
            EType.Clan: "gid",
        }
        try:
            return f"https://steamcommunity.com/{suffix[self.type]}/{self.id64}"
        except KeyError:
            pass

    def is_valid(self) -> bool:
        """:class:`bool`: Whether or not the SteamID would be valid."""
        if self.type == EType.Invalid or self.type >= EType.Max:
            return False
        if self.universe == EUniverse.Invalid or self.universe >= EUniverse.Max:
            return False
        # Per-type constraints on the account id and instance.
        if self.type == EType.Individual:
            if self.id == 0 or self.instance > 4:
                return False
        if self.type == EType.Clan:
            if self.id == 0 or self.instance != 0:
                return False
        if self.type == EType.GameServer:
            if self.id == 0:
                return False
        if self.type == EType.AnonGameServer:
            if self.id == 0 and self.instance == 0:
                return False
        return True

    @classmethod
    async def from_url(
        cls, url: "StrOrURL", session: Optional["ClientSession"] = None, timeout: float = 30
    ) -> Optional["SteamID"]:
        """|coro|
        A helper function creates a SteamID instance from a Steam community url. See :func:`id64_from_url` for details.

        Parameters
        ----------
        url: Union[:class:`str`, :class:`yarl.URL`]
            The Steam community url to fetch.
        session: Optional[:class:`aiohttp.ClientSession`]
            The session to make the request with. If ``None`` is passed a new one is generated.
        timeout: :class:`float`
            How long to wait for a response before returning ``None``.

        Returns
        -------
        Optional[:class:`SteamID`]
            :class:`SteamID` instance or ``None``.
        """
        id64 = await id64_from_url(url, session, timeout)
        return cls(id64) if id64 else None
class BaseUser(SteamID):
"""An ABC that details the common operations on a Steam user.
The following classes implement this ABC:
- :class:`~steam.User`
- :class:`~steam.ClientUser`
.. container:: operations
.. describe:: x == y
Checks if two users are equal.
.. describe:: x != y
Checks if two users are not equal.
.. describe:: str(x)
Returns the user's name.
Attributes
----------
name: :class:`str`
The user's username.
state: :class:`~steam.EPersonaState`
The current persona state of the account (e.g. LookingToTrade).
game: Optional[:class:`~steam.Game`]
The Game instance attached to the user. Is ``None`` if the user isn't in a game or one that is recognised by the
api.
primary_clan: Optional[:class:`SteamID`]
The primary clan the User displays on their profile.
avatar_url: :class:`str`
The avatar url of the user. Uses the large (184x184 px) image url.
real_name: Optional[:class:`str`]
The user's real name defined by them. Could be ``None``.
created_at: Optional[:class:`datetime.datetime`]
The time at which the user's account was created. Could be ``None``.
last_logoff: Optional[:class:`datetime.datetime`]
The last time the user logged into steam. Could be None (e.g. if they are currently online).
country: Optional[:class:`str`]
The country code of the account. Could be ``None``.
flags: List[:class:`~steam.EPersonaStateFlag`]
The persona state flags of the account.
"""
__slots__ = (
"name",
"game",
"state",
"flags",
"country",
"primary_clan",
"trade_url",
"real_name",
"avatar_url",
"last_seen_online",
"created_at",
"last_logoff",
"last_logon",
"privacy_state",
"_state",
"_is_commentable",
"_setup_profile",
)
def __init__(self, state: "ConnectionState", data: dict):
    """Initialise the user from a raw API payload ``data``."""
    super().__init__(data["steamid"])
    self._state = state
    # Every profile field starts out empty until _update fills it in from
    # the payload; flags is the only list-valued attribute.
    for attr in (
        "name", "real_name", "avatar_url", "primary_clan", "country",
        "created_at", "last_logoff", "last_logon", "last_seen_online",
        "game", "state", "privacy_state",
    ):
        setattr(self, attr, None)
    self.flags: List[EPersonaStateFlag] = []
    self._update(data)
def __repr__(self):
    # Debug representation showing the persona name plus the SteamID parts.
    pairs = " ".join(
        f"{attr}={getattr(self, attr)!r}"
        for attr in ("name", "state", "id", "type", "universe", "instance")
    )
    return f"<User {pairs}>"
def __str__(self):
    # A user displays as their current persona (display) name.
    return self.name
def _update(self, data) -> None:
self.name = data["personaname"]
self.real_name = data.get("realname") or self.real_name
self.avatar_url = data.get("avatarfull") or self.avatar_url
self.trade_url = community_route(f"tradeoffer/new/?partner={self.id}")
self.primary_clan = SteamID(data["primaryclanid"]) if "primaryclanid" in data else self.primary_clan
self.country = data.get("loccountrycode") or self.country
self.created_at = datetime.utcfromtimestamp(data["timecreated"]) if "timecreated" in data else | |
<reponame>fakepop/hubspot-api-python<filename>hubspot/files/files/models/import_from_url_input.py<gh_stars>0
# coding: utf-8
"""
Files
Upload and manage files. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.files.files.configuration import Configuration
class ImportFromUrlInput(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each attribute name to its OpenAPI type; all fields here are strings
# except the boolean ``overwrite`` flag.
openapi_types = {
    "access": "str",
    "ttl": "str",
    "name": "str",
    "url": "str",
    "folder_id": "str",
    "folder_path": "str",
    "duplicate_validation_strategy": "str",
    "duplicate_validation_scope": "str",
    "overwrite": "bool",
}

# Maps each snake_case attribute name to its camelCase JSON key in the API.
attribute_map = {
    "access": "access",
    "ttl": "ttl",
    "name": "name",
    "url": "url",
    "folder_id": "folderId",
    "folder_path": "folderPath",
    "duplicate_validation_strategy": "duplicateValidationStrategy",
    "duplicate_validation_scope": "duplicateValidationScope",
    "overwrite": "overwrite",
}
def __init__(
    self,
    access=None,
    ttl=None,
    name=None,
    url=None,
    folder_id=None,
    folder_path=None,
    duplicate_validation_strategy=None,
    duplicate_validation_scope=None,
    overwrite=None,
    local_vars_configuration=None,
):  # noqa: E501
    """ImportFromUrlInput - a model defined in OpenAPI"""  # noqa: E501
    # Fall back to a default client configuration; it controls whether
    # client-side validation runs in the property setters below.
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration

    # Backing fields for the generated properties.
    self._access = None
    self._ttl = None
    self._name = None
    self._url = None
    self._folder_id = None
    self._folder_path = None
    self._duplicate_validation_strategy = None
    self._duplicate_validation_scope = None
    self._overwrite = None
    self.discriminator = None

    # Required fields are assigned unconditionally (their setters may raise
    # when validation is enabled); optional fields only when provided.
    self.access = access
    if ttl is not None:
        self.ttl = ttl
    if name is not None:
        self.name = name
    self.url = url
    if folder_id is not None:
        self.folder_id = folder_id
    if folder_path is not None:
        self.folder_path = folder_path
    self.duplicate_validation_strategy = duplicate_validation_strategy
    self.duplicate_validation_scope = duplicate_validation_scope
    self.overwrite = overwrite
@property
def access(self) -> str:
    """Gets the access of this ImportFromUrlInput.  # noqa: E501
    PUBLIC_INDEXABLE: File is publicly accessible by anyone who has the URL. Search engines can index the file. PUBLIC_NOT_INDEXABLE: File is publicly accessible by anyone who has the URL. Search engines *can't* index the file. PRIVATE: File is NOT publicly accessible. Requires a signed URL to see content. Search engines *can't* index the file. # noqa: E501
    :return: The access of this ImportFromUrlInput.  # noqa: E501
    :rtype: str
    """
    return self._access
@access.setter
def access(self, access: str) -> None:
    """Sets the access of this ImportFromUrlInput.
    PUBLIC_INDEXABLE: File is publicly accessible by anyone who has the URL. Search engines can index the file. PUBLIC_NOT_INDEXABLE: File is publicly accessible by anyone who has the URL. Search engines *can't* index the file. PRIVATE: File is NOT publicly accessible. Requires a signed URL to see content. Search engines *can't* index the file. # noqa: E501
    :param access: The access of this ImportFromUrlInput.  # noqa: E501
    :type: str
    """
    # ``access`` is required: reject None, then restrict to the enum values.
    if (
        self.local_vars_configuration.client_side_validation and access is None
    ):  # noqa: E501
        raise ValueError(
            "Invalid value for `access`, must not be `None`"
        )  # noqa: E501
    allowed_values = [
        "PUBLIC_INDEXABLE",
        "PUBLIC_NOT_INDEXABLE",
        "PRIVATE",
    ]  # noqa: E501
    if (
        self.local_vars_configuration.client_side_validation
        and access not in allowed_values
    ):  # noqa: E501
        raise ValueError(
            "Invalid value for `access` ({0}), must be one of {1}".format(  # noqa: E501
                access, allowed_values
            )
        )
    self._access = access
@property
def ttl(self) -> str:
    """Gets the ttl of this ImportFromUrlInput.  # noqa: E501
    Time to live. If specified the file will be deleted after the given time frame. # noqa: E501
    :return: The ttl of this ImportFromUrlInput.  # noqa: E501
    :rtype: str
    """
    return self._ttl
@ttl.setter
def ttl(self, ttl: str) -> None:
    """Sets the ttl of this ImportFromUrlInput.
    Time to live. If specified the file will be deleted after the given time frame. # noqa: E501
    :param ttl: The ttl of this ImportFromUrlInput.  # noqa: E501
    :type: str
    """
    # Optional field: no validation is applied.
    self._ttl = ttl
@property
def name(self) -> str:
    """Gets the name of this ImportFromUrlInput.  # noqa: E501
    Name to give the resulting file in the file manager. # noqa: E501
    :return: The name of this ImportFromUrlInput.  # noqa: E501
    :rtype: str
    """
    return self._name
@name.setter
def name(self, name: str) -> None:
    """Sets the name of this ImportFromUrlInput.
    Name to give the resulting file in the file manager. # noqa: E501
    :param name: The name of this ImportFromUrlInput.  # noqa: E501
    :type: str
    """
    # Optional field: no validation is applied.
    self._name = name
@property
def url(self) -> str:
    """Gets the url of this ImportFromUrlInput.  # noqa: E501
    URL to download the new file from. # noqa: E501
    :return: The url of this ImportFromUrlInput.  # noqa: E501
    :rtype: str
    """
    return self._url
@url.setter
def url(self, url: str) -> None:
    """Sets the url of this ImportFromUrlInput.
    URL to download the new file from. # noqa: E501
    :param url: The url of this ImportFromUrlInput.  # noqa: E501
    :type: str
    """
    # ``url`` is required: reject None when client-side validation is on.
    if (
        self.local_vars_configuration.client_side_validation and url is None
    ):  # noqa: E501
        raise ValueError(
            "Invalid value for `url`, must not be `None`"
        )  # noqa: E501
    self._url = url
@property
def folder_id(self) -> str:
    """Gets the folder_id of this ImportFromUrlInput.  # noqa: E501
    One of folderId or folderPath is required. Destination folder ID for the uploaded file. # noqa: E501
    :return: The folder_id of this ImportFromUrlInput.  # noqa: E501
    :rtype: str
    """
    return self._folder_id
@folder_id.setter
def folder_id(self, folder_id: str) -> None:
    """Sets the folder_id of this ImportFromUrlInput.
    One of folderId or folderPath is required. Destination folder ID for the uploaded file. # noqa: E501
    :param folder_id: The folder_id of this ImportFromUrlInput.  # noqa: E501
    :type: str
    """
    # The folder_id/folder_path either-or constraint is enforced server-side.
    self._folder_id = folder_id
@property
def folder_path(self) -> str:
    """Gets the folder_path of this ImportFromUrlInput.  # noqa: E501
    One of folderPath or folderId is required. Destination folder path for the uploaded file. If the folder path does not exist, there will be an attempt to create the folder path. # noqa: E501
    :return: The folder_path of this ImportFromUrlInput.  # noqa: E501
    :rtype: str
    """
    return self._folder_path
@folder_path.setter
def folder_path(self, folder_path: str) -> None:
    """Sets the folder_path of this ImportFromUrlInput.
    One of folderPath or folderId is required. Destination folder path for the uploaded file. If the folder path does not exist, there will be an attempt to create the folder path. # noqa: E501
    :param folder_path: The folder_path of this ImportFromUrlInput.  # noqa: E501
    :type: str
    """
    # The folder_path/folder_id either-or constraint is enforced server-side.
    self._folder_path = folder_path
@property
def duplicate_validation_strategy(self) -> str:
    """Gets the duplicate_validation_strategy of this ImportFromUrlInput.  # noqa: E501
    NONE: Do not run any duplicate validation. REJECT: Reject the upload if a duplicate is found. RETURN_EXISTING: If a duplicate file is found, do not upload a new file and return the found duplicate instead. # noqa: E501
    :return: The duplicate_validation_strategy of this ImportFromUrlInput.  # noqa: E501
    :rtype: str
    """
    return self._duplicate_validation_strategy
@duplicate_validation_strategy.setter
def duplicate_validation_strategy(self, duplicate_validation_strategy: str) -> None:
    """Sets the duplicate_validation_strategy of this ImportFromUrlInput.
    NONE: Do not run any duplicate validation. REJECT: Reject the upload if a duplicate is found. RETURN_EXISTING: If a duplicate file is found, do not upload a new file and return the found duplicate instead. # noqa: E501
    :param duplicate_validation_strategy: The duplicate_validation_strategy of this ImportFromUrlInput.  # noqa: E501
    :type: str
    """
    # Required field: reject None, then restrict to the enum values.
    if (
        self.local_vars_configuration.client_side_validation
        and duplicate_validation_strategy is None
    ):  # noqa: E501
        raise ValueError(
            "Invalid value for `duplicate_validation_strategy`, must not be `None`"
        )  # noqa: E501
    allowed_values = ["NONE", "REJECT", "RETURN_EXISTING"]  # noqa: E501
    if (
        self.local_vars_configuration.client_side_validation
        and duplicate_validation_strategy not in allowed_values
    ):  # noqa: E501
        raise ValueError(
            "Invalid value for `duplicate_validation_strategy` ({0}), must be one of {1}".format(  # noqa: E501
                duplicate_validation_strategy, allowed_values
            )
        )
    self._duplicate_validation_strategy = duplicate_validation_strategy
@property
def duplicate_validation_scope(self) -> str:
    """Gets the duplicate_validation_scope of this ImportFromUrlInput.  # noqa: E501
    ENTIRE_PORTAL: Look for a duplicate file in the entire account. EXACT_FOLDER: Look for a duplicate file in the provided folder. # noqa: E501
    :return: The duplicate_validation_scope of this ImportFromUrlInput.  # noqa: E501
    :rtype: str
    """
    return self._duplicate_validation_scope
@duplicate_validation_scope.setter
def duplicate_validation_scope(self, duplicate_validation_scope):
"""Sets the duplicate_validation_scope of this ImportFromUrlInput.
ENTIRE_PORTAL: Look for a duplicate file in the entire account. EXACT_FOLDER: Look for a duplicate file in the provided folder. # noqa: E501
:param duplicate_validation_scope: The duplicate_validation_scope of this ImportFromUrlInput. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and duplicate_validation_scope is None
): # noqa: E501
raise ValueError(
"Invalid value for `duplicate_validation_scope`, must not be `None`"
) # noqa: E501
allowed_values = ["ENTIRE_PORTAL", "EXACT_FOLDER"] # noqa: E501
if (
self.local_vars_configuration.client_side_validation
and duplicate_validation_scope not in allowed_values
): # noqa: E501
raise ValueError(
"Invalid value for `duplicate_validation_scope` ({0}), must be one of {1}".format( | |
# NOTE: the first argument, self.wsgi_app, is actually a bound Flask method.
self.wsgi_app = SharedDataMiddleware(self.wsgi_app, {
self.static_path: target
})
#: the Jinja2 environment. It is created from the
#: :attr:`jinja_options` and the loader that is returned
#: by the :meth:`create_jinja_loader` function.
self.jinja_env = Environment(loader=self.create_jinja_loader(),
**self.jinja_options)
self.jinja_env.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages
)
def create_jinja_loader(self):
    """Creates the Jinja loader. By default just a package loader for
    the configured package is returned that looks up templates in the
    `templates` folder. To add other loaders it's possible to
    override this method.
    """
    # Prefer the package loader; fall back to the filesystem when
    # pkg_resources is unavailable.
    if pkg_resources is not None:
        return PackageLoader(self.package_name)
    return FileSystemLoader(os.path.join(self.root_path, 'templates'))
def update_template_context(self, context):
    """Update the template context with some commonly used variables.

    Each registered template context processor is called in registration
    order and the mapping it returns is merged into ``context`` in place,
    so later processors override earlier ones. The default processor is
    what injects request, session and g.

    :param context: the context as a dictionary that is updated in place
                    to add extra variables.
    """
    # The previous version also read ``_request_ctx_stack.top`` into an
    # unused local; that dead read has been removed.
    for func in self.template_context_processors:
        context.update(func())
def run(self, host='localhost', port=5000, **options):
    """Runs the application on a local development server. If the
    :attr:`debug` flag is set the server will automatically reload
    for code changes and show a debugger in case an exception happened.

    :param host: the hostname to listen on. set this to ``'0.0.0.0'``
                 to have the server available externally as well.
    :param port: the port of the webserver
    :param options: the options to be forwarded to the underlying
                    Werkzeug server. See :func:`werkzeug.run_simple`
                    for more information.
    """
    from werkzeug import run_simple
    # A ``debug`` keyword mirrors self.debug and drives the reloader and
    # debugger defaults below.
    if 'debug' in options:
        self.debug = options.pop('debug')
    for opt in ('use_reloader', 'use_debugger'):
        options.setdefault(opt, self.debug)
    return run_simple(host, port, self, **options)
def test_client(self):
    """Creates a test client for this application. For information
    about unit testing head over to :ref:`testing`.
    """
    from werkzeug import Client
    # Cookies are enabled so sessions work across test requests.
    return Client(self, self.response_class, use_cookies=True)
def open_resource(self, resource):
    """Opens a resource from the application's resource folder. To see
    how this works, consider the following folder structure::

        /myapplication.py
        /schema.sql
        /static
            /style.css
        /template
            /layout.html
            /index.html

    If you want to open the `schema.sql` file you would do the
    following::

        with app.open_resource('schema.sql') as f:
            contents = f.read()
            do_something_with(contents)

    :param resource: the name of the resource. To access resources within
                     subfolders use forward slashes as separator.
    """
    # Resolve through pkg_resources when available, otherwise read straight
    # from the package directory on disk.
    if pkg_resources is not None:
        return pkg_resources.resource_stream(self.package_name, resource)
    return open(os.path.join(self.root_path, resource), 'rb')
def open_session(self, request):
    """Creates or opens a new session. Default implementation stores all
    session data in a signed cookie. This requires that the
    :attr:`secret_key` is set; without one, ``None`` is returned.

    :param request: an instance of :attr:`request_class`.
    """
    if self.secret_key is None:
        return None
    return SecureCookie.load_cookie(
        request, self.session_cookie_name, secret_key=self.secret_key
    )
def save_session(self, session, response):
    """Saves the session if it needs updates. For the default
    implementation, check :meth:`open_session`.

    :param session: the session to be saved (a
                    :class:`~werkzeug.contrib.securecookie.SecureCookie`
                    object)
    :param response: an instance of :attr:`response_class`
    """
    # Nothing to persist when no session was opened for this request.
    if session is None:
        return
    session.save_cookie(response, self.session_cookie_name)
def add_url_rule(self, rule, endpoint, **options):
    """Connects a URL rule. Works exactly like the :meth:`route`
    decorator but does not register the view function for the endpoint.

    Basically this example::

        @app.route('/')
        def index():
            pass

    Is equivalent to the following::

        def index():
            pass
        app.add_url_rule('/', 'index')
        app.view_functions['index'] = index

    :param rule: the URL rule as string
    :param endpoint: the endpoint for the registered URL rule. Flask
                     itself assumes the name of the view function as
                     endpoint
    :param options: the options to be forwarded to the underlying
                    :class:`~werkzeug.routing.Rule` object
    """
    # Example of what ``options`` may hold at this point:
    # {'endpoint': 'test', 'methods': ['GET']}
    options['endpoint'] = endpoint
    options.setdefault('methods', ('GET',))
    self.url_map.add(Rule(rule, **options))
def route(self, rule, **options):
    """A decorator that is used to register a view function for a
    given URL rule. Example::

        @app.route('/')
        def index():
            return 'Hello World'

    Variables parts in the route can be specified with angular
    brackets (``/user/<username>``). By default a variable part
    in the URL accepts any string without a slash however a different
    converter can be specified as well by using ``<converter:name>``.

    Variable parts are passed to the view function as keyword
    arguments.

    The following converters are possible:

    =========== ===========================================
    `int`       accepts integers
    `float`     like `int` but for floating point values
    `path`      like the default but also accepts slashes
    =========== ===========================================

    Here are some examples::

        @app.route('/')
        def index():
            pass

        @app.route('/<username>')
        def show_user(username):
            pass

        @app.route('/post/<int:post_id>')
        def show_post(post_id):
            pass

    An important detail to keep in mind is how Flask deals with trailing
    slashes. The idea is to keep each URL unique so the following rules
    apply:

    1. If a rule ends with a slash and is requested without a slash
       by the user, the user is automatically redirected to the same
       page with a trailing slash attached.
    2. If a rule does not end with a trailing slash and the user requests
       the page with a trailing slash, a 404 not found is raised.

    This is consistent with how web servers deal with static files. This
    also makes it possible to use relative link targets safely.

    The :meth:`route` decorator accepts a couple of other arguments
    as well:

    :param rule: the URL rule as string
    :param methods: a list of methods this rule should be limited
                    to (``GET``, ``POST`` etc.). By default a rule
                    just listens for ``GET`` (and implicitly ``HEAD``).
    :param subdomain: specifies the rule for the subdomain in case
                      subdomain matching is in use.
    :param strict_slashes: can be used to disable the strict slashes
                           setting for this rule. See above.
    :param options: other options to be forwarded to the underlying
                    :class:`~werkzeug.routing.Rule` object.
    """
    def decorator(f):
        # Register the rule and expose the view under its function name.
        self.add_url_rule(rule, f.__name__, **options)
        self.view_functions[f.__name__] = f
        return f
    return decorator
def errorhandler(self, code):
    """A decorator that is used to register a function give a given
    error code. Example::

        @app.errorhandler(404)
        def page_not_found():
            return 'This page does not exist', 404

    You can also register a function as error handler without using
    the :meth:`errorhandler` decorator; assigning to
    ``app.error_handlers[404]`` directly is equivalent.

    :param code: the code as integer for the handler
    """
    def register(handler):
        # dispatch_request looks handlers up by status code.
        self.error_handlers[code] = handler
        return handler
    return register
def before_request(self, f):
    """Registers a function to run before each request and returns it
    unchanged, so it can be used as a decorator."""
    self.before_request_funcs += [f]
    return f
def after_request(self, f):
    """Register a function to be run after each request; returns the
    function unchanged so it can be used as a decorator."""
    self.after_request_funcs += [f]
    return f
def context_processor(self, f):
    """Registers a template context processor function; returns the
    function unchanged so it can be used as a decorator."""
    self.template_context_processors += [f]
    return f
def match_request(self):
    """Matches the current request against the URL map and, if
    successful, stores the endpoint and view arguments on the request
    object; otherwise the routing exception propagates to the caller.
    """
    rv = _request_ctx_stack.top.url_adapter.match()
    # match() returns (endpoint, view_args); cache both on the request.
    request.endpoint, request.view_args = rv
    return rv
def dispatch_request(self):
    """Does the request dispatching.  Matches the URL and returns the
    return value of the view or error handler.  This does not have to
    be a response object.  In order to convert the return value to a
    proper response object, call :func:`make_response`.
    """
    try:
        endpoint, values = self.match_request()
        return self.view_functions[endpoint](**values)
    # NOTE: `except X, e` is Python 2 syntax -- this file predates Py3.
    except HTTPException, e:
        # An HTTP error: dispatch to a registered handler if there is
        # one; otherwise the exception itself serves as the response.
        handler = self.error_handlers.get(e.code)
        if handler is None:
            return e
        return handler(e)
    except Exception, e:
        # Any other failure is treated as a 500.  In debug mode (or if
        # no 500 handler is registered) re-raise so the developer gets
        # the full traceback instead of a swallowed error.
        handler = self.error_handlers.get(500)
        if self.debug or handler is None:
            raise
        return handler(e)
def make_response(self, rv):
    """Convert the return value of a view function into a real response
    object that is an instance of :attr:`response_class`.

    Allowed values for `rv`:

    -   an instance of :attr:`response_class`: returned unchanged
    -   :class:`str`: used as the body of a new response object
    -   :class:`unicode`: encoded to utf-8 and used as the body
    -   :class:`tuple`: expanded as positional arguments for the
        response class constructor
    -   a WSGI callable: invoked and buffered into a response object

    :param rv: the return value from the view function
    """
    response_cls = self.response_class
    if isinstance(rv, response_cls):
        return rv
    if isinstance(rv, basestring):
        # basestring covers both str and unicode on Python 2.
        return response_cls(rv)
    if isinstance(rv, tuple):
        return response_cls(*rv)
    # Anything else is assumed to be a WSGI application; buffer its
    # output into a real response object for the current environ.
    return response_cls.force_type(rv, request.environ)
def preprocess_request(self):
    """Called before the actual request dispatching; invokes every
    function registered with :meth:`before_request`.

    If any of these functions returns a non-``None`` value, that value
    is treated as if it were the view's return value and further
    request handling is stopped.
    """
    for hook in self.before_request_funcs:
        result = hook()
        if result is not None:
            return result
def process_response(self, response):
"""Can be overridden in | |
'braquial (mm/aaaa)?', response.content.decode())
@staticmethod
def _get_q_default_language_or_first(questionnaire):
    """Return the questionnaire's default language, or its first
    available language when no default has been configured.
    """
    # TODO: correct this to adapt to unique QuestionnaireDefaultLanguage
    # model with OneToOne with Questionnaire
    default = QuestionnaireDefaultLanguage.objects.filter(
        questionnaire=questionnaire
    ).first()
    if default:
        return default.questionnaire_language
    return QuestionnaireLanguage.objects.filter(
        questionnaire=questionnaire
    ).first()
def test_access_experiment_detail_after_GET_experiment(self):
    """GETting an existing experiment's detail page returns 200."""
    experiment_slug = str(Experiment.objects.first().slug)
    detail_page = self.client.get('/experiments/' + experiment_slug + '/')
    self.assertEqual(detail_page.status_code, 200)
def test_access_experiment_detail_with_wrong_url_returns_404_http_status_code(self):
    """A mangled slug must yield the site's custom 404 page."""
    # Truncate a valid slug so the URL no longer matches any experiment.
    slug = str(Experiment.objects.first().slug)[:-3]
    response = self.client.get('/experiments/' + slug + '/')
    self.assertEqual(response.status_code, 404)
    self.assertIn('404 - Not Found', response.content.decode())
    self.assertIn(
        'Ops... It seems you tried to access an inexistent page.',
        response.content.decode()
    )
    # The custom 404 page still carries the regular site chrome.
    self.assertIn(
        'Neuroscience Experiments Database', response.content.decode(),
    )
    self.assertIn('Related Projects', response.content.decode())
def test_uses_detail_template(self):
    """The experiment detail view renders the detail template."""
    experiment_slug = str(Experiment.objects.first().slug)
    detail_page = self.client.get('/experiments/' + experiment_slug + '/')
    self.assertTemplateUsed(detail_page, 'experiments/detail.html')
def test_access_experiment_detail_returns_questionnaire_data_for_default_or_first_language(self):
    """Detail page shows each questionnaire in its default language,
    falling back to the first available language when none is set."""
    experiment = Experiment.objects.last()
    g1 = create_group(1, experiment)
    g2 = create_group(1, experiment)
    create_experiment_related_objects(experiment)
    create_valid_questionnaires([g1, g2])
    response = self.client.get('/experiments/' + experiment.slug + '/')
    for group in experiment.groups.all():
        self.assertContains(
            response,
            'Questionnaires for group ' + group.title
        )
        for questionnaire in group.steps.filter(type=Step.QUESTIONNAIRE):
            # The rule is display default questionnaire language data or
            # first questionnaire language data if not set default
            # questionnaire language. So we mimic the function
            # _get_q_default_language_or_first from views that do that.
            # TODO: In tests helper we always create default
            # TODO: questionnaire language as English. So we would test
            # TODO: only if we had first language.
            q_language = self._get_q_default_language_or_first(
                questionnaire
            )
            self.assertContains(
                response, 'Questionnaire ' + q_language.survey_name
            )
    # Sample asserts for first questionnaire
    # (in Portuguese, as first questionnaire, first language, created in
    # tests helper is in Portuguese).
    self._asserts_for_first_questionnaire(response)
    # sample asserts for second questionnaire
    self._asserts_for_second_questionnaire(response)
    # sample asserts for third questionnaire
    self._asserts_for_third_questionnaire(response)
def test_access_experiment_detail_returns_questionnaire_data_for_other_language(self):
    """Placeholder: should verify questionnaire data rendered for a
    non-default language once implemented."""
    # TODO!
    pass
def test_access_experiment_without_questionnaires_returns_null_questionnaires(self):
    """An experiment with no questionnaires produces an empty
    'questionnaires' context entry."""
    # First experiment has not questionnaires. See tests helper
    experiment = Experiment.objects.first()
    detail_page = self.client.get('/experiments/' + experiment.slug + '/')
    self.assertFalse(detail_page.context['questionnaires'])
def test_access_experiment_with_invalid_questionnaire_returns_invalid_questionnaire(self):
    """A questionnaire whose survey file cannot be parsed is flagged as
    'invalid_questionnaire' in the context."""
    group = create_group(1, self.experiment)
    # questionnaire4.csv holds malformed survey data on purpose.
    q = create_questionnaire(1, 'q4', group)
    create_questionnaire_language(
        q, settings.BASE_DIR + '/experiments/tests/questionnaire4.csv',
        'en'
    )
    response = self.client.get(
        '/experiments/' + self.experiment.slug + '/'
    )
    self.assertEqual(
        response.context['questionnaires'][group.title][q.id]['survey_metadata'],
        'invalid_questionnaire'
    )
def test_access_experiment_with_one_valid_questionnaire_and_other_invalid(self):
    """One broken questionnaire must not taint a valid one: each is
    reported independently in the context."""
    experiment = Experiment.objects.last()
    g1 = create_group(1, experiment)
    g2 = create_group(1, experiment)
    q1 = create_questionnaire(1, 'q1', g1)
    # invalid questionnaire
    create_questionnaire_language(
        q1,
        settings.BASE_DIR + '/experiments/tests/questionnaire4.csv',
        'pt-BR'
    )
    # valid questionnaire
    q2 = create_questionnaire(1, 'q2', g2)
    create_questionnaire_language(
        q2,
        settings.BASE_DIR + '/experiments/tests/questionnaire1_pt-br.csv',
        'en'
    )
    response = self.client.get('/experiments/' + experiment.slug + '/')
    self.assertEqual(
        response.context['questionnaires'][g1.title][q1.id][
            'survey_metadata'], 'invalid_questionnaire'
    )
    self.assertNotEqual(
        response.context['questionnaires'][g2.title][q2.id][
            'survey_metadata'], 'invalid_questionnaire'
    )
def test_experiment_detail_page_has_change_slug_form(self):
    """The detail page context exposes a ChangeSlugForm instance."""
    experiment = create_experiment(1)
    detail_page = self.client.get('/experiments/' + experiment.slug + '/')
    self.assertIsInstance(detail_page.context['form'], ChangeSlugForm)
def test_access_experiment_wo_other_experiment_versions_returns_no_other(self):
    """With a single version, 'other_versions' in the context is empty."""
    detail_page = self.client.get('/experiments/' + self.experiment.slug + '/')
    # other_versions are the other experiments versions
    self.assertFalse(detail_page.context['other_versions'])
def test_access_experiment_with_other_experiment_versions_returns_other_versions(self):
    """With three versions, the newest one lists the two older ones."""
    version_2 = create_next_version_experiment(self.experiment)
    version_3 = create_next_version_experiment(version_2)
    detail_page = self.client.get('/experiments/' + version_3.slug + '/')
    # other_versions are the other experiments versions
    self.assertEqual(len(detail_page.context['other_versions']), 2)
class ChangeExperimentSlugTest(TestCase):
    """Tests for the trustee-only view that changes an experiment's slug."""

    def setUp(self):
        # Log in a trustee holding the 'change_slug' permission and
        # create one experiment to operate on.
        trustee = create_trustee_user('claudia')
        group = auth.models.Group.objects.get(name='trustees')
        permission = Permission.objects.get(codename='change_slug')
        group.permissions.add(permission)
        trustee_user = User.objects.get(username=trustee.username)
        self.client.login(username=trustee_user.username, password=PASSWORD)
        create_experiment(1)

    def test_change_slug_url_resolves_to_change_slug_view(self):
        """The change_slug URL routes to the change_slug view function."""
        experiment = Experiment.objects.first()
        found = resolve(
            '/experiments/' + str(experiment.id) + '/change_slug/'
        )
        self.assertEqual(found.func, change_slug)

    def test_POSTing_new_slug_returns_redirect_to_experiment_detail_page(self):
        """A valid POST redirects (302) to the experiment detail page."""
        experiment = Experiment.objects.first()
        response = self.client.post(
            '/experiments/' + str(experiment.id) + '/change_slug/',
            {'slug': 'a-brand_new-slug'}
        )
        self.assertEqual(response.status_code, 302)

    def test_cannot_POST_slug_if_not_in_staff(self):
        """Anonymous users get 403 and the slug is left untouched."""
        # logout from the system, as we alread is logged in in setUp method
        self.client.logout()
        experiment_before = Experiment.objects.first()
        new_slug = 'a-brand_new-slug'
        response = self.client.post(
            '/experiments/' + str(experiment_before.id) + '/change_slug/',
            {'slug': new_slug}
        )
        self.assertEqual(response.status_code, 403)
        # get the experiment after posting new slug without permission
        experiment_after = Experiment.objects.first()
        self.assertNotEqual(experiment_after.slug, new_slug)

    def test_POSTing_a_valid_first_version_slug_saves_new_slug_correctly(self):
        """A valid slug on a v1 experiment is saved and flashes success."""
        experiment = Experiment.objects.first()
        response = self.client.post(
            '/experiments/' + str(experiment.id) + '/change_slug/',
            {'slug': 'a-brand_new-slug-for-version-1'},
            follow=True
        )
        experiment = Experiment.objects.first()
        self.assertEqual('a-brand_new-slug-for-version-1', experiment.slug)
        message = list(response.context['messages'])[0]
        self.assertEqual(
            message.message,
            "The experiment's slug was modified"
        )
        self.assertEqual(message.tags, "success")

    def test_POSTing_a_valid_n_experiment_version_changes_all_slugs_correctly(
            self):
        """Changing the slug of the last version renames every version,
        keeping the '-vN' suffix for versions greater than 1."""
        experiment = Experiment.objects.first()
        experiment_v2 = create_next_version_experiment(experiment)
        experiment_v3 = create_next_version_experiment(experiment_v2)
        experiment_v4 = create_next_version_experiment(experiment_v3)
        self.client.post(
            '/experiments/' + str(experiment_v4.id) +
            '/change_slug/',
            {'slug': 'new-slug-for-version-4'}
        )
        for experiment in Experiment.objects.all():
            version = experiment.version
            version_suffix = '-v' + str(version) if version > 1 else ''
            self.assertEqual(
                'new-slug-for-version-4' + version_suffix,
                experiment.slug
            )

    def test_POSTing_empty_slug_returns_error_message(self):
        """An empty slug is rejected with an error message."""
        # TODO: we want to display message as errors in form not as normal
        # TODO: messages
        experiment_before = Experiment.objects.first()
        response = self.client.post(
            '/experiments/' + str(experiment_before.id) + '/change_slug/',
            {'slug': ''}, follow=True
        )
        experiment_after = Experiment.objects.first()
        self.assertEqual(experiment_before.slug, experiment_after.slug)
        message = list(response.context['messages'])[0]
        self.assertEqual(
            message.message,
            'Empty slugs is not allowed. Please enter a valid slug'
        )
        self.assertEqual(message.tags, "error")

    def test_submit_non_unique_slug_displays_error_message(self):
        """A slug already used by another experiment is rejected."""
        # TODO: we want to display message as errors in form not as normal
        # TODO: messages; make test in test_forms too
        experiment_before = Experiment.objects.first()
        other_experiment = create_experiment(1)
        response = self.client.post(
            '/experiments/' + str(experiment_before.id) + '/change_slug/',
            {'slug': other_experiment.slug}, follow=True
        )
        experiment_after = Experiment.objects.first()
        self.assertEqual(experiment_before.slug, experiment_after.slug)
        message = list(response.context['messages'])[0]
        self.assertEqual(
            message.message,
            'The slug entered is equal to other experiment slug. Please try '
            'again.'
        )
        self.assertEqual(message.tags, "error")

    def test_POSTing_invalid_slug_returns_error_message(self):
        """A slug with characters outside [a-z0-9_-] is rejected."""
        # generates random string to post random utf-8 slug
        # TODO: verify if function is returning correct string
        slug = random_utf8_string(random.randint(1, 50))
        experiment_before = Experiment.objects.first()
        response = self.client.post(
            '/experiments/' + str(experiment_before.id) + '/change_slug/',
            {'slug': slug}, follow=True
        )
        experiment_after = Experiment.objects.first()
        self.assertEqual(experiment_before.slug, experiment_after.slug)
        message = list(response.context['messages'])[0]
        self.assertEqual(
            message.message,
            'The slug entered is not allowed. Please enter a valid slug. '
            'Type only lowcase letters without accents, numbers, dash, '
            'and underscore signs'
        )
        self.assertEqual(message.tags, "error")

    def test_POSTing_slug_with_less_than_three_characters_returns_error_message(self):
        """Slugs shorter than three characters are rejected."""
        experiment_before = Experiment.objects.first()
        response = self.client.post(
            '/experiments/' + str(experiment_before.id) + '/change_slug/',
            {'slug': 'ab'}, follow=True
        )
        experiment_after = Experiment.objects.first()
        self.assertEqual(experiment_before.slug, experiment_after.slug)
        message = list(response.context['messages'])[0]
        # NOTE(review): 'two small' mirrors the application's own message
        # text; fixing the typo here alone would break this assertion.
        self.assertEqual(
            message.message,
            'The slug entered is two small. Please enter at least 3 '
            'characters'
        )
        self.assertEqual(message.tags, "error")

    def test_POSTing_same_slug_returns_no_message(self):
        """Re-posting the current slug is a silent no-op."""
        experiment = Experiment.objects.first()
        response = self.client.post(
            '/experiments/' + str(experiment.id) + '/change_slug/',
            {'slug': experiment.slug}, follow=True
        )
        self.assertFalse(response.context['messages'])
@override_settings(HAYSTACK_CONNECTIONS=TEST_HAYSTACK_CONNECTIONS)
class SearchTest(TestCase):
def setUp(self):
    """Create an approved experiment and build a fresh search index."""
    # NOTE(review): '<PASSWORD>' looks like an anonymized placeholder --
    # confirm the real fixture password before relying on this login.
    self.owner = User.objects.create_user(
        username='labor2', password='<PASSWORD>'
    )
    self.experiment = create_experiment(1, self.owner, Experiment.APPROVED)
    haystack.connections.reload('default')
    self.haystack_index('rebuild_index')
def tearDown(self):
    """Drop the search index so tests stay isolated from each other."""
    self.haystack_index('clear_index')
@staticmethod
def haystack_index(action):
    """Run a haystack index management command with stderr silenced.

    Redirect sys.stderr to avoid display
    "GET http://127.0.0.1:9200/haystack/_mapping" during tests.
    TODO: see:
    TODO: https://github.com/django-haystack/django-haystack/issues/1142

    :param action: management command name, e.g. 'rebuild_index' or
        'clear_index'
    """
    import os  # local import keeps this fix self-contained

    stderr_backup = sys.stderr
    # os.devnull is portable; the hard-coded '/dev/null' was POSIX-only.
    sys.stderr = open(os.devnull, 'w')
    try:
        call_command(action, verbosity=0, interactive=False)
    finally:
        # Restore stderr even if the command fails, so later tests (and
        # the test runner itself) keep a working stderr and the devnull
        # handle is never leaked.
        sys.stderr.close()
        sys.stderr = stderr_backup
def check_matches_on_response(self, matches, text):
    """Search for *text* and assert the result table has *matches* rows."""
    search_page = self.client.get('/search/', {'q': text})
    self.assertEqual(search_page.status_code, 200)
    # because in search results templates it's '<tr class ...>'
    self.assertContains(search_page, '<tr', matches)
def test_search_redirects_to_homepage_with_search_results(self):
    """Searching a term renders the results page with status 200."""
    search_page = self.client.get('/search/', {'q': 'plexus'})
    self.assertEqual(search_page.status_code, 200)
    # TODO: is it needed to test for redirected page?
def test_search_nothing_redirects_to_homepage(self):
    """An empty query redirects (302) back to the home page."""
    search_page = self.client.get('/search/', {'q': ''})
    self.assertEqual(search_page.status_code, 302)
    self.assertEqual(search_page['location'], '/')
@skip
def test_search_returns_only_approved_experiments(self):
    """Incomplete: should assert that only APPROVED experiments appear
    in search results."""
    # response without filter
    response = self.client.get('/search/', {'q': 'Braquial+Plexus'})
    # TODO: complete this test!
def test_change_status_from_UNDER_ANALYSIS_to_APPROVED_reindex_haystack(
        self):
    """Approving an experiment makes it appear in the search index."""
    # TODO: testing calling celery task directly. Didn't work posting
    # TODO: approved experiment. Test with POST!
    experiment = create_experiment(
        1, self.owner, Experiment.UNDER_ANALYSIS
    )
    experiment.status = Experiment.APPROVED
    experiment.save()
    self.haystack_index('rebuild_index')
    results = SearchQuerySet().filter(content=experiment.title)
    self.assertEqual(results.count(), 1)
    self.assertEqual(results[0].model_name, 'experiment')
    self.assertEqual(results[0].object.title, experiment.title)
    # TODO: test other searched objects
def test_search_eegsetting_returns_correct_number_of_objects(self):
    """Searching an EEG setting name yields three result rows."""
    test_search.SearchTest().create_objects_to_test_search_eeg_setting()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(3, 'eegsettingname')
def test_search_eegsetting_returns_matchings_containing_search_strings(self):
    """Not implemented yet: should check the matched rows' contents."""
    pass
    # TODO!
def test_search_emgsetting_returns_correct_number_of_objects(self):
    """Searching an EMG setting name yields three result rows."""
    test_search.SearchTest().create_objects_to_test_search_emgsetting()
    self.haystack_index('rebuild_index')
    # Reuse the shared helper instead of duplicating its body inline,
    # keeping this test consistent with the other search tests.
    self.check_matches_on_response(3, 'emgsettingname')
def test_search_step_returns_correct_objects(self):
    """Renaming every step to the search term makes all three match."""
    search_text = 'schritt'
    test_search.SearchTest().create_objects_to_test_search_step()
    for step in Step.objects.all():
        step.identification = search_text
        step.save()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(3, search_text)
def test_search_stimulus_step_returns_correct_objects(self):
    """Searching the stimulus-step term yields three matches."""
    test_search.SearchTest().create_objects_to_test_search_stimulus_step()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(3, 'stimulusschritt')
def test_search_instruction_step_returns_correct_objects(self):
    """Renaming every instruction's text to the term yields three
    matches."""
    search_text = 'anweisungsschritt'
    test_search.SearchTest().\
        create_objects_to_test_search_instruction_step()
    for instruction_step in Instruction.objects.all():
        instruction_step.text = search_text
        instruction_step.save()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(3, search_text)
def test_search_genericdatacollection_step_returns_correct_objects(self):
    """Searching the generic-data-collection term yields three matches."""
    test_search.SearchTest().\
        create_objects_to_test_search_genericdatacollection_step()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(3, 'generischedatensammlung')
def test_search_emgelectrodeplacementsetting_returns_correct_objects(self):
    """Searching the muscle name ('quadrizeps') yields one match."""
    test_search.SearchTest(). \
        create_objects_to_test_search_emgelectrodeplacementsetting(
            'emg_electrode_placement')
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(1, 'quadrizeps')
def test_search_eeg_electrode_position(self):
    """Renaming every EEG electrode position to the term yields one
    match."""
    search_text = 'elektrodenposition'
    test_search.SearchTest(). \
        create_objects_to_test_search_eegelectrodeposition()
    for eeg_electrode_position in EEGElectrodePosition.objects.all():
        eeg_electrode_position.name = search_text
        eeg_electrode_position.save()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(1, search_text)
# TODO: This test passes even when it should not -- investigate.
def test_search_eeg_electrode_position_returns_correct_related_objects_1(self):
    """Indexing reaches the related ElectrodeModel name."""
    search_text = 'elektrodenmodell'
    test_search.SearchTest(
    ).create_objects_to_test_search_eegelectrodeposition()
    # TODO: should test for all attributes
    for electrode_model in ElectrodeModel.objects.all():
        electrode_model.name = search_text
        electrode_model.save()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(1, search_text)
def test_search_eeg_electrode_position_returns_correct_related_objects_2(self):
    """Indexing reaches the related SurfaceElectrode name."""
    search_text = 'oberflächenelektrode'
    test_search.SearchTest(
    ).create_objects_to_test_search_eegelectrodeposition(
        'surface_electrode'
    )
    # TODO: should test for all attributes
    for surface_electrode in SurfaceElectrode.objects.all():
        surface_electrode.name = search_text
        surface_electrode.save()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(1, search_text)
def test_search_eeg_electrode_position_returns_correct_related_objects_3(self):
    """Indexing reaches the related IntramuscularElectrode strand."""
    search_text = 'intramuskuläre'
    test_search.SearchTest(
    ).create_objects_to_test_search_eegelectrodeposition(
        'intramuscular_electrode'
    )
    # TODO: should test for all attributes
    for intramuscular_electrode in IntramuscularElectrode.objects.all():
        intramuscular_electrode.strand = search_text
        intramuscular_electrode.save()
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(1, search_text)
def test_search_emgelectrodeplacementsetting_returns_correct_related_objects_1(self):
    """Indexing reaches the EMG electrode placement standardization."""
    search_text = 'standardisierung'
    test_search.SearchTest(). \
        create_objects_to_test_search_emgelectrodeplacementsetting_with_emg_electrode_placement(
            search_text
        )
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(1, search_text)
def test_search_emgelectrodeplacementsetting_returns_correct_related_objects_2(self):
    """Indexing reaches the EMG surface placement start posture."""
    search_text = 'starthaltung'
    test_search.SearchTest(). \
        create_objects_to_test_search_emgelectrodeplacementsetting_with_emg_surface_placement(
            search_text
        )
    self.haystack_index('rebuild_index')
    self.check_matches_on_response(1, search_text)
def test_search_emgelectrodeplacementsetting_returns_correct_related_objects_3(self):
search_text = 'einfügung'
test_search.SearchTest(). | |
<filename>compute_real_optimal_attacker.py
import numpy as np
def calculate_eu(target,
                 gamma,
                 eta,
                 uMat,
                 npvar,
                 N,
                 U_dc,
                 U_du,
                 U_ac,
                 U_au,
                 mode,
                 computeMode='full'):
    """Expected utility -- objective function in Appendix LP (Equation 1),
    evaluated for either the attacker or the defender.

    :param target: index of the target for which to evaluate the objective
    :param gamma: false negative rate of the detector
    :param eta: length-3 vector of attacker behavior per observation
        {n, sigma_0, sigma_1}; 1 means attack, 0 means run away (so
        eta = [1, 1, 1] attacks regardless of the observed signal and
        eta = [0, 0, 0] never attacks)
    :param uMat: 3x3 uncertainty matrix Pi of conditional probabilities
        Pr[omega^ | omega] -- how likely the attacker observes signaling
        state omega^ given the true state omega
    :param npvar: final optimal decision-variable values as a numpy array
        (length must be at least 12 * N; variable groups are laid out in
        blocks of size N)
    :param N: number of targets (graph size)
    :param U_dc: U_+^d, defender utility when the target is protected
    :param U_du: U_-^d, defender utility when protection fails
    :param U_ac: U_+^a, attacker utility when the target is protected
    :param U_au: U_-^a, attacker utility when protection fails
    :param mode: 'attacker' uses the attacker utilities; any other value
        uses the defender's (matching the original behavior)
    :param computeMode: 'full' evaluates the Appendix LP objective; any
        other value evaluates the detection-only objective from the main
        paper (deprecated; kept for guidance, unused in experiments)
    :return: the selected player's expected utility for ``target``
    """
    # Pick the utility pair for the requested player.
    if mode == 'attacker':
        U_c, U_u = U_ac, U_au
    else:
        U_c, U_u = U_dc, U_du

    objectiveValueCoef = np.zeros(len(npvar))
    # Hoist target-invariant factors once instead of re-indexing below.
    u_c = U_c[target]
    u_u = U_u[target]
    hit = 1 - gamma  # probability the detector does NOT miss

    if computeMode == 'full':
        # Attack probability weighted by how likely each true signaling
        # state is observed (column 1: quiet state, column 2: signaling
        # state of the uncertainty matrix).
        uQuiet = uMat[0, 1]*eta[0] + uMat[1, 1]*eta[1] + uMat[2, 1]*eta[2]
        uSignal = uMat[0, 2]*eta[0] + uMat[1, 2]*eta[1] + uMat[2, 2]*eta[2]

        # U_-s^a/d terms
        objectiveValueCoef[0*N + target] = u_c                 # x_t^p * U_c
        objectiveValueCoef[1*N + target] = u_c * eta[0]        # x_t^n+ * U_c
        objectiveValueCoef[2*N + target] = u_u * eta[0]        # x_t^n- * U_u
        # Combination of U_sigma1^a/d and U_sigma0^a/d: x's in U_sigma1^a/d
        objectiveValueCoef[5*N + target] = u_c * uSignal
        objectiveValueCoef[4*N + target] = (hit * u_c * uSignal) + \
                                           (gamma * u_u * uSignal)
        objectiveValueCoef[3*N + target] = u_u * uSignal
        # psi terms (detector succeeds, weight 1 - gamma)
        objectiveValueCoef[8*N + target] = (hit * u_c * uQuiet) - \
                                           (hit * u_c * uSignal)   # U_c psi_t^s+
        objectiveValueCoef[6*N + target] = (hit * u_c * uQuiet) - \
                                           (hit * u_c * uSignal)   # U_c psi_t^s-
        objectiveValueCoef[7*N + target] = (hit * u_u * uQuiet) - \
                                           (hit * u_u * uSignal)   # U_u psi_t^sbar
        # phi terms (detector misses, weight gamma)
        objectiveValueCoef[11*N + target] = (gamma * u_c * uQuiet) - \
                                            (gamma * u_c * uSignal)  # U_c phi_t^s+
        objectiveValueCoef[9*N + target] = (gamma * u_u * uQuiet) - \
                                           (gamma * u_u * uSignal)   # U_u phi_t^s-
        objectiveValueCoef[10*N + target] = (gamma * u_u * uQuiet) - \
                                            (gamma * u_u * uSignal)  # U_u phi_t^sbar
    else:
        # Deprecated detection-only objective (main paper); eta and uMat
        # are intentionally unused here.
        objectiveValueCoef[0*N + target] = u_c           # x_t^p * U_c
        objectiveValueCoef[1*N + target] = u_c           # x_t^n+ * U_c
        objectiveValueCoef[2*N + target] = u_u           # x_t^n- * U_u
        objectiveValueCoef[8*N + target] = hit * u_c     # U_c psi_t^s+
        objectiveValueCoef[6*N + target] = hit * u_c     # U_c psi_t^s-
        objectiveValueCoef[7*N + target] = hit * u_u     # U_u psi_t^sbar
        objectiveValueCoef[11*N + target] = gamma * u_c  # U_c phi_t^s+
        objectiveValueCoef[9*N + target] = gamma * u_u   # U_u phi_t^s-
        objectiveValueCoef[10*N + target] = gamma * u_u  # U_u phi_t^sbar

    return objectiveValueCoef.dot(npvar)
def compute_real_optimal_attacker(N,
variables,
strategies,
G,
uMat,
gamma,
U_dc,
U_du,
U_ac,
U_au,
computeMode='full'):
"""
title::
compute_real_optimal_attacker
description::
Recalculate objective value for variable assignments from run without
considering uncertainty, but with the true uncertainty matrix/gamma.
attributes::
N
Number of targets (graph size)
variables
Final optimal decision variable values
strategies
Pure strategies (None if wanting to run relaxed version)
G
Graph object (networkx)
uMat
Uncertainty matrix \Pi will contain the conditional probability
Pr[\omega^|\omega] for all \omega^, \omega \in \Omega
to describe how likely the attacker will observe a signaling
state \omega^ given the true signaling state is \omega
gamma
False negative rate
U_dc
U_+^d (defender utility when defender successfully protects target)
U_du
U_-^d (defender utility when defender fails to protect target)
U_ac
U_+^a (attacker utility when defender successfully protects target)
U_au
U_-^a (attacker utility when defender fails to protect target)
computeMode
Whether to calculate "full" (meaning Appendix LP) or "detection"
only (meaning in main paper). Note that "detection" is depracated,
and was not used for experiments, but is included here to provide
some guidance just in case.
returns::
curObj
Actual defender expected utility with uncertainty
bestTarget
Attacker best target in this uncertainty scenario
bestEta
Attacker best behavior in this uncertainty scenario
author::
<NAME> (<EMAIL>)
<NAME>, <NAME>, <NAME>
disclaimer::
This source code is provided "as is" and without warranties as to
performance or merchantability. The author and/or distributors of
this source code may have made statements about this source code.
Any such statements do not constitute warranties and shall not be
relied on by the user in deciding whether to use this source code.
This source code is provided without any express or implied warranties
whatsoever. Because of the diversity of conditions and hardware under
which this source code may be used, no warranty of fitness for a
particular purpose is offered. The user is advised to test the source
code thoroughly before relying on it. The user must assume the entire
risk of using the source code.
"""
npvar = np.array(variables)
objValues = []
allTs = []
allEtas = []
for target in range(N):
if computeMode == 'full':
etais = [[0,0,0]]
etai = [0,0,0]
#Determine relevant attacker behaviors (without looping through all
#possible etas, to save time). Based on Appendix Equation 10.
for state in range(3):
UTheta = np.zeros(len(variables))
UTheta[1*N+target] = U_ac[target]*uMat[state,0]
UTheta[2*N+target] = U_au[target]*uMat[state,0]
UTheta[5*N+target] = U_ac[target]*uMat[state,2]
UTheta[4*N+target] = ((1-gamma) * U_ac[target]*uMat[state,2])+\
(gamma*U_au[target]*uMat[state,2])
UTheta[3*N+target] = U_au[target]*uMat[state,2]
UTheta[8*N+target] = ((1-gamma) * -U_ac[target] * uMat[state,2])+\
((1-gamma) * U_ac[target] * uMat[state,1])
UTheta[6*N+target] = ((1-gamma) * -U_ac[target] * uMat[state,2])+\
((1-gamma) * U_ac[target] * uMat[state,1])
UTheta[7*N+target] = ((1-gamma) * -U_au[target] * uMat[state,2])+\
((1-gamma) * U_au[target] * uMat[state,1])
UTheta[11*N+target] = (gamma * -U_ac[target] * uMat[state,2])+\
(gamma * U_ac[target] * uMat[state,1])
UTheta[9*N+target] = (gamma * -U_au[target] * uMat[state,2])+\
(gamma * U_au[target] * uMat[state,1])
UTheta[10*N+target] = (gamma * -U_au[target] * uMat[state,2])+\
(gamma * U_au[target] * uMat[state,1])
#If it's positive, eta should be 1.
if UTheta.dot(npvar) > 1e-3:
for etai in etais:
etai[state] = 1
#If it's negative, eta should be 0.
elif UTheta.dot(npvar) < -1e-3:
for etai in etais:
etai[state] = 0
#If it's zero, could be 0 or 1, so add both.
else:
addEtais =[]
for etai in etais:
etai[state] = 0
addEtais.append(etai.copy())
addEtais[-1][state] = 1
etais += addEtais
#Loop through attacker behaviors and calculate attacker expected
#utilities.
for etai in etais:
attEU = calculate_eu(target,
gamma,
etai,
uMat,
npvar,
N,
U_dc,
U_du,
U_ac,
U_au,
mode='attacker',
computeMode=computeMode)
objValues.append(attEU)
allTs.append(target)
allEtas.append(etai)
else:
#Don't need behaviors for loop, just calculate directly.
attEU = calculate_eu(target,
gamma,
etai,
uMat,
npvar,
N,
U_dc,
U_du,
| |
test_merge_request_pull_merge_with_delete_branch(self, send_email):
""" Test the merge_request_pull endpoint with a merge PR and delete source branch. """
send_email.return_value = True
tests.create_projects(self.session)
tests.create_projects_git(
os.path.join(self.path, "requests"), bare=True
)
set_up_git_repo(
self.session,
self.path,
new_project=None,
branch_from="feature-branch",
mtype="merge",
)
user = tests.FakeUser()
user.username = "pingou"
with tests.user_set(self.app.application, user):
output = self.app.get("/test/pull-request/1")
self.assertEqual(output.status_code, 200)
data = {
"csrf_token": self.get_csrf(output=output),
"delete_branch": True,
}
# Merge
output = self.app.post(
"/test/pull-request/1/merge", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>PR#1: PR from the feature-branch branch - test\n - Pagure</title>",
output_text,
)
# Check the branch is not mentioned
self.assertNotIn(
'<a class="" href="/test/branch/feature-branch"', output_text
)
@patch("pagure.lib.notify.send_email")
def test_merge_request_pull_conflicts(self, send_email):
    """ Test the merge_request_pull endpoint with a conflicting PR. """
    send_email.return_value = True
    tests.create_projects(self.session)
    tests.create_projects_git(
        os.path.join(self.path, "requests"), bare=True
    )
    # Set up PR #1 whose source branch conflicts with the target branch.
    set_up_git_repo(
        self.session,
        self.path,
        new_project=None,
        branch_from="feature",
        mtype="conflicts",
    )
    user = tests.FakeUser()
    user.username = "pingou"
    with tests.user_set(self.app.application, user):
        output = self.app.get("/test/pull-request/1")
        self.assertEqual(output.status_code, 200)
        csrf_token = self.get_csrf(output=output)
        data = {"csrf_token": csrf_token}
        # Merge conflicts
        output = self.app.post(
            "/test/pull-request/1/merge", data=data, follow_redirects=True
        )
        self.assertEqual(output.status_code, 200)
        output_text = output.get_data(as_text=True)
        # The PR page is re-rendered, still open, with a conflict notice.
        self.assertIn(
            '<h4 class="ml-1">\n <div>\n '
            '<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n '
            '<span class="text-success '
            'font-weight-bold">#1</span>\n '
            '<span class="font-weight-bold">\n '
            "PR from the feature branch\n",
            output_text,
        )
        self.assertIn("Merge conflicts!", output_text)
@patch("pagure.lib.notify.send_email")
def test_merge_request_pull_conflicts_with_delete_branch(self, send_email):
    """ Test the merge_request_pull endpoint with a conflicting PR and request deletion of branch. """
    send_email.return_value = True
    tests.create_projects(self.session)
    tests.create_projects_git(
        os.path.join(self.path, "requests"), bare=True
    )
    set_up_git_repo(
        self.session,
        self.path,
        new_project=None,
        branch_from="feature-branch",
        mtype="conflicts",
    )
    user = tests.FakeUser()
    user.username = "pingou"
    with tests.user_set(self.app.application, user):
        output = self.app.get("/test/pull-request/1")
        self.assertEqual(output.status_code, 200)
        data = {
            "csrf_token": self.get_csrf(output=output),
            "delete_branch": True,
        }
        # Merge conflicts
        output = self.app.post(
            "/test/pull-request/1/merge", data=data, follow_redirects=True
        )
        self.assertEqual(output.status_code, 200)
        output_text = output.get_data(as_text=True)
        self.assertIn(
            '<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n'
            ' <span class="text-success font-weight-bold">#1</span>\n'
            ' <span class="font-weight-bold">\n'
            " PR from the feature-branch branch\n",
            output_text,
        )
        self.assertIn("Merge conflicts!", output_text)
        # Check the branch still exists
        # (merge failed, so 'delete_branch' must not have fired).
        output = self.app.get("/test/branches")
        self.assertIn("feature-branch", output.get_data(as_text=True))
    @patch("pagure.lib.notify.send_email")
    def test_merge_request_pull_nochange(self, send_email):
        """ Test the merge_request_pull endpoint with a PR that has no changes to merge. """
        # Notifications are mocked out; pretend sending always succeeds.
        send_email.return_value = True
        tests.create_projects(self.session)
        tests.create_projects_git(
            os.path.join(self.path, "requests"), bare=True
        )
        # Create PR #1 from "master" itself, so there is nothing to merge.
        set_up_git_repo(
            self.session,
            self.path,
            new_project=None,
            branch_from="master",
            mtype="nochanges",
        )
        user = tests.FakeUser()
        user.username = "pingou"
        with tests.user_set(self.app.application, user):
            output = self.app.get("/test/pull-request/1")
            self.assertEqual(output.status_code, 200)
            csrf_token = self.get_csrf(output=output)
            data = {"csrf_token": csrf_token}
            # Nothing to merge
            output = self.app.post(
                "/test/pull-request/1/merge", data=data, follow_redirects=True
            )
            self.assertEqual(output.status_code, 200)
            output_text = output.get_data(as_text=True)
            self.assertIn(
                "Nothing to do, changes were already merged", output_text
            )
            # Check if the closing notification was added
            output = self.app.get("/test/pull-request/1")
            output_text = output.get_data(as_text=True)
            self.assertIsNotNone(re.search(MERGED_PATTERN, output_text))
    @patch("pagure.lib.notify.send_email")
    def test_request_pull_close(self, send_email):
        """ Test the request_pull endpoint with a closed PR. """
        send_email.return_value = True
        # Reuse the fast-forward merge test to create and merge PR #1.
        self.test_merge_request_pull_FF()
        output = self.app.get("/test/pull-request/1")
        self.assertEqual(output.status_code, 200)
        output_text = output.get_data(as_text=True)
        # The page shows the "merged" notification...
        self.assertIsNotNone(re.search(MERGED_PATTERN, output_text))
        # ...and file links pinned to the merged commit hash.
        self.assertIn(
            'title="View file as of 2a552bb">sources</a>', output_text
        )
    @patch("pagure.lib.notify.send_email")
    def test_request_pull_disabled(self, send_email):
        """ Test the request_pull endpoint with PR disabled. """
        send_email.return_value = True
        tests.create_projects(self.session)
        tests.create_projects_git(
            os.path.join(self.path, "requests"), bare=True
        )
        set_up_git_repo(
            self.session, self.path, new_project=None, branch_from="feature"
        )
        # Project w/o pull-request
        repo = pagure.lib.query.get_authorized_project(self.session, "test")
        settings = repo.settings
        settings["pull_requests"] = False
        repo.settings = settings
        self.session.add(repo)
        self.session.commit()
        # With the feature disabled, the PR page must 404 even though
        # the pull request exists in the database.
        output = self.app.get("/test/pull-request/1")
        self.assertEqual(output.status_code, 404)
@patch("pagure.lib.notify.send_email")
@patch("pagure.lib.git.update_pull_ref")
def test_request_pull_empty_repo(self, send_email, update_pull_ref):
""" Test the request_pull endpoint against an empty repo. """
# Mock update_pull_ref or the repo won't be empty anymore
# (the PR will have been pushed to refs/pull)
send_email.return_value = True
tests.create_projects(self.session)
item = pagure.lib.model.Project(
user_id=2, # foo
name="test",
description="test project #1",
hook_token="aaabbb",
is_fork=True,
parent_id=1,
)
self.session.add(item)
self.session.commit()
tests.create_projects_git(
os.path.join(self.path, "requests"), bare=True
)
tests.create_projects_git(
os.path.join(self.path, "repos", "forks", "foo"), bare=True
)
# Create a git repo to play with
gitrepo = os.path.join(self.path, "repos", "test.git")
self.assertFalse(os.path.exists(gitrepo))
os.makedirs(gitrepo)
repo = pygit2.init_repository(gitrepo, bare=True)
# Create a fork of this repo
newpath = tempfile.mkdtemp(prefix="pagure-fork-test")
gitrepo = os.path.join(self.path, "repos", "forks", "foo", "test.git")
new_repo = pygit2.clone_repository(gitrepo, newpath)
# Edit the sources file again
with open(os.path.join(newpath, "sources"), "w") as stream:
stream.write("foo\n bar\nbaz\n boose")
new_repo.index.add("sources")
new_repo.index.write()
# Commits the files added
tree = new_repo.index.write_tree()
author = pygit2.Signature("<NAME>", "<EMAIL>")
committer = pygit2.Signature("Cecil Committer", "<EMAIL>")
new_repo.create_commit(
"refs/heads/feature",
author,
committer,
"A commit on branch feature",
tree,
[],
)
refname = "refs/heads/feature:refs/heads/feature"
ori_remote = new_repo.remotes[0]
PagureRepo.push(ori_remote, refname)
# Create a PR for these changes
project = pagure.lib.query.get_authorized_project(self.session, "test")
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_from=item,
branch_from="feature",
repo_to=project,
branch_to="master",
title="PR from the feature branch",
user="pingou",
)
self.session.commit()
self.assertEqual(req.id, 1)
self.assertEqual(req.title, "PR from the feature branch")
output = self.app.get("/test/pull-request/1")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
'<h4 class="ml-1">\n <div>\n '
'<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n '
'<span class="text-success '
'font-weight-bold">#1</span>\n '
'<span class="font-weight-bold">\n '
"PR from the feature branch\n",
output_text,
)
self.assertTrue(output_text.count('<span class="commitdate"'), 1)
self.assertTrue(update_pull_ref.called)
shutil.rmtree(newpath)
    @patch("pagure.lib.notify.send_email")
    def test_request_pull_empty_fork(self, send_email):
        """ Test the request_pull endpoint from an empty fork. """
        send_email.return_value = True
        tests.create_projects(self.session)
        # Fork of "test" owned by user "foo"; it never gets any commits.
        item = pagure.lib.model.Project(
            user_id=2,  # foo
            name="test",
            description="test project #1",
            hook_token="aa<PASSWORD>",
            is_fork=True,
            parent_id=1,
        )
        self.session.add(item)
        self.session.commit()
        tests.create_projects_git(
            os.path.join(self.path, "requests"), bare=True
        )
        tests.create_projects_git(
            os.path.join(self.path, "repos", "forks", "foo"), bare=True
        )
        # Create a git repo to play with
        gitrepo = os.path.join(self.path, "repos", "test.git")
        self.assertFalse(os.path.exists(gitrepo))
        os.makedirs(gitrepo)
        repo = pygit2.init_repository(gitrepo, bare=True)
        # Create a fork of this repo
        newpath = tempfile.mkdtemp(prefix="pagure-fork-test")
        gitrepo = os.path.join(self.path, "repos", "forks", "foo", "test.git")
        new_repo = pygit2.clone_repository(gitrepo, newpath)
        # Create a PR for these "changes" (there are none, both repos are
        # empty)
        project = pagure.lib.query.get_authorized_project(self.session, "test")
        req = pagure.lib.query.new_pull_request(
            session=self.session,
            repo_from=item,
            branch_from="feature",
            repo_to=project,
            branch_to="master",
            title="PR from the feature branch",
            user="pingou",
        )
        self.session.commit()
        self.assertEqual(req.id, 1)
        self.assertEqual(req.title, "PR from the feature branch")
        # The page renders with a flash message instead of a diff.
        output = self.app.get("/test/pull-request/1", follow_redirects=True)
        self.assertEqual(output.status_code, 200)
        output_text = output.get_data(as_text=True)
        self.assertIn(
            "<title>PR#1: PR from the feature branch - test\n - Pagure</title>",
            output_text,
        )
        self.assertIn(
            "Fork is empty, there are no "
            "commits to create a pull request with",
            output_text,
        )
        shutil.rmtree(newpath)
@patch("pagure.lib.notify.send_email")
def test_request_pulls_order(self, send_email):
"""Test the request_pulls
i.e Make sure that the results are displayed
in the order required by the user"""
send_email.return_value = True
# Initially no project
output = self.app.get("/test/pull-requests")
self.assertEqual(output.status_code, 404)
tests.create_projects(self.session)
tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
repo = pagure.lib.query.get_authorized_project(self.session, "test")
item = pagure.lib.model.Project(
user_id=2,
name="test",
description="test project #1",
hook_token="<PASSWORD>",
is_fork=True,
parent_id=1,
)
self.session.add(item)
self.session.commit()
# create PR's to play with
# PR-1
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_to=repo,
repo_from=item,
branch_from="feature",
branch_to="master",
title="PR from the feature branch",
user="pingou",
status="Open",
)
self.session.commit()
self.assertEqual(req.id, 1)
self.assertEqual(req.title, "PR from the feature branch")
# PR-2
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_to=repo,
branch_to="master",
branch_from="feature",
repo_from=item,
title="test PR",
user="pingou",
status="Open",
)
self.session.commit()
self.assertEqual(req.title, "test PR")
# PR-3
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_to=repo,
branch_from="feature",
branch_to="master",
repo_from=item,
title="test Invalid PR",
user="pingou",
status="Closed",
)
self.session.commit()
self.assertEqual(req.title, "test Invalid PR")
# PR-4
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_to=repo,
branch_from="feature",
title="test PR for sort",
repo_from=item,
user="pingou",
branch_to="master",
status="Open",
)
self.session.commit()
self.assertEqual(req.title, "test PR for sort")
# sort by last_updated
output = self.app.get("/test/pull-requests?order_key=last_updated")
output_text = output.get_data(as_text=True)
tr_elements = re.findall(
'<div class="request-row list-group-item list-group-item-action ">(.*?)</div><!--end request-row-->',
output_text,
re.M | re.S,
)
self.assertEqual(output.status_code, 200)
# Make sure that issue four is first since it was modified last
self.assertIn('href="/test/pull-request/4"', tr_elements[0])
self.assertIn('href="/test/pull-request/2"', tr_elements[1])
self.assertIn('href="/test/pull-request/1"', tr_elements[2])
pr_one = pagure.lib.query.search_pull_requests(
self.session, project_id=1, requestid=1
)
pr_one.updated_on = datetime.utcnow() + timedelta(seconds=2)
self.session.add(pr_one)
self.session.commit()
# sort by last_updated
output = self.app.get("/test/pull-requests?order_key=last_updated")
output_text = output.get_data(as_text=True)
tr_elements = re.findall(
'<div class="request-row list-group-item list-group-item-action ">(.*?)</div><!--end request-row-->',
output_text,
re.M | re.S,
)
self.assertEqual(output.status_code, 200)
# Make sure that PR four is first since it was modified last
self.assertIn('href="/test/pull-request/1"', tr_elements[0])
# Make sure that PR two is second since it was modified second
self.assertIn('href="/test/pull-request/4"', tr_elements[1])
# Make sure that PR one is last since it was modified first
self.assertIn('href="/test/pull-request/2"', tr_elements[2])
# Now query so that the results are ascending
output = self.app.get(
"/test/pull-requests?" "order_key=last_updated&order=asc"
)
output_text = output.get_data(as_text=True)
tr_elements = re.findall(
'<div class="request-row list-group-item list-group-item-action ">(.*?)</div><!--end request-row-->',
output_text,
re.M | re.S,
)
self.assertIn('href="/test/pull-request/2"', tr_elements[0])
self.assertIn('href="/test/pull-request/4"', tr_elements[1])
self.assertIn('href="/test/pull-request/1"', tr_elements[2])
# check that search_pattern argument works
output = self.app.get("/test/pull-requests?search_pattern=feature")
output_text = output.get_data(as_text=True)
tr_elements = re.findall(
'<div class="request-row list-group-item list-group-item-action ">(.*?)</div><!--end request-row-->',
output_text,
re.M | re.S,
)
self.assertIn('href="/test/pull-request/1"', tr_elements[0])
self.assertEqual(len(tr_elements), 1)
output = self.app.get("/test/pull-requests?search_pattern=PR")
output_text = output.get_data(as_text=True)
tr_elements = re.findall(
'<div class="request-row list-group-item list-group-item-action ">(.*?)</div><!--end request-row-->',
output_text,
re.M | re.S,
)
self.assertIn('href="/test/pull-request/4"', tr_elements[0])
self.assertIn('href="/test/pull-request/2"', tr_elements[1])
self.assertIn('href="/test/pull-request/1"', tr_elements[2])
self.assertEqual(len(tr_elements), 3)
output = self.app.get("/test/pull-requests?search_pattern=*PR")
output_text = output.get_data(as_text=True)
tr_elements = re.findall(
'<div class="request-row list-group-item list-group-item-action ">(.*?)</div><!--end request-row-->',
output_text,
re.M | re.S,
)
self.assertEqual(len(tr_elements), 1)
| |
header you want to replace.
- This parameter is only valid with the C(replace) type.
type: dict
suboptions:
event:
description:
- Type of event when the C(http_host) is replaced.
type: str
required: True
choices:
- request
- proxy_connect
- proxy_request
value:
description:
- The value of C(http_host).
type: str
required: True
version_added: "1.8.0"
http_uri:
description:
- Replaces HTTP URI, path, or string.
- This parameter is only valid with the C(replace) type.
type: dict
suboptions:
event:
description:
- Type of event when the C(http_uri) is replaced.
type: str
required: True
choices:
- request
- proxy_connect
- proxy_request
type:
description:
- Specifies the part of the C(http_uri) to be replaced.
type: str
required: True
choices:
- path
- query_string
- full_string
value:
description:
- The value of C(http_uri).
type: str
required: True
version_added: "1.8.0"
conditions:
description:
- A list of attributes that describe the condition.
- See suboptions for details on how to construct each list entry.
- The ordering of this list is important, the module ensures the order is
kept when modifying the task.
- The suboption options below are not required for all condition types,
read the description for more details.
- These conditions can be specified in any order. Despite the fact they are in a list,
the order in the list does not matter to the BIG-IP.
type: list
elements: dict
suboptions:
type:
description:
- The condition type. This value controls which of the following options are required.
- "When C(type) is C(http_uri), the valid choices are: C(path_begins_with_any), C(path_contains) or
C(path_is_any)."
- "When C(type) is C(http_host), the valid choices are: C(host_is_any), C(host_is_not_any),
C(host_begins_with_any) or C(host_ends_with_any)."
- "When C(type) is C(http_header), the C(header_name) parameter is mandatory and the valid choice is:
C(header_is_any)."
- "When C(type) is C(http_method), the valid choices are: C(method_matches_with_any)."
- When C(type) is C(all_traffic), the system removes all existing conditions from
this rule.
type: str
required: True
choices:
- http_uri
- all_traffic
- http_host
- http_header
- http_method
- ssl_extension
- tcp
path_begins_with_any:
description:
- A list of strings of characters the HTTP URI should start with.
- This parameter is only valid with the C(http_uri) type.
type: list
elements: str
path_contains:
description:
- A list of strings of characters the HTTP URI should contain.
- This parameter is only valid with the C(http_uri) type.
type: list
elements: str
version_added: "1.8.0"
path_is_any:
description:
- A list of strings of characters the HTTP URI should match.
- This parameter is only valid with the C(http_uri) type.
type: list
elements: str
version_added: "1.8.0"
host_is_any:
description:
- A list of strings of characters the HTTP Host should match.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
host_is_not_any:
description:
- A list of strings of characters the HTTP Host should not match.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
host_begins_with_any:
description:
- A list of strings of characters the HTTP Host should start with.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
host_ends_with_any:
description:
- A list of strings of characters the HTTP Host should end with.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
version_added: "1.8.0"
header_is_any:
description:
- A list of strings of characters the HTTP Header value should match.
- This parameter is only valid with the C(http_header) type.
type: list
elements: str
version_added: "1.8.0"
header_name:
description:
- A name of C(http_header).
- This parameter is only valid with the C(http_header) type.
type: str
version_added: "1.8.0"
method_matches_with_any:
description:
- A list of strings of characters the HTTP Method value should match.
- This parameter is only valid with the C(http_method) type.
type: list
elements: str
version_added: "1.10.0"
server_name_is_any:
description:
- A list of strings of characters the SSL Extension should match.
- This parameter is only valid with the C(ssl_extension) type.
type: list
elements: str
address_matches_with_any:
description:
- A list of IP Subnet address strings the IP address should match.
- This parameter is only valid with the C(tcp) type.
type: list
elements: str
version_added: "1.8.0"
address_matches_with_datagroup:
description:
- A list of internal datagroup strings the IP address should match.
- This parameter is only valid with the C(tcp) type.
type: list
elements: str
version_added: "1.8.0"
address_matches_with_external_datagroup:
description:
- A list of external datagroup strings the IP address should match.
- This parameter is only valid with the C(tcp) type.
type: list
elements: str
version_added: "1.10.0"
event:
description:
- Events on which conditions type match rules can be triggered.
- Supported only for C(http_header), C(http_method), C(ssl_extension) and C(tcp).
- "Valid choices for C(http_header) condition types are: C(proxy_connect),
C(proxy_request), C(proxy_response), C(request) and C(response)."
- "Valid choices for C(http_method) condition types are: C(proxy_connect),
C(proxy_request), C(proxy_response), C(request) and C(response)."
- "Valid choices for C(tcp) condition types are: C(request), C(client_accepted),
C(proxy_connect), C(proxy_request), C(proxy_response), C(ssl_client_hello), and
C(ssl_client_server_hello_send)."
- "Valid choices for C(ssl_extension) are: C(ssl_client_hello), and C(ssl_client_server_hello_send)."
type: str
state:
description:
- When C(present), ensures the key is uploaded to the device. When
C(absent), ensures the key is removed from the device. If the key
is currently in use, the module will not be able to remove the key.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5networks.f5_modules.f5
requirements:
- BIG-IP >= v12.1.0
author:
- <NAME> (@caphrim007)
- <NAME> (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create policies
bigip_policy:
name: Policy-Foo
state: present
provider:
server: lb.mydomain.com
user: admin
password: <PASSWORD>
delegate_to: localhost
- name: Add a rule to the new policy
bigip_policy_rule:
policy: Policy-Foo
name: rule3
conditions:
- type: http_uri
path_begins_with_any:
- /ABC
actions:
- type: forward
pool: pool-svrs
provider:
server: lb.mydomain.com
user: admin
password: <PASSWORD>
delegate_to: localhost
- name: Add multiple rules to the new policy
bigip_policy_rule:
policy: Policy-Foo
name: "{{ item.name }}"
conditions: "{{ item.conditions }}"
actions: "{{ item.actions }}"
provider:
server: lb.mydomain.com
user: admin
password: <PASSWORD>
delegate_to: localhost
loop:
- name: rule1
actions:
- type: forward
pool: pool-svrs
conditions:
- type: http_uri
path_begins_with_any:
- /euro
- name: rule2
actions:
- type: forward
pool: pool-svrs
conditions:
- type: http_uri
path_begins_with_any:
- /HomePage/
- name: rule3
actions:
- type: set_variable
variable_name: user-agent
expression: tcl:[HTTP::header User-Agent]
event: request
conditions:
- type: http_uri
path_begins_with_any:
- /HomePage/
- name: Remove all rules and conditions from the rule
bigip_policy_rule:
policy: Policy-Foo
name: rule1
conditions:
- type: all_traffic
actions:
- type: ignore
provider:
server: lb.mydomain.com
user: admin
password: <PASSWORD>
delegate_to: localhost
'''
RETURN = r'''
actions:
description: The new list of actions applied to the rule.
returned: changed
type: complex
contains:
type:
description: The action type.
returned: changed
type: str
sample: forward
pool:
description: Pool for forwarding to.
returned: changed
type: str
sample: foo-pool
sample: hash/dictionary of values
conditions:
description: The new list of conditions applied to the rule.
returned: changed
type: complex
contains:
type:
description: The condition type.
returned: changed
type: str
sample: http_uri
path_begins_with_any:
description: List of strings the URI begins with.
returned: changed
type: list
sample: [foo, bar]
sample: hash/dictionary of values
description:
description: The new description of the rule.
returned: changed
type: str
sample: My rule
rule_order:
description: Specifies a number that indicates the order of this rule relative to other rules in the policy.
returned: changed
type: int
sample: 10
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ansible.module_utils.six import iteritems
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name
)
from ..module_utils.compare import compare_complex_list
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
    """Base parameter mapping between the module options and the BIG-IP
    REST representation of a policy rule.
    """

    # REST attribute name -> module parameter name.
    api_map = {
        'actionsReference': 'actions',
        'conditionsReference': 'conditions',
        'ordinal': 'rule_order',
    }

    # Attributes sent to the REST API on create/update.
    api_attributes = [
        'description',
        'actions',
        'conditions',
        'ordinal',
    ]

    # Attributes compared to detect configuration drift.
    updatables = [
        'actions',
        'conditions',
        'description',
        'rule_order',
    ]

    # Attributes reported back to the user on change.
    # Fixed: was 'action', which matched neither the 'actions' property
    # (via api_map) nor the documented RETURN block, so action changes
    # were silently omitted from the module's return data.
    returnables = [
        'description',
        'actions',
        'conditions',
        'rule_order',
    ]

    @property
    def name(self):
        """The rule name, or None when not supplied."""
        return self._values.get('name', None)

    @property
    def description(self):
        """The rule description, or None when not supplied."""
        return self._values.get('description', None)

    @property
    def policy(self):
        """The name of the policy this rule belongs to, or None."""
        if self._values['policy'] is None:
            return None
        return self._values['policy']
class ApiParameters(Parameters):
def _remove_internal_keywords(self, resource):
items = | |
# -*- coding: utf-8 -*-
import pytest
from hexrec.blocks import *
# ============================================================================
def test_chop_blocks_doctest():
    """Mirror the ``chop_blocks`` docstring examples."""
    # Width-2 chunks, addresses offset by 10.
    assert list(chop_blocks('ABCDEFG', 2, start=10)) == \
        [(10, 'AB'), (12, 'CD'), (14, 'EF'), (16, 'G')]
    # Width-4 chunks aligned with offset 3, starting address 10.
    assert list(chop_blocks('ABCDEFG', 4, 3, 10)) == \
        [(13, 'A'), (14, 'BCDE'), (18, 'FG')]
# ============================================================================
def test_overlap_doctest():
    """Mirror the ``overlap`` docstring examples."""
    disjoint = ((1, 'ABCD'), (5, 'xyz'))
    crossing = ((1, 'ABCD'), (3, 'xyz'))
    assert not overlap(*disjoint)
    assert overlap(*crossing)
# ============================================================================
def test_sequence_doctest():
    """Mirror the ``check_sequence`` docstring examples."""
    valid = [(1, 'ABC'), (6, 'xyz')]
    overlapping = [(1, 'ABC'), (2, 'xyz')]
    unordered = [(6, 'ABC'), (1, 'xyz')]
    assert check_sequence(valid)
    assert not check_sequence(overlapping)
    assert not check_sequence(unordered)
# ============================================================================
def test_sorting_doctest():
    """Mirror the ``sorting`` docstring example."""
    blocks = [(2, 'ABC'), (7, '>'), (2, '$'), (0, '<'), (2, '11')]
    expected = [(0, '<'), (2, 'ABC'), (2, '$'), (2, '11'), (7, '>')]
    # sorted() is stable, so blocks sharing a start address keep their
    # original relative order, exactly like list.sort().
    assert sorted(blocks, key=sorting) == expected
# ============================================================================
def test_locate_at_doctest():
    """Mirror the ``locate_at`` docstring example."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
    expected = [None, 0, 0, 0, 0, None, 1, None, 2, 2, 2, None]
    for address, index in enumerate(expected):
        assert locate_at(blocks, address) == index
def test_locate_at():
    """An empty block list never contains any address."""
    assert locate_at((), 1) is None
# ============================================================================
def test_locate_start_doctest():
    """Mirror the ``locate_start`` docstring example."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
    expected = [0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3]
    for address, index in enumerate(expected):
        assert locate_start(blocks, address) == index
def test_locate_start():
    """With no blocks the start index is always 0."""
    assert locate_start((), 1) == 0
# ============================================================================
def test_locate_endex_doctest():
    """Mirror the ``locate_endex`` docstring example."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
    expected = [0, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3]
    for address, index in enumerate(expected):
        assert locate_endex(blocks, address) == index
def test_locate_endex():
    """With no blocks the end index is always 0."""
    assert locate_endex((), 1) == 0
# ============================================================================
def test_shift_doctest():
    """Mirror the ``shift`` docstring example."""
    # Shifting by -1 moves every block one address down.
    assert shift([(1, 'ABCD'), (7, 'xyz')], -1) == [(0, 'ABCD'), (6, 'xyz')]
# ============================================================================
def test_read_doctest():
    """Mirror the ``read`` docstring examples."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
    # Plain extraction of [3, 10) without filling holes.
    assert read(blocks, 3, 10, None) == [(3, 'CD'), (6, '$'), (8, 'xy')]
    # Holes inside the range are filled with the pattern.
    assert read(blocks, 3, 10, '#', ''.join) == \
        [(3, 'CD'), (5, '#'), (6, '$'), (7, '#'), (8, 'xy')]
    # Open start: reading begins at the first block.
    assert read(blocks, None, 10, None) == [(1, 'ABCD'), (6, '$'), (8, 'xy')]
    # Open end: reading continues through the last block.
    assert read(blocks, 3, None, None) == [(3, 'CD'), (6, '$'), (8, 'xyz')]
    # A range falling entirely inside a hole yields nothing.
    assert read(blocks, 5, 6, None) == []
def test_read():
    """Extra ``read`` corner cases not covered by the doctest."""
    assert read([(1, 'ABCD')], 2, 4) == [(2, 'BC')]
    # A fully open range returns the blocks unchanged.
    assert read([(2, 'BC')], None, None) == [(2, 'BC')]
    # An inverted range is empty.
    assert read([(2, 'BC')], 3, -3) == []
    # Reading from no blocks is always empty.
    assert read([], None, None) == []
    assert read([], 3, None) == []
    assert read([], None, 3) == []
# ============================================================================
def test_clear_doctest():
    """Mirror the ``clear`` docstring example."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
    blocks = clear(blocks, 4, 9)   # punch a hole through the middle
    blocks = clear(blocks, 2, 2)   # empty range: no effect
    blocks = clear(blocks, 2, 3)   # drop 'B'
    assert blocks == [(1, 'A'), (3, 'C'), (9, 'yz')]
def test_clear():
    """Extra ``clear`` corner cases."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
    # Clearing the whole (open) range removes everything.
    assert clear(blocks, None, None) == []
    # An inverted range leaves the blocks untouched.
    assert clear(blocks, 3, -3) == blocks
    # Clearing an empty block list is a no-op.
    assert clear([], None, None) == []
    assert clear([], 3, None) == []
    assert clear([], None, 3) == []
# ============================================================================
def test_delete_doctest():
    """Mirror the ``delete`` docstring example."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
    blocks = delete(blocks, 4, 9)   # remove range, shifting later blocks down
    blocks = delete(blocks, 2, 2)   # empty range: no effect
    blocks = delete(blocks, 2, 3)   # remove one more item
    assert blocks == [(1, 'A'), (2, 'C'), (3, 'yz')]
def test_delete():
    """Deleting over a fully open range removes every block."""
    assert delete([(1, 'ABCD'), (6, '$'), (8, 'xyz')], None, None) == []
# ============================================================================
def test_reserve_doctest():
    """Mirror the ``reserve`` docstring example."""
    blocks = [(0, 'ABCD'), (6, 'xyz')]
    blocks = reserve(blocks, 10, 1)  # past the end: nothing to move
    blocks = reserve(blocks, 8, 1)   # splits 'xyz' around the reserved cell
    assert blocks == [(0, 'ABCD'), (6, 'xy'), (9, 'z')]
def test_reserve():
    """Extra ``reserve`` corner cases."""
    blocks = [(0, 'ABCD'), (6, 'xyz')]
    # Reserving zero items is a no-op.
    assert reserve(blocks, 10, 0) == blocks
    # Reserving right at a block start shifts that block forward.
    assert reserve(blocks, 6, 1) == [(0, 'ABCD'), (7, 'xyz')]
# ============================================================================
def test_insert_doctest():
    """Mirror the ``insert`` docstring example."""
    blocks = [(0, 'ABCD'), (6, 'xyz')]
    blocks = insert(blocks, (10, '$'))  # append after the last block
    blocks = insert(blocks, (8, '1'))   # split 'xyz' and shift its tail
    assert blocks == [(0, 'ABCD'), (6, 'xy'), (8, '1'), (9, 'z'), (11, '$')]
def test_insert():
    """Extra ``insert`` corner cases."""
    blocks = [(0, 'ABCD'), (6, 'xyz')]
    # Inserting empty items is a no-op.
    assert insert(blocks, (10, '')) == blocks
    # Inserting right at a block start shifts that block forward.
    assert insert(blocks, (6, '$')) == [(0, 'ABCD'), (6, '$'), (7, 'xyz')]
# ============================================================================
def test_write_doctest():
    """Mirror the ``write`` docstring example."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xy')]
    # Writing overwrites whatever lies under the written range.
    assert write(blocks, (3, '123456')) == [(1, 'AB'), (3, '123456'), (9, 'y')]
def test_write():
    """Writing empty items leaves the blocks unchanged."""
    blocks = [(1, 'ABCD'), (6, '$'), (8, 'xy')]
    assert write(blocks, (3, '')) == blocks
# ============================================================================
def test_fill_doctest():
    """Mirror the ``fill`` docstring examples."""
    blocks = [(1, 'ABC'), (6, 'xyz')]
    # The whole extent is overwritten with the repeating pattern.
    assert fill(blocks, pattern='123', join=''.join) == [(1, '23123123')]
    # Only [0, 5) is filled; blocks past the range survive.
    assert fill(blocks, pattern='123', start=0, endex=5, join=''.join) == \
        [(0, '12312'), (6, 'xyz')]
    # Only [5, 10) is filled; blocks before the range survive.
    assert fill(blocks, pattern='123', start=5, endex=10, join=''.join) == \
        [(1, 'ABC'), (5, '31231')]
def test_fill():
    """Extra ``fill`` corner cases."""
    # Filling without a pattern is an error.
    with pytest.raises(ValueError):
        fill([])
    blocks = [(1, 'ABC'), (6, 'xyz')]
    # An empty range is a no-op.
    assert fill(blocks, start=5, endex=5, join=''.join) == blocks
    # An oversized pattern is clipped to the filled extent.
    assert fill(blocks, pattern=('#' * 64), join=''.join) == [(1, '########')]
# ============================================================================
def test_flood_doctest():
    """Mirror the ``flood`` docstring examples."""
    blocks = [(1, 'ABC'), (6, 'xyz')]
    # Holes between blocks are filled with the pattern.
    assert flood(blocks, pattern='123', join=''.join) == \
        [(1, 'ABC'), (4, '23'), (6, 'xyz')]
    # flood_only=True returns just the filler blocks.
    assert flood(blocks, pattern='123', flood_only=True, join=''.join) == \
        [(4, '23')]
    # Explicit [0, 5) range floods before and between the blocks.
    assert flood(blocks, 0, 5, '123', join=''.join) == \
        [(0, '1'), (1, 'ABC'), (4, '2'), (6, 'xyz')]
    # Explicit [5, 10) range floods between and after the blocks.
    assert flood(blocks, 5, 10, '123', join=''.join) == \
        [(1, 'ABC'), (5, '3'), (6, 'xyz'), (9, '1')]
def test_flood():
    """Extra ``flood`` corner cases."""
    # Flooding without a pattern is an error.
    with pytest.raises(ValueError):
        flood([])
    blocks = [(1, 'ABC'), (6, 'xyz')]
    # An empty range is a no-op.
    assert flood(blocks, start=5, endex=5, join=''.join) == blocks
    # An oversized pattern only ever fills the holes.
    assert flood(blocks, pattern=('#' * 64), join=''.join) == \
        [(1, 'ABC'), (4, '##'), (6, 'xyz')]
# ============================================================================
def test_merge_doctest():
    """Mirror the ``merge`` docstring example."""
    pieces = [(0, 'Hello,'), (6, ' '), (7, 'World'), (12, '!')]
    assert merge(pieces, join=''.join) == [(0, 'Hello, World!')]
def test_merge():
    """Empty items are dropped and only contiguous runs are joined."""
    pieces = [(0, 'Hello,'), (6, ''), (7, 'World'), (12, '!'), (15, '$')]
    assert merge(pieces, join=''.join) == \
        [(0, 'Hello,'), (7, 'World!'), (15, '$')]
# ============================================================================
def test_collapse_doctest():
    """Mirror the ``collapse`` docstring example."""
    layered = [
        (0, '0123456789'),
        (0, 'ABCD'),
        (3, 'EF'),
        (0, '$'),
        (6, 'xyz'),
    ]
    # Later blocks shadow earlier ones; only the surviving fragments remain.
    expected = [(5, '5'), (1, 'BC'), (3, 'EF'), (0, '$'), (9, '9'), (6, 'xyz')]
    assert collapse(layered) == expected
def test_collapse():
    """Extra ``collapse`` corner cases with empty and shadowed blocks."""
    # Empty items collapse to None placeholders.
    shadowed = [
        (0, ''),
        (0, 'ABCD'),
        (3, ''),
        (1, '$'),
        (0, 'xyz'),
    ]
    assert collapse(shadowed) == [(0, None), (3, 'D'), (1, None), (0, 'xyz')]
    # Same layering as the doctest, minus the background block.
    layered = [
        (0, 'ABCD'),
        (3, 'EF'),
        (0, '$'),
        (6, 'xyz'),
    ]
    assert collapse(layered) == [(1, 'BC'), (3, 'EF'), (0, '$'), (6, 'xyz')]
# ============================================================================
def test_union_doctest():
    """Mirror the ``union`` docstring example."""
    older = [
        (0, '0123456789'),
        (0, 'ABCD'),
    ]
    newer = [
        (3, 'EF'),
        (0, '$'),
        (6, 'xyz'),
    ]
    # The second argument wins wherever the two sets of blocks overlap.
    expected = [(0, '$'), (1, 'BC'), (3, 'EF'), (5, '5'), (6, 'xyz'), (9, '9')]
    assert union(older, newer, join=''.join) == expected
# ============================================================================
class TestMemory:
def test___init__(self):
obj = Memory(items_type=str)
assert obj.blocks == []
assert obj.items_type == str
assert obj.items_join == ''.join
assert obj.autofill is None
assert obj.automerge
with pytest.raises(ValueError):
Memory(items='ABC', blocks=[(0, 'abc')], items_type=str)
obj = Memory(items='ABC', start=1, items_type=str)
assert obj.blocks == [(1, 'ABC')]
obj = Memory(blocks=[(1, 'ABC')], automerge=False, items_type=str)
assert obj.blocks == [(1, 'ABC')]
def test___str__doctest(self):
memory = Memory(items_type=str)
memory.blocks = [(1, 'ABC'), (7, 'xyz')]
ans_out = str(memory)
ans_ref = 'ABCxyz'
assert ans_out == ans_ref
def test___bool__(self):
obj = Memory(items_type=str)
assert not bool(obj)
obj.blocks = [(1, 'ABC')]
assert bool(obj)
def test___eq__(self):
obj1 = Memory(items_type=str)
obj1.blocks = [(1, 'ABC'), (7, 'xyz')]
obj2 = Memory()
obj2.blocks = [(1, 'ABC'), (7, 'xyz')]
assert obj1 == obj2
blocks2 = [(1, | |
two digits are pressed accidentally (e.g.
pressing/missing extra 0s) or a comma is missing (e.g. 37.7 as
377).
Parameters
----------
series: pd.Series
The series to correct.
range:
The desired range to accept the correction.
orders: list, default [10, 100]
The orders of magnitude to try.
Returns
-------
pd.Series
The corrected series.
See Also
--------
Examples: :ref:`sphx_glr__examples_correctors_plot_order_magnitude.py`.
Examples
--------
.. doctest::
>>> body_temperature = pd.Series([36, 366, 3660])
>>> order_magnitude_correction(body_temperature, range=[30, 43])
[36, 36.6, 36.66]
"""
# Create transform
transform = pd.to_numeric(series.copy(deep=True))
# Range
low, high = range
# Loop
for i in orders:
aux = (transform / i)
idx = aux.between(low, high)
transform[idx] = aux[idx]
# Return
return transform
def unit_correction(series, unit_from=None, unit_to=None, range=None, record=None):
    """Corrects issues related with units.

    Converts the values of ``series`` from ``unit_from`` to ``unit_to``
    using the :mod:`pint` unit registry.

    .. todo: It can be implemented in a better way.
            if isintance(series, pd.Series):
                transformed = series.transformed  # the copy
            Do stuff
            return depending on input parameter

    Parameters
    ----------
    series: pd.Series
        The series to correct
    unit_from:
        The unit that has been used to record the series.
    unit_to:
        The unit the series should be converted too.
    range:
        The range to verify whether the conversion is valid.
        NOTE(review): currently unused — no range validation is performed
        (see the commented-out check below).
    record:
        NOTE(review): currently unused.

    Returns
    -------
    The converted values: a pint quantity array when the input is not a
    pd.Series, otherwise a pd.Series with the original index.

    Examples
    --------
    """
    # Import
    #from datablend.core.settings import ureg

    # Libraries for pint (imported lazily so pint is only required here)
    from pint import UnitRegistry

    # Create the unit registry
    ureg = UnitRegistry()  # auto_reduce_dimensions = False

    # Create transformed (also promotes array-like inputs to a Series)
    transformed = pd.Series(series)

    # Attach the source unit and convert to the target unit
    transformed = (transformed.values * ureg(unit_from)).to(unit_to)

    # Convert to unit
    if not isinstance(series, pd.Series):
        return transformed
    # NOTE(review): ``transformed`` is a pint Quantity at this point;
    # confirm pd.Series accepts it as ``data`` — ``transformed.magnitude``
    # may be what is intended here.
    return pd.Series(index=series.index, data=transformed)
# Check in between
#between = pd.Series(v).between(low, high)
def range_correction(series, range=None, value=np.nan):
    """Replace out-of-range observations with a sentinel value.

    Values recorded outside the known feasible limits are treated as
    errors (rather than statistical outliers) and substituted with
    ``value``.

    .. todo: Check if any outside first otherwise return series.
    .. todo: Warn if replace value is outside range.
    .. todo: Include several options for value:
                value=np.nan
                value=number
                value=(low, high)
                value='edges'

    Parameters
    ----------
    series: pd.Series (numeric series)
        The pandas series to correct
    range: range or tuple (min, max)
        The range
    value: default np.nan
        The value to use for corrections

    Returns
    -------
    pd.Series

    See Also
    --------
    Examples: :ref:`sphx_glr__examples_correctors_plot_range.py`.
    """
    # Work on a numeric copy of the input.
    corrected = pd.to_numeric(series.copy(deep=True))
    lower, upper = range
    # Mark everything outside [lower, upper] (bounds inclusive) ...
    outside = ~corrected.between(lower, upper)
    # ... and overwrite it with the replacement value.
    corrected[outside] = value
    return corrected
def unique_true_value_correction(series, value=np.nan, **kwargs):
    """Ensure at most one True value remains in a boolean series.

    Useful for variables representing one-off events (e.g. event_death)
    recorded more than once: duplicated True entries are replaced by
    ``value``.

    .. note: Set to value=np.nan or value=False
    .. note: Rename to one_true_value_correction

    Parameters
    ----------
    series: pd.Series
        The boolean series to correct.
    value: default np.nan
        Replacement for the duplicated True entries.
    **kwargs:
        Forwarded to the pandas ``duplicated`` function; in particular
        ``keep`` which allows (i) 'first' to keep the first appearance,
        (ii) 'last' to keep the last appearance or (iii) False which
        keeps all appearances.

    Returns
    -------
    pd.Series
        The corrected series.

    See Also
    --------
    Examples: :ref:`sphx_glr__examples_correctors_plot_unique_true_value.py`.
    """
    # Work on a boolean copy of the input.
    flags = to_boolean(series.copy(deep=True))
    # Nothing to fix when there are zero or one True values.
    total_true = flags.sum()
    if total_true in (0, 1):
        return series
    # Mark duplicates among the True entries only ...
    duplicated_trues = flags[flags].duplicated(**kwargs)
    # ... and overwrite those positions with the replacement value.
    flags.loc[duplicated_trues[duplicated_trues].index] = value
    return flags
def causal_correction(x, y):
    """This method is not implemented yet.

    Intended contract (from the inline note below): enforce a causal
    relation between two boolean variables — whenever ``x`` is one,
    ``y`` must also be one.
    """
    # if x is one then y must be one.
    pass
def compound_feature_correction(series, compound, verbose=0):
    """Corrects compound boolean features.

    Ensures that the values of a compound feature (e.g. bleeding)
    and its subcategories (e.g. bleeding_skin, bleeding_nose, ...)
    are consistent. The compound feature is set to True if its
    current value is True or if any of the subcategories is True;
    that is, ``series | compound.any(axis=1)``.

    .. note: Option to return bleeding other if it is not included
             in the compound and the series (bleeding) has True but
             no subcategory (bleeding site) is found.

    .. warning: Works with pd.NA but not with np.nan!

    Parameters
    ----------
    series: pd.Series
        The series to correct
    compound: pd.DataFrame
        The DataFrame with subcategories.
    verbose: int, default 0
        Print a trace of the correction when greater than 5.

    Returns
    -------
    pd.Series
        The corrected series.

    See Also
    --------
    Examples: :ref:`sphx_glr__examples_correctors_plot_compound.py`.
    """
    if verbose > 5:
        print("Applying... compound_feature_correction to {0:>20} | {1}" \
            .format(series.name, compound.columns.tolist()))

    # Copy data and convert to nullable dtypes (pd.NA-aware booleans)
    transform = series.copy(deep=True).convert_dtypes()

    # True where any subcategory is True.
    # BUG FIX: this local was previously named `any`, shadowing the builtin.
    any_true = compound.convert_dtypes().any(axis=1)

    # other = transform & ~any_true
    return transform | any_true
def date_corrections(x, years=None, use_swap_day_month=True):
    """Generate the unique candidate corrections for a (possibly wrong) date.

    Candidates always include the original value, +/- one year, +/- one
    month, optionally the day/month swapped value, and the original date
    re-stamped with each year in ``years``.

    Parameters
    ----------
    x: datetime-like
        The date to generate corrections for.
    years: iterable of numbers, optional
        Candidate years to substitute into ``x``.
    use_swap_day_month: bool, default True
        Whether to include the day/month swapped candidate.
        BUG FIX: this flag was previously ignored and the swap was
        always included; the default preserves the old behaviour.

    Returns
    -------
    pd.Series
        The unique candidate dates (original value first).
    """
    # Original value is always a candidate.
    corrections = [x]
    # Swapping day and month (e.g. 03/07 recorded as 07/03).
    if use_swap_day_month:
        corrections.append(swap_day_month(x))
    # Off-by-one year/month slips.
    corrections.append(add_to_date(x, year=1))
    corrections.append(add_to_date(x, year=-1))
    corrections.append(add_to_date(x, month=1))
    corrections.append(add_to_date(x, month=-1))
    # Range of possible years observed elsewhere in the data.
    if years is not None:
        corrections += [x.replace(year=int(y)) for y in years]
    # Deduplicate while keeping order.
    return pd.Series(pd.Series(corrections).unique())
def date_outliers_correction(series,
                             max_days_to_median=20,
                             outliers_as_nat=False):
    """Correct date outliers using a set of plausible candidate fixes.

    Dates further than ``max_days_to_median`` days from the series
    median are considered outliers. For each outlier, candidate
    corrections are generated (see :func:`date_corrections`) and the
    candidate closest to the median replaces the outlier.

    .. warning: The selection of the first column should not be
                necessary. It should work just with the idx.
                series[outliers] = r[idx].iloc[:, 0]

    .. todo: Include different modes to compute the outliers and
             different methods to correct the dates if required:
               outliers = np.abs(series - series.mean()) > coef * series.std()
               outliers = np.abs(series - series.median()) > coef * series.std()

    .. warning: Unfortunatly it does not work with apply!?

    Parameters
    ----------
    series: pd.Series (datetime64[ns])
        The series to correct.
    max_days_to_median: int, default 20
        Maximum accepted distance (in days) from the median.
    outliers_as_nat: bool, default False
        When no candidate is close enough, replace the outliers with
        ``pd.NaT`` instead of falling back to the best (still distant)
        candidate.

    Returns
    -------
    pd.Series
        The corrected series.
    """
    # Compute days of difference between each date and the median
    outliers = (series - series.median()) \
        .dt.days.abs() > max_days_to_median

    # Return original series when there is nothing to correct
    if not outliers.any():
        return series

    # Unique years observed among the non-outlier dates
    years = series[~outliers].dropna().dt.year.unique()

    # Compute the candidate corrections for each outlier
    r = series[outliers].apply( \
        date_corrections, years=years)

    # Distance (in days) of each candidate from the median
    r_days = (r - series.median()).abs()
    r_days = r_days / np.timedelta64(1, 'D')

    # No candidate is close enough to the median
    if not (r_days < max_days_to_median).any(axis=1).any():
        if outliers_as_nat:
            transform = series.copy(deep=True)
            transform[outliers] = pd.NaT
            # BUG FIX: previously this branch fell through without
            # returning, so the NaT assignment was discarded and
            # overwritten by the best-candidate replacement below.
            return transform

    # Find, per outlier, the candidate with the smallest distance
    idx = (r - series.median()).abs().idxmin(axis=1)

    # Replace the outliers in a copy of the series
    transform = series.copy(deep=True)
    transform[outliers] = r[idx].iloc[:, 0]

    # Return transformed
    return transform
def outlier_dates_correction(series, coef=2.0):
"""Corrects the dates that are outliers.
It receives all the dates in which samples were collected,
for example for a patient and tries to (i) identify
outliers and (ii) correct them with the best possible
date.
.. note: Using mean/std for outliers...
.. note: Should I use days which is more interpretable?
.. warning: Remember to include always the raw value
just in case that was the best! Should I
check only values that are outside range?
Parameters
----------
series: series with datetime64[ns]
coeff:
Returns
-------
datetime64[ns] series with corrected dates.
"""
# Check datetime series or str series (errors='raise)
# Copy series too!
# Find outliers
outliers = np.abs(series - series.mean()) > coef * series.std()
"""
print(outliers)
print(np.abs(series - series.mean()))
print(coef * series.std())
print(series.quantile([0.05, 0.95]))
from scipy.spatial.distance import pdist, cdist
from itertools import product
#e = np.abs(series - series.mean())
e = (series - series.mean()).abs().dt.days
p = np.array(list(product(e, e)))
#p = np.array([series, series])
print(p)
a = pd.DataFrame(p)
a = a.apply(lambda x: np.abs(x[0]-x[1]), axis=1)
print(a)
print(cdist(p))
#e = series.astype(int)
#print(e)
# / np.timedelta64(-1, 'D')
print(e)
import sys
sys.exit()
a = list(product(e, e))
#print(a)
print(pdist(np.array(a)))
#print(cdist(series.values, series.values))
import sys
sys.exit()
"""
"""
if len(series) < 3:
return series
"""
"""
print("\n\n\nFinding outliers...")
print("Consecutive distances:")
print(ddiff)
print("\nThe mean")
print(mean)
print("\nThe difference")
print(dff)
print("\nOutliers")
print(outliers)
"""
if len(series) < 3:
return series
ddiff = series.diff().dt.days.abs()
mean = series[ddiff <= 3].mean()
dff = (series - mean).abs()
outliers = dff.dt.days > 10
# Do corrections
if outliers.any():
| |
<reponame>sophiaas/alphacsc
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2017 by <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Dictionary learning based on CBPDN sparse coding"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
import copy
import numpy as np
from sporco.util import u
import sporco.linalg as sl
import sporco.cnvrep as cr
from sporco.fista import ccmod
from sporco.admm import dictlrn
import sporco.admm.cbpdn as Acbpdn
from sporco.fista import cbpdn as Fcbpdn
__author__ = """<NAME> <<EMAIL>>"""
class ConvBPDNDictLearn(dictlrn.DictLearn):
    r"""**Class inheritance structure**

    .. inheritance-diagram:: ConvBPDNDictLearn
       :parts: 2

    |

    Dictionary learning based on ConvBPDN (FISTA) and ConvCnstrMOD
    (FISTA) :cite:`garcia-2017-convolutional`.

    Solve the optimisation problem

    .. math::
       \mathrm{argmin}_{\mathbf{d}, \mathbf{x}} \;
       (1/2) \sum_k \left \| \sum_m \mathbf{d}_m * \mathbf{x}_{k,m} -
       \mathbf{s}_k \right \|_2^2 + \lambda \sum_k \sum_m
       \| \mathbf{x}_{k,m} \|_1 \quad \text{such that}
       \quad \mathbf{d}_m \in C \;\;,

    where :math:`C` is the feasible set consisting of filters with
    unit norm and constrained support, via interleaved alternation
    between FISTA steps of the :class:`.fista.cbpdn.ConvBPDN` and
    :class:`.ConvCnstrMOD` problems. The multi-channel variants
    supported by :class:`.ConvCnstrMOD` are also supported.

    After termination of the :meth:`solve` method, attribute :attr:`itstat`
    is a list of tuples representing statistics of each iteration. The
    fields of the named tuple ``IterationStats`` are:

       ``Iter`` : Iteration number

       ``ObjFun`` : Objective function value

       ``DFid`` : Value of data fidelity term :math:`(1/2) \sum_k \|
       \sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k \|_2^2`

       ``RegL1`` : Value of regularisation term :math:`\sum_k \sum_m
       \| \mathbf{x}_{k,m} \|_1`

       ``Cnstr`` : Constraint violation measure

       ``X_F_Btrack`` : Value of objective function for CSC problem

       ``X_Q_Btrack`` : Value of Quadratic approximation for CSC problem

       ``X_ItBt`` : Number of iterations in bactracking for CSC problem

       ``X_L`` : Inverse of gradient step parameter for CSC problem

       ``D_F_Btrack`` : Value of objective function for CDU problem

       ``D_Q_Btrack`` : Value of Quadratic approximation for CDU problem

       ``D_ItBt`` : Number of iterations in bactracking for CDU problem

       ``D_L`` : Inverse of gradient step parameter for CDU problem

       ``Time`` : Cumulative run time
    """

    class Options(dictlrn.DictLearn.Options):
        """ConvBPDNDictLearn dictionary learning algorithm options.

        Options include all of those defined in
        :class:`.dictlrn.DictLearn.Options`, together with
        additional options:

          ``AccurateDFid`` : Flag determining whether data fidelity term is
          estimated from the value computed in the X update (``False``) or is
          computed after every outer iteration over an X update and a D
          update (``True``), which is slower but more accurate.

          ``DictSize`` : Dictionary size vector.

          ``CBPDN`` : Options :class:`.cbpdn.GenericConvBPDN.Options`

          ``CCMOD`` : Options :func:`.ccmod.ConvCnstrMOD.Options`
        """

        # Start from the parent defaults and add the X-step (CBPDN) ones.
        defaults = copy.deepcopy(dictlrn.DictLearn.Options.defaults)
        defaults.update({'DictSize' : None, 'AccurateDFid' : False,
                         'CBPDN' : copy.deepcopy(
                             Fcbpdn.ConvBPDN.Options.defaults)})

        def __init__(self, opt=None):
            """Initialise ConvDictLearnFista dictionary learning algorithm
            options.

            :param opt: dict of option overrides (may be None)
            """
            # NOTE(review): the 'CCMOD' defaults are injected at instance
            # construction time (mutating the class-level ``defaults``)
            # rather than at class definition time — confirm this ordering
            # is intentional before changing it.
            self.defaults.update({'CCMOD' : copy.deepcopy(
                ccmod.ConvCnstrMOD.Options.defaults)})
            # Both sub-solvers run a single inner iteration per outer
            # iteration, with backtracking line search enabled.
            dictlrn.DictLearn.Options.__init__(self, {
                'CBPDN': Fcbpdn.ConvBPDN.Options(
                    {'MaxMainIter': 1,
                     'BackTrack': {'Eta': 1.2, 'MaxIter': 50}}),
                'CCMOD': ccmod.ConvCnstrMOD.Options({'MaxMainIter': 1,
                    'BackTrack': {'Eta': 1.2, 'MaxIter': 50}})
                })
            if opt is None:
                opt = {}
            self.update(opt)

    def __init__(self, D0, S, lmbda=None, opt=None, dimK=1, dimN=2):
        """
        Initialise a ConvBPDNDictLearn object with problem size and options.

        Parameters
        ----------
        D0 : array_like
            Initial dictionary array
        S : array_like
            Signal array
        lmbda : float
            Regularisation parameter
        opt : :class:`ConvBPDNDictLearn.Options` object
            Algorithm options
        dimK : int, optional (default 1)
            Number of signal dimensions. If there is only a single input
            signal (e.g. if `S` is a 2D array representing a single image)
            `dimK` must be set to 0.
        dimN : int, optional (default 2)
            Number of spatial/temporal dimensions
        """
        if opt is None:
            opt = ConvBPDNDictLearn.Options()
        self.opt = opt

        # Get dictionary size
        if self.opt['DictSize'] is None:
            dsz = D0.shape
        else:
            dsz = self.opt['DictSize']

        # Construct object representing problem dimensions
        cri = cr.CDU_ConvRepIndexing(dsz, S, dimK, dimN)

        # Normalise dictionary
        D0 = cr.Pcn(D0, dsz, cri.Nv, dimN, cri.dimCd, crp=True,
                    zm=opt['CCMOD', 'ZeroMean'])

        # Modify D update options to include initial values for X
        opt['CCMOD'].update({'X0' : cr.zpad(
            cr.stdformD(D0, cri.C, cri.M, dimN), cri.Nv)})

        # Create X update object
        xstep = Fcbpdn.ConvBPDN(D0, S, lmbda, opt['CBPDN'],
                                dimK=dimK, dimN=dimN)

        # Create D update object
        dstep = ccmod.ConvCnstrMOD(None, S, dsz, opt['CCMOD'],
                                   dimK=dimK, dimN=dimN)

        # NOTE(review): debug prints of the FISTA step parameters; left
        # in place to keep behaviour identical.
        print("L xstep in cbpdndl: ", xstep.L)
        print("L dstep in cbpdndl: ", dstep.L)

        # Configure iteration statistics reporting
        isfld = ['Iter', 'ObjFun', 'DFid', 'RegL1', 'Cnstr']
        hdrtxt = ['Itn', 'Fnc', 'DFid', u('ℓ1'), 'Cnstr']
        hdrmap = {'Itn': 'Iter', 'Fnc': 'ObjFun', 'DFid': 'DFid',
                  u('ℓ1'): 'RegL1', 'Cnstr': 'Cnstr'}
        # With AccurateDFid the objective terms come from evaluate();
        # otherwise they are taken directly from the X-step statistics.
        if self.opt['AccurateDFid']:
            isxmap = {'X_F_Btrack': 'F_Btrack', 'X_Q_Btrack': 'Q_Btrack',
                      'X_ItBt': 'IterBTrack', 'X_L': 'L',
                      'X_Rsdl': 'Rsdl'}
            evlmap = {'ObjFun': 'ObjFun', 'DFid': 'DFid', 'RegL1': 'RegL1'}
        else:
            isxmap = {'ObjFun': 'ObjFun', 'DFid': 'DFid', 'RegL1': 'RegL1',
                      'X_F_Btrack': 'F_Btrack', 'X_Q_Btrack': 'Q_Btrack',
                      'X_ItBt': 'IterBTrack', 'X_L': 'L',
                      'X_Rsdl': 'Rsdl'}
            evlmap = {}

        # If Backtracking enabled in xstep display the BT variables also
        if xstep.opt['BackTrack', 'Enabled']:
            isfld.extend(['X_F_Btrack', 'X_Q_Btrack', 'X_ItBt', 'X_L',
                          'X_Rsdl'])
            hdrtxt.extend(['F_X', 'Q_X', 'It_X', 'L_X'])
            hdrmap.update({'F_X': 'X_F_Btrack', 'Q_X': 'X_Q_Btrack',
                           'It_X': 'X_ItBt', 'L_X': 'X_L'})
        else:  # Add just L value to xstep display
            isfld.extend(['X_L', 'X_Rsdl'])
            hdrtxt.append('L_X')
            hdrmap.update({'L_X': 'X_L'})

        isdmap = {'Cnstr': 'Cnstr', 'D_F_Btrack': 'F_Btrack',
                  'D_Q_Btrack': 'Q_Btrack', 'D_ItBt': 'IterBTrack',
                  'D_L': 'L', 'D_Rsdl': 'Rsdl'}

        # If Backtracking enabled in dstep display the BT variables also
        if dstep.opt['BackTrack', 'Enabled']:
            isfld.extend(['D_F_Btrack', 'D_Q_Btrack', 'D_ItBt', 'D_L',
                          'D_Rsdl', 'Time'])
            hdrtxt.extend(['F_D', 'Q_D', 'It_D', 'L_D'])
            hdrmap.update({'F_D': 'D_F_Btrack', 'Q_D': 'D_Q_Btrack',
                           'It_D': 'D_ItBt', 'L_D': 'D_L'})
        else:  # Add just L value to dstep display
            isfld.extend(['D_L', 'D_Rsdl', 'Time'])
            hdrtxt.append('L_D')
            hdrmap.update({'L_D': 'D_L'})

        isc = dictlrn.IterStatsConfig(isfld=isfld, isxmap=isxmap,
                                      isdmap=isdmap, evlmap=evlmap,
                                      hdrtxt=hdrtxt, hdrmap=hdrmap)

        # Call parent constructor
        super(ConvBPDNDictLearn, self).__init__(xstep, dstep, opt, isc)

    def getdict(self, crop=True):
        """Get final dictionary. If ``crop`` is ``True``, apply
        :func:`.cnvrep.bcrop` to returned array.
        """
        return self.dstep.getdict(crop=crop)

    def evaluate(self):
        """Evaluate functional value of previous iteration.

        Returns a dict with ``DFid``, ``RegL1`` and ``ObjFun`` when the
        ``AccurateDFid`` option is set (computed in the frequency domain
        from the current dictionary and coefficients), otherwise None.
        """
        if self.opt['AccurateDFid']:
            D = self.getdict(crop=False)
            X = self.getcoef()
            # Data fidelity is computed in the DFT domain (Parseval).
            Df = sl.rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
            Xf = sl.rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
            Sf = self.xstep.Sf
            Ef = np.sum(Df * Xf, axis=self.xstep.cri.axisM, keepdims=True) - Sf
            dfd = sl.rfl2norm2(Ef, self.xstep.S.shape,
                               axis=self.xstep.cri.axisN)/2.0
            rl1 = np.sum(np.abs(X))
            return dict(DFid=dfd, RegL1=rl1, ObjFun=dfd+self.xstep.lmbda*rl1)
        else:
            return None
class MixConvBPDNDictLearn(dictlrn.DictLearn):
r"""**Class inheritance structure**
.. inheritance-diagram:: MixConvBPDNDictLearn
:parts: 2
|
Dictionary learning based on ConvBPDN (ADMM) and ConvCnstrMOD (FISTA)
:cite:`garcia-2017-convolutional`.
Solve the optimisation problem
.. math::
\mathrm{argmin}_{\mathbf{d}, \mathbf{x}} \;
(1/2) \sum_k \left \| \sum_m \mathbf{d}_m * \mathbf{x}_{k,m} -
\mathbf{s}_k \right \|_2^2 + \lambda \sum_k \sum_m
\| \mathbf{x}_{k,m} \|_1 \quad \text{such that}
\quad \mathbf{d}_m \in C \;\;,
where :math:`C` is the feasible set consisting of filters with
unit norm and constrained support, via interleaved alternation
between ADMM steps of the :class:`.admm.cbpdn.ConvBPDN` and FISTA steps
:class:`.ConvCnstrMOD` problems. The multi-channel variants
supported by :class:`.ConvCnstrMOD` are also supported.
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``DFid`` : Value of data fidelity term :math:`(1/2) \sum_k \|
\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k \|_2^2`
``RegL1`` : Value of regularisation term :math:`\sum_k \sum_m
\| \mathbf{x}_{k,m} \|_1`
``Cnstr`` : Constraint violation measure
``XPrRsdl`` : Norm of X primal residual
``XDlRsdl`` : Norm of X dual residual
``XRho`` : X penalty parameter
``D_F_Btrack`` : Value of objective function for CDU problem
``D_Q_Btrack`` : Value of Quadratic approximation for CDU problem
``D_ItBt`` : Number of iterations in bactracking for CDU problem
``D_L`` : Inverse of gradient step parameter for CDU problem
``Time`` : Cumulative run time
"""
class Options(dictlrn.DictLearn.Options):
"""MixConvBPDNDictLearn dictionary learning algorithm options.
Options include all of those defined in
:class:`.dictlrn.DictLearn.Options`, together with
additional options:
``AccurateDFid`` : Flag determining whether data fidelity term is
estimated from the value computed in the X update (``False``) or is
computed after every outer iteration over an X update and a D
update (``True``), which is slower but more accurate.
``DictSize`` : Dictionary size vector.
``CBPDN`` : Options :class:`.admm.cbpdn.ConvBPDN.Options`
``CCMOD`` : Options :func:`.ccmod.ConvCnstrMOD.Options`
"""
defaults = copy.deepcopy(dictlrn.DictLearn.Options.defaults)
defaults.update({'DictSize' : None, 'AccurateDFid' : False,
'CBPDN' : copy.deepcopy(
Acbpdn.ConvBPDN.Options.defaults)})
def __init__(self, opt=None):
"""Initialise ConvBPDNAdmm_DictLearnFista dictionary learning
algorithm options.
"""
self.defaults.update({'CCMOD' : copy.deepcopy(
ccmod.ConvCnstrMOD.Options.defaults)})
dictlrn.DictLearn.Options.__init__(self, {
'CBPDN': Acbpdn.ConvBPDN.Options({'MaxMainIter': 1,
'AutoRho': {'Period': 10, 'AutoScaling': False,
'RsdlRatio': 10.0, 'Scaling': 2.0,
'RsdlTarget': 1.0}}),
'CCMOD': ccmod.ConvCnstrMOD.Options(
{'MaxMainIter': 1,
'BackTrack': {'Eta': 1.2, 'MaxIter': 50}})
})
if | |
<gh_stars>0
# Copyright 2020 Dr. <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Registry of device-driver instances available to the SentinAir manager.
# Each installer run appends an "import + instantiate + append" stanza
# below; the marker comments are relied upon by the installer scripts,
# so keep them intact.
installed_devices = []
#bme280 has been installed in SentinAir on 2020-12-15_10-00-51
# do not remove or modify the next three lines below!!!
from devices.bme280 import Bme280
bme280_obj = Bme280()
installed_devices.append(bme280_obj)
#mcp342x has been installed in SentinAir on 2020-11-02_11-55-33
# do not remove or modify the next three lines below!!!
from devices.mcp342x import Mcp342x
mcp342x_obj = Mcp342x()
installed_devices.append(mcp342x_obj)
#bh1750 has been installed in SentinAir on 2020-11-02_11-55-19
# do not remove or modify the next three lines below!!!
from devices.bh1750 import Bh1750
bh1750_obj = Bh1750()
installed_devices.append(bh1750_obj)
#v72m has been installed in SentinAir on 2020-07-31_04-48-53
# do not remove or modify the next three lines below!!!
from devices.v72m import V72m
v72m_obj = V72m()
installed_devices.append(v72m_obj)
#multisensor_board has been installed in SentinAir on 2020-07-31_01-10-48
# do not remove or modify the next three lines below!!!
from devices.multisensor_board import Multisensor_board
multisensor_board_obj = Multisensor_board()
installed_devices.append(multisensor_board_obj)
#irca1 has been installed in SentinAir on 2020-07-27_10-06-03
from devices.irca1 import Irca1
irca1_obj = Irca1()
installed_devices.append(irca1_obj)
#go3 has been installed in SentinAir on 2020-05-29_15-41-24
from devices.go3 import Go3
go3_obj = Go3()
installed_devices.append(go3_obj)
#af22 has been installed in SentinAir on 2020-05-27_13-19-26
from devices.af22 import Af22
af22_obj = Af22()
installed_devices.append(af22_obj)
#ac32 has been installed in SentinAir on 2020-05-20_15-22-54
from devices.ac32 import Ac32
ac32_obj = Ac32()
installed_devices.append(ac32_obj)
#co12m has been installed in SentinAir on 2020-05-20_15-22-39
# do not remove or modify the next three lines below!!!
from devices.co12m import Co12m
co12m_obj = Co12m()
installed_devices.append(co12m_obj)
# do not remove or modify the next three lines below!!!
#lcss_adapter has been installed in SentinAir on 2020-05-20_15-22-17
# do not remove or modify the next three lines below!!!
from devices.lcss_adapter import Lcss_adapter
lcss_adapter_obj = Lcss_adapter()
installed_devices.append(lcss_adapter_obj)
# do not remove or modify the next three lines below!!!
#nox405 has been installed in SentinAir on 2020-05-20_15-21-55
# do not remove or modify the next three lines below!!!
from devices.nox405 import Nox405
nox405_obj = Nox405()
installed_devices.append(nox405_obj)
#o342 has been installed in SentinAir on 2020-05-20_15-21-48
# do not remove or modify the next three lines below!!!
from devices.o342 import O342
o342_obj = O342()
installed_devices.append(o342_obj)
#pms3003 has been installed in SentinAir on 2020-05-20_15-21-41
# do not remove or modify the next three lines below!!!
from devices.pms3003 import Pms3003
pms3003_obj = Pms3003()
installed_devices.append(pms3003_obj)
# do not remove or modify the next three lines below!!!
#multisensore has been installed in SentinAir on 2020-05-09_22-23-17
# do not remove or modify the next three lines below!!!
import copy
import serial.tools.list_ports
import serial
import time
import _thread
import logging
import RPi.GPIO as GPIO
from socket import *
import sys
from datetime import datetime
import os
#graphic management libraries
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
#end
#connected devices are stored here (populated at runtime by the manager)
connected_devices = []
#sentinair files paths
DEFAULT_DIR = "/home/pi/sentinair"
DATA_DIR = "/var/www/html/data"
IMG_DIR = "/var/www/html/img"
LOG_FILENAME = '/var/www/html/log/sentinair-log.txt'
IMAP_SMTP_FILE = "/home/pi/sentinair/imap-smtp-interface.py"
MAIL_CONFIG_FILE = "/home/pi/sentinair/mail-config.sentinair"
#connection types of devices
USB_CONNECTION_TYPE = "usb"
SERIAL_CONNECTION_TYPE = "serial"
ETH_CONNECTION_TYPE = "eth"
SPI_CONNECTION_TYPE = "spi"
I2C_CONNECTION_TYPE = "i2c"
#messages to send to user interfaces (log variants first, then the
#user-facing variants derived from them below)
INIT_LOG_MSG = 'SENTINAIR MANAGER by <NAME> setting up...'
INIT_GPIO_ERR_LOG_MSG = 'Error in initializing GPIO:'
SHUTDOWN_BUTTON_PRESSED = 'SentinAir shut down by stop button pressed'
UDP_CLI_PORT_ERR_LOG_MSG = 'Unable to start SentinAir command line user interface: udp port not opening!'
UDP_CLI_DATA_ERR_LOG_MSG = 'Error in opening data channel: udp port not opening'
DATA_SERVER_ERR_LOG_MSG = 'Error in starting data server.'
DATA_SERVER_PORT_ERR_LOG_MSG = 'Unable to start the SentinAir data server: data port not opening.'
DATA_SENDING_ERR_LOG_MSG = 'Unable to send measurement on data port.'
CMD_IN_ERR_LOG_MSG = 'Unable to get commands on udp port:'
CMD_OUT_ERR_LOG_MSG = 'Unable to send commands outputs on udp port:'
STATUS_FILE_ERR_LOG_MSG = 'Unable to update status file:'
INIT_CAPT_GPIO_ERR_LOG_MSG_1 = 'GPIO error on init capture 1:'
INIT_CAPT_GPIO_ERR_LOG_MSG_2 = 'GPIO error on init capture 2:'
INIT_CAPT_GPIO_ERR_LOG_MSG_3 = 'GPIO error on init capture 3:'
INIT_CAPT_ERR_LOG_MSG = 'Error occurred on init_session:'
INIT_CAPT_MAKE_ERR_LOG_MSG = 'Error on make_recoed in init capture:'
MAKE_ERR_LOG_MSG = 'Error on make_record capture:'
SYS_READY_LOG_MSG = 'SentinAir ready!'
AVG_INIT_ERR_LOG_MSG = 'Error in calculating means on init:'
AVG_HOUR_ERR_LOG_MSG = 'Error in calculating hourly means:'
AVG_DAY_ERR_LOG_MSG = 'Error in calculating daily means:'
DATA_PORT_OPEN_ERR_LOG_MSG = "Udp data port opening failed"
MAIL_CONFIG_NULL_LOG_MSG = "E-mail account is not present: the imap-smtp interface will not start"
INIT_MSG = "\n\n" + INIT_LOG_MSG + "\n"
UDP_CLI_PORT_ERR_MSG = "\n" + UDP_CLI_PORT_ERR_LOG_MSG + "\n"
INIT_GPIO_ERR_MSG = "\n" + INIT_GPIO_ERR_LOG_MSG + '\n'
# NOTE(review): duplicate assignment — UDP_CLI_PORT_ERR_MSG is already
# defined two lines above with the same value; harmless but redundant.
UDP_CLI_PORT_ERR_MSG = "\n" + UDP_CLI_PORT_ERR_LOG_MSG + "\n"
DATA_SERVER_ERR_MSG = "\n" + DATA_SERVER_ERR_LOG_MSG + "\n"
DATA_SERVER_PORT_ERR_MSG = "\n" + DATA_SERVER_PORT_ERR_LOG_MSG + "\n"
DATA_SENDING_ERR_MSG = "\n" + DATA_SENDING_ERR_LOG_MSG + "\n"
DATA_FILE_OPENING_ERR = "\nImpossible opening storage data file. Measurement sesssion stopped with error"
MEAS_STOP_ERR_MSG = "\nMeasurement session stopped with error"
SYS_READY = "\n" + SYS_READY_LOG_MSG + "\n"
INV_CMD = "\nCommand not valid!\n"
DATA_PORT_OPEN_ERR_MSG = "\n" + DATA_PORT_OPEN_ERR_LOG_MSG + "\n"
MAIL_CONFIG_NULL = "\nE-mail account is not present: the imap-smtp interface will not start\n"
ERRS_STR = "Impossible to execute the command:\n" +\
    "sampling session ongoing,\n" + "if you want to execute it\n" +\
    "first press 'b' to stop the session!\n"
ERRC_STR = "Impossible to execute the command:\n" +\
    "sampling session ongoing,\n" + "if you want to check on devices\n" +\
    "first press 'b' to stop the session!\n"
ERRS_STR_1 = "Impossible to execute the command:\n" +\
    "no device connected,\n" + "if you want to execute it\n" +\
    "first press 's' to search and connect them!\n"
USAGE_STR = "press i[ENTER] to get info on the current status\n" +\
    "press q[ENTER] to quit the command consolle \n" +\
    "press h[ENTER] for command viewing\n" +\
    "press s[ENTER] for searching devices\n" +\
    "press c[ENTER] for checking devices\n" +\
    "press b[ENTER] for stopping sampling sessions\n" +\
    "press s,<sampling rate in seconds>[ENTER] to start and log a sampling session\n"
NO_MEAS = "No measurement session ongoing"
#strings markers (protocol delimiters used on the udp channels)
__ERROR = "__error"
END_STR = ">>>end__"
#system settings
MINIMUM_SAMPLING_RATE = 30
UDP_SERVICE_PORT = 16670
DATA_PORT = 24504
DATA_ADDRESS = ('localhost', DATA_PORT)
BUFSIZE = 1024
##### GRAPHIC
##### routine for plots generation
def plotter(heads):
    """Background loop that re-renders the measurement plots while a
    sampling session is running (i.e. while the global ``rate`` != 0).

    Watches the in-memory data columns (raw, hourly means, daily means)
    and calls the matching ``plot_file*`` routine whenever new rows have
    been appended.

    Parameters
    ----------
    heads : list of str
        Column headers of the measurement file; heads[0] is date_time.
    """
    global rate
    global curfile
    global datacols
    global datacolsh
    global datacolsd
    prevlen = 0
    prevlenh = 0
    prevlend = 0
    # BUG FIX: str.rstrip(".txt") strips any run of trailing '.', 't',
    # 'x' characters, corrupting basenames ending in those letters
    # (e.g. "output.txt" -> "outpu"). Strip the exact ".txt" suffix.
    base = curfile[:-len(".txt")] if curfile.endswith(".txt") else curfile
    fileh = base + "_hourlymeans.txt"
    filed = base + "_dailymeans.txt"
    # NOTE(review): tight busy-wait with no sleep; left unchanged to
    # preserve the original timing behaviour.
    while rate != 0:
        # Raw-data plot: redraw only when new rows have arrived.
        nowlen = len(datacols[0])
        if nowlen > 1 and prevlen != nowlen:
            plot_file(curfile, heads)
            if rate == 0:
                datacols = []
                return
            prevlen = nowlen
        #hourlymean
        nowlenh = len(datacolsh[0])
        if nowlenh > 1 and prevlenh != nowlenh:
            plot_file_h(fileh, heads)
            if rate == 0:
                datacolsh = []
                return
            prevlenh = nowlenh
        #dailymean
        nowlend = len(datacolsd[0])
        if nowlend > 1 and prevlend != nowlend:
            plot_file_d(filed, heads)
            if rate == 0:
                datacolsd = []
                return
            prevlend = nowlend
#routine for plotting data of the measurements file
def plot_file(filename, header):
    """Render one PNG time-series plot per data column of the
    measurement file.

    The first header entry is the date_time column and is used as the
    x axis; every other column is plotted against it and saved under
    IMG_DIR as "<basename><column>.png".

    Parameters
    ----------
    filename : str
        Path of the measurement file (only used to derive image names).
    header : list of str
        Column headers; header[0] is the timestamp column.
    """
    global datacols
    global rate
    fn1 = filename.replace(DATA_DIR + "/", '')
    # BUG FIX: was fn1.rstrip("txt"), which strips any run of trailing
    # 't'/'x' characters; strip the exact "txt" suffix (keeping the dot,
    # as the original naming scheme does).
    fn = fn1[:-3] if fn1.endswith("txt") else fn1
    j = 0
    for cols in header:
        if j == 0:
            # Skip the timestamp column.
            j = j + 1
            continue
        try:
            # Parse timestamps and convert to matplotlib date numbers.
            xm2 = [datetime.strptime(d, "%d/%m/%Y_%H:%M:%S") for d in datacols[0]]
            xm = mpl.dates.date2num(xm2)
            fig = plt.figure()
            graph = fig.add_subplot(111)
            red_patch = mpatches.Patch(color='red', label=header[j])
            graph.legend(handles=[red_patch])
            hfmt = mpl.dates.DateFormatter("%d/%m/%Y_%H:%M:%S")
            graph.xaxis.set_major_formatter(hfmt)
            # x and y may momentarily differ in length while the
            # acquisition thread appends data: trim the longer one.
            if len(datacols[j]) > len(xm):
                df = len(datacols[j]) - len(xm)
                yp = datacols[j][:-df]
            if len(xm) > len(datacols[j]):
                df = len(xm) - len(datacols[j])
                xm = xm[:-df]
            if len(datacols[j]) == len(xm):
                yp = datacols[j]
            if rate == 0:
                # Sampling stopped while plotting: drop data and bail out.
                datacols = []
                plt.close('all')
                return
            graph.plot(xm, yp, 'r')
            plt.setp(graph.get_xticklabels(), rotation=30, ha="right")
            ln1 = len(xm)
            if ln1 < 10:
                graph.xaxis.set_major_locator(plt.LinearLocator(numticks=ln1))
                graph.yaxis.set_major_locator(plt.LinearLocator(numticks=ln1))
            else:
                graph.xaxis.set_major_locator(plt.MaxNLocator(10))
                graph.yaxis.set_major_locator(plt.MaxNLocator(10))
            # Watermark annotation (the Artist stays attached to the
            # figure; the previously unused `text = ...` binding removed).
            graph.annotate("Plotted by Sentinair device\n developed by\n Dr. <NAME> 2019",\
                xy=(.3,.7),xycoords='figure fraction',rotation=-30,size=16,alpha=0.2)
            graph.set_xlabel('Date_time')
            graph.grid(True)
            head = header[j].split("_")
        except Exception:
            logging.warning("Error in plotting data:\r\n", exc_info=True)
            return
        try:
            ylabel = head[-1]
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed here.
            ylabel = header[j]
        try:
            graph.set_ylabel(ylabel)
            # Sanitise the column name before using it in the file name.
            header[j] = header[j].replace('%', '')
            header[j] = header[j].replace('/', '')
            imgdir = IMG_DIR.rstrip("/")
            fig.savefig(imgdir + "/" + fn + header[j] + ".png", dpi=80, format='png', bbox_inches='tight')
            plt.close('all')
        except Exception:
            logging.warning("Error in saving plot image data\r\n:", exc_info=True)
            return
        j = j + 1
#routine for plotting data of the measurements file related to the hourly averages
def plot_file_h(filename,header):
global datacolsh
global rate
fn1 = filename.lstrip(DATA_DIR)
fn = fn1.rstrip("txt")
j = 0
for cols in header:
if j == 0:
j=j+1
continue
try:
xm2 = [datetime.strptime(d,"%d/%m/%Y_%H") for d in datacolsh[0]]
xm = mpl.dates.date2num(xm2)
fig = plt.figure()
graph = fig.add_subplot(111)
red_patch = mpatches.Patch(color='red', label=header[j])
graph.legend(handles=[red_patch])
hfmt = mpl.dates.DateFormatter("%d/%m/%Y_%H")
graph.xaxis.set_major_formatter(hfmt)
if len(datacolsh[j]) > len(xm):
df = len(datacolsh[j])-len(xm)
yp = datacolsh[j][:-df]
if len(xm) > len(datacolsh[j]):
df = len(xm)- len(datacolsh[j])
xm = xm[:-df]
if len(datacolsh[j]) == len(xm):
yp = datacolsh[j]
if rate == 0:
datacolsh = []
plt.close('all')
return
| |
with which to operate, a specification of the chemical index for the atom moving
(needs to be consistent with jumpnetwork and crys), and then the number of shells.
In this case, ``shells`` = number of successive "jumps" from a state. As an example,
in FCC, 1 shell = 1st neighbor, 2 shell = 1-4th neighbors.
"""
def __init__(self, jumpnetwork, crys, chem, Nshells=0, originstates=False, lattice=False):
"""
Initiates a star set generator for a given jumpnetwork, crystal, and specified
chemical index. Does not include "origin states" by default; these are PairStates that
iszero() is True; they are only needed if crystal has a nonzero VectorBasis.
:param jumpnetwork: list of symmetry unique jumps, as a list of list of tuples; either
``((i,j), dx)`` for jump from i to j with displacement dx, or
``((i,j), R)`` for jump from i in unit cell 0 -> j in unit cell R
:param crys: crystal where jumps take place
:param chem: chemical index of atom to consider jumps
:param Nshells: number of shells to generate
:param originstates: include origin states in generate?
:param lattice: which form does the jumpnetwork take?
"""
# jumpnetwork_index: list of lists of indices into jumplist; matches structure of jumpnetwork
# jumplist: list of jumps, as pair states (i=initial state, j=final state)
# states: list of pair states, out to Nshells
# Nstates: size of list
# stars: list of lists of indices into states; each list are states equivalent by symmetry
# Nstars: size of list
# index[Nstates]: index of star that state belongs to
# empty StarSet
if all(x is None for x in (jumpnetwork, crys, chem)): return
self.jumpnetwork_index = [] # list of list of indices into...
self.jumplist = [] # list of our jumps, as PairStates
ind = 0
for jlist in jumpnetwork:
self.jumpnetwork_index.append([])
for ij, v in jlist:
self.jumpnetwork_index[-1].append(ind)
ind += 1
if lattice:
PS = PairState.fromcrys_latt(crys, chem, ij, v)
else:
PS = PairState.fromcrys(crys, chem, ij, v)
self.jumplist.append(PS)
self.crys = crys
self.chem = chem
self.generate(Nshells, threshold=crys.threshold, originstates=originstates)
def __str__(self):
"""Human readable version"""
str = "Nshells: {} Nstates: {} Nstars: {}\n".format(self.Nshells, self.Nstates, self.Nstars)
for si in range(self.Nstars):
str += "Star {} ({})\n".format(si, len(self.stars[si]))
for i in self.stars[si]:
str += " {}: {}\n".format(i, self.states[i])
return str
    def generate(self, Nshells, threshold=1e-8, originstates=False):
        """
        Construct the points and the stars in the set. Does not include "origin states" by default; these
        are PairStates that iszero() is True; they are only needed if crystal has a nonzero VectorBasis.

        :param Nshells: number of shells to generate; this is interpreted as subsequent
          "sums" of jumplist (as we need the solute to be connected to the vacancy by at least one jump)
        :param threshold: threshold for determining equality with symmetry
        :param originstates: include origin states in generate?
        """
        # No-op if we already generated this number of shells.
        if Nshells == getattr(self, 'Nshells', -1): return
        self.Nshells = Nshells
        # Shell 1 is exactly the set of unique jumps.
        if Nshells > 0:
            stateset = set(self.jumplist)
        else:
            stateset = set([])
        lastshell = stateset.copy()
        if originstates:
            # One zero PairState per basis site of the chosen chemistry.
            for i in range(len(self.crys.basis[self.chem])):
                stateset.add(PairState.zero(i, self.crys.dim))
        # Each further shell: every state reachable by one more jump from the last shell.
        for i in range(Nshells - 1):
            # add all NNvect to last shell produced, always excluding 0
            # lastshell = [v1+v2 for v1 in lastshell for v2 in self.NNvect if not all(abs(v1 + v2) < threshold)]
            nextshell = set([])
            for s1 in lastshell:
                for s2 in self.jumplist:
                    # this try/except structure lets us attempt addition and kick out if not possible
                    # NOTE(review): bare except also hides unrelated errors from __add__ — consider
                    # catching the specific exception PairState addition raises.
                    try:
                        s = s1 + s2
                    except:
                        continue
                    if not s.iszero():
                        nextshell.add(s)
                        stateset.add(s)
            lastshell = nextshell
        # now to sort our set of vectors (easiest by magnitude, and then reduce down:
        self.states = sorted([s for s in stateset], key=PairState.sortkey)
        self.Nstates = len(self.states)
        if self.Nstates > 0:
            # Bucket the sorted states by |dx|^2 (within threshold); stars cannot
            # cross magnitude boundaries, so each bucket is processed independently.
            x2_indices = []
            x2old = np.dot(self.states[0].dx, self.states[0].dx)
            for i, x2 in enumerate([np.dot(st.dx, st.dx) for st in self.states]):
                if x2 > (x2old + threshold):
                    x2_indices.append(i)
                    x2old = x2
            x2_indices.append(len(self.states))
            # x2_indices now contains a list of indices with the same magnitudes
            self.stars = []
            xmin = 0
            for xmax in x2_indices:
                complist_stars = []  # for finding unique stars
                symmstate_list = []  # list of sets corresponding to those stars...
                for xi in range(xmin, xmax):
                    x = self.states[xi]
                    # is this a new rep. for a unique star?
                    match = False
                    for i, gs in enumerate(symmstate_list):
                        if x in gs:
                            # update star
                            # NOTE(review): `continue` here is presumably meant to be `break`;
                            # harmless if group orbits partition the states, but worth confirming.
                            complist_stars[i].append(xi)
                            match = True
                            continue
                    if not match:
                        # new symmetry point!
                        # The star's membership set is the full group orbit of x.
                        complist_stars.append([xi])
                        symmstate_list.append(set([x.g(self.crys, self.chem, g) for g in self.crys.G]))
                self.stars += complist_stars
                xmin = xmax
        else:
            self.stars = [[]]
        self.Nstars = len(self.stars)
        # generate index: which star is each state a member of?
        self.index = np.zeros(self.Nstates, dtype=int)
        self.indexdict = {}
        for si, star in enumerate(self.stars):
            for xi in star:
                self.index[xi] = si
                self.indexdict[self.states[xi]] = (xi, si)
def addhdf5(self, HDF5group):
"""
Adds an HDF5 representation of object into an HDF5group (needs to already exist).
Example: if f is an open HDF5, then StarSet.addhdf5(f.create_group('StarSet')) will
(1) create the group named 'StarSet', and then (2) put the StarSet representation in that group.
:param HDF5group: HDF5 group
"""
HDF5group.attrs['type'] = self.__class__.__name__
HDF5group.attrs['crystal'] = self.crys.__repr__()
HDF5group.attrs['chem'] = self.chem
HDF5group['Nshells'] = self.Nshells
# convert jumplist (list of PS) into arrays to store:
HDF5group['jumplist_ij'], HDF5group['jumplist_R'], HDF5group['jumplist_dx'] = \
PSlist2array(self.jumplist)
HDF5group['jumplist_Nunique'] = len(self.jumpnetwork_index)
jumplistinvmap = np.zeros(len(self.jumplist), dtype=int)
for j, jlist in enumerate(self.jumpnetwork_index):
for i in jlist: jumplistinvmap[i] = j
HDF5group['jumplist_invmap'] = jumplistinvmap
# convert states into arrays to store:
HDF5group['states_ij'], HDF5group['states_R'], HDF5group['states_dx'] = \
PSlist2array(self.states)
HDF5group['states_index'] = self.index
@classmethod
def loadhdf5(cls, crys, HDF5group):
"""
Creates a new StarSet from an HDF5 group.
:param crys: crystal object--MUST BE PASSED IN as it is not stored with the StarSet
:param HDFgroup: HDF5 group
:return StarSet: new StarSet object
"""
SSet = cls(None, None, None) # initialize
SSet.crys = crys
SSet.chem = HDF5group.attrs['chem']
SSet.Nshells = HDF5group['Nshells'][()]
SSet.jumplist = array2PSlist(HDF5group['jumplist_ij'][()],
HDF5group['jumplist_R'][()],
HDF5group['jumplist_dx'][()])
SSet.jumpnetwork_index = [[] for n in range(HDF5group['jumplist_Nunique'][()])]
for i, jump in enumerate(HDF5group['jumplist_invmap'][()]):
SSet.jumpnetwork_index[jump].append(i)
SSet.states = array2PSlist(HDF5group['states_ij'][()],
HDF5group['states_R'][()],
HDF5group['states_dx'][()])
SSet.Nstates = len(SSet.states)
SSet.index = HDF5group['states_index'][()]
# construct the states, and the index dictionary:
SSet.Nstars = max(SSet.index) + 1
SSet.stars = [[] for n in range(SSet.Nstars)]
SSet.indexdict = {}
for xi, si in enumerate(SSet.index):
SSet.stars[si].append(xi)
SSet.indexdict[SSet.states[xi]] = (xi, si)
return SSet
def copy(self, empty=False):
"""Return a copy of the StarSet; done as efficiently as possible; empty means skip the shells, etc."""
newStarSet = self.__class__(None, None, None) # a little hacky... creates an empty class
newStarSet.jumpnetwork_index = copy.deepcopy(self.jumpnetwork_index)
newStarSet.jumplist = self.jumplist.copy()
newStarSet.crys = self.crys
newStarSet.chem = self.chem
if not empty:
newStarSet.Nshells = self.Nshells
newStarSet.stars = copy.deepcopy(self.stars)
newStarSet.states = self.states.copy()
newStarSet.Nstars = self.Nstars
newStarSet.Nstates = self.Nstates
newStarSet.index = self.index.copy()
newStarSet.indexdict = self.indexdict.copy()
else:
newStarSet.generate(0)
return newStarSet
# removed combine; all it does is generate(s1.Nshells + s2.Nshells) with lots of checks...
# replaced with (more efficient?) __add__ and __iadd__.
def __add__(self, other):
"""Add two StarSets together; done by making a copy of one, and iadding"""
if not isinstance(other, self.__class__): return NotImplemented
if self.Nshells >= other.Nshells:
scopy = self.copy()
scopy.__iadd__(other)
else:
scopy = other.copy()
scopy.__iadd__(self)
return scopy
def __iadd__(self, other):
"""Add another StarSet to this one; very similar to generate()"""
threshold = 1e-8
if not isinstance(other, self.__class__): return NotImplemented
if self.chem != other.chem: return ArithmeticError('Cannot add different chemistry index')
if other.Nshells < 1: return self
if self.Nshells < 1:
self.Nshells = other.Nshells
self.stars = copy.deepcopy(other.stars)
self.states = other.states.copy()
self.Nstars = other.Nstars
self.Nstates = other.Nstates
self.index = other.index.copy()
self.indexdict = other.indexdict.copy()
return self
self.Nshells += other.Nshells
Nold = self.Nstates
oldstateset = set(self.states)
newstateset = set([])
for s1 in self.states[:Nold]:
for s2 in other.states:
# this try/except structure lets us attempt addition and kick out if not possible
try:
s = s1 + s2
except:
continue
if not s.iszero() and not s in oldstateset: newstateset.add(s)
# now to sort our set of vectors (easiest by magnitude, and then reduce down:
self.states += sorted([s for s in newstateset], key=PairState.sortkey)
Nnew = len(self.states)
x2_indices = []
x2old = np.dot(self.states[Nold].dx, self.states[Nold].dx)
for i in range(Nold, Nnew):
| |
# datahub/search/investment/test/test_views.py (from Staberinde/data-hub-api)
import datetime
from cgi import parse_header
from collections import Counter
from csv import DictReader
from decimal import Decimal
from io import StringIO
from unittest import mock
from uuid import UUID
import factory
import pytest
from dateutil.parser import parse as dateutil_parse
from django.conf import settings
from django.utils.timezone import utc
from freezegun import freeze_time
from rest_framework import status
from rest_framework.reverse import reverse
from datahub.company.models import OneListTier
from datahub.company.test.factories import AdviserFactory, CompanyFactory
from datahub.core import constants
from datahub.core.test_utils import (
APITestMixin,
create_test_user,
format_csv_data,
get_attr_or_none,
join_attr_values,
random_obj_for_queryset,
)
from datahub.feature_flag.test.factories import FeatureFlagFactory
from datahub.investment.project.constants import Involvement, LikelihoodToLand
from datahub.investment.project.models import InvestmentProject, InvestmentProjectPermission
from datahub.investment.project.test.factories import (
GVAMultiplierFactory,
InvestmentProjectFactory,
InvestmentProjectTeamMemberFactory,
VerifyWinInvestmentProjectFactory,
WonInvestmentProjectFactory,
)
from datahub.metadata.models import Sector
from datahub.metadata.test.factories import TeamFactory
from datahub.search.investment import InvestmentSearchApp
from datahub.search.investment.views import SearchInvestmentExportAPIView
# Marks applied to every test in this module.
pytestmark = [
    # All tests need database access.
    pytest.mark.django_db,
    # Index objects for this search app only
    pytest.mark.es_collector_apps.with_args(InvestmentSearchApp),
]
@pytest.fixture
def project_with_max_gross_value_added():
    """Test fixture returns an investment project with the max gross value.

    The GVA multiplier lookup is patched so the project gets a near-10x
    multiplier on a very large foreign equity investment, producing the
    maximum gross value added used by the GVA filter tests.
    """
    gva_multiplier = GVAMultiplierFactory(
        multiplier=Decimal('9.999999'),
        financial_year=1980,
    )
    with mock.patch(
        'datahub.investment.project.gva_utils.GrossValueAddedCalculator._get_gva_multiplier',
    ) as mock_get_multiplier:
        mock_get_multiplier.return_value = gva_multiplier
        project = InvestmentProjectFactory(
            investment_type_id=constants.InvestmentType.fdi.value.id,
            name='won project',
            description='investmentproject3',
            estimated_land_date=datetime.date(2027, 9, 13),
            actual_land_date=datetime.date(2022, 11, 13),
            investor_company=CompanyFactory(
                address_country_id=constants.Country.united_kingdom.value.id,
            ),
            project_manager=AdviserFactory(),
            project_assurance_adviser=AdviserFactory(),
            fdi_value_id=constants.FDIValue.higher.value.id,
            status=InvestmentProject.Status.WON,
            uk_region_locations=[
                constants.UKRegion.north_west.value.id,
            ],
            level_of_involvement_id=Involvement.hq_only.value.id,
            likelihood_to_land_id=None,
            foreign_equity_investment=9999999999999999999,
        )
    return project
@pytest.fixture
def setup_data(es_with_collector, project_with_max_gross_value_added):
    """Sets up data for the tests.

    Creates four projects with distinct names, dates, countries, statuses and
    involvement levels so that each filter test can pick out specific ones.
    """
    investment_projects = [
        # Ongoing FDI project, US investor, 'involved' level of involvement.
        InvestmentProjectFactory(
            investment_type_id=constants.InvestmentType.fdi.value.id,
            name='abc defg',
            description='investmentproject1',
            estimated_land_date=datetime.date(2011, 6, 13),
            actual_land_date=datetime.date(2010, 8, 13),
            investor_company=CompanyFactory(
                address_country_id=constants.Country.united_states.value.id,
                address_area_id=constants.AdministrativeArea.texas.value.id,
            ),
            status=InvestmentProject.Status.ONGOING,
            uk_region_locations=[
                constants.UKRegion.east_midlands.value.id,
                constants.UKRegion.isle_of_man.value.id,
            ],
            level_of_involvement_id=Involvement.hq_and_post_only.value.id,
            likelihood_to_land_id=LikelihoodToLand.high.value.id,
            foreign_equity_investment=100000,
        ),
        # Delayed FDI project originating from Ireland with a Japanese investor.
        InvestmentProjectFactory(
            investment_type_id=constants.InvestmentType.fdi.value.id,
            name='delayed project',
            description='investmentproject2',
            estimated_land_date=datetime.date(2057, 6, 13),
            actual_land_date=datetime.date(2047, 8, 13),
            country_investment_originates_from_id=constants.Country.ireland.value.id,
            investor_company=CompanyFactory(
                address_country_id=constants.Country.japan.value.id,
            ),
            project_manager=AdviserFactory(),
            project_assurance_adviser=AdviserFactory(),
            fdi_value_id=constants.FDIValue.higher.value.id,
            status=InvestmentProject.Status.DELAYED,
            uk_region_locations=[
                constants.UKRegion.north_west.value.id,
            ],
            level_of_involvement_id=Involvement.no_involvement.value.id,
            likelihood_to_land_id=LikelihoodToLand.medium.value.id,
        ),
        # Won project with the maximum gross value added (see fixture above).
        project_with_max_gross_value_added,
        # Minimal project from Canada with unspecified involvement.
        InvestmentProjectFactory(
            name='new project',
            description='investmentproject4',
            country_investment_originates_from_id=constants.Country.canada.value.id,
            estimated_land_date=None,
            level_of_involvement_id=None,
            likelihood_to_land_id=LikelihoodToLand.low.value.id,
        ),
    ]
    es_with_collector.flush_and_refresh()
    yield investment_projects
@pytest.fixture
def created_on_data(es_with_collector):
    """Create projects at a spread of frozen creation dates for the created_on filter tests."""
    dates = (
        '2015-01-01', '2016-09-12', '2017-09-12', '2048-02-04', '2048-01-24',
    )
    investment_projects = []
    for creation_date in dates:
        # freeze_time makes auto_now_add stamp the project with this date.
        with freeze_time(creation_date):
            investment_projects.append(InvestmentProjectFactory())
    es_with_collector.flush_and_refresh()
    yield investment_projects
class TestSearch(APITestMixin):
    """Tests search views."""
    def test_search_investment_project_json(self, setup_data):
        """Tests that a free-text original_query returns only the matching project."""
        url = reverse('api-v3:search:investment_project')
        response = self.api_client.post(
            url,
            data={
                'original_query': 'abc defg',
            },
        )
        assert response.status_code == status.HTTP_200_OK
        assert response.data['count'] == 1
        assert len(response.data['results']) == 1
        assert response.data['results'][0]['name'] == 'abc defg'
    @pytest.mark.parametrize(
        'search,expected_gross_value_added,expected_project_name',
        (
            (
                {
                    'gross_value_added_start': 99999999999999,
                },
                # Only the 'won project' (max GVA fixture) exceeds this lower bound.
                ['99999989999999999990'],
                ['won project'],
            ),
            (
                {
                    'gross_value_added_end': 99999999999999,
                },
                ['5810'],
                ['abc defg'],
            ),
            (
                {
                    'gross_value_added_start': 0,
                    'gross_value_added_end': 6000,
                },
                ['5810'],
                ['abc defg'],
            ),
            (
                {
                    'gross_value_added_start': 20000000000000000000000,
                },
                [],
                [],
            ),
        ),
    )
    def test_gross_value_added_filters(
        self,
        setup_data,
        search,
        expected_gross_value_added,
        expected_project_name,
    ):
        """Test Gross Value Added (GVA) range filters (start/end bounds are inclusive)."""
        url = reverse('api-v3:search:investment_project')
        response = self.api_client.post(
            url,
            data=search,
        )
        assert response.status_code == status.HTTP_200_OK
        # Compare as Counters: result order is not guaranteed.
        assert (
            Counter(
                str(Decimal(result['gross_value_added'])) for result in response.data['results']
            ) == Counter(expected_gross_value_added)
        ), expected_gross_value_added
        assert (
            Counter(result['name'] for result in response.data['results'])
            == Counter(expected_project_name)
        ), expected_project_name
    def test_search_adviser_filter(self, es_with_collector):
        """Tests the adviser filter.

        The filter should match a project when the adviser is a team member,
        the creator, the client relationship manager, the project manager or
        the project assurance adviser -- and return each project only once.
        """
        adviser = AdviserFactory()
        # Non-matching projects
        project_other_1 = InvestmentProjectFactory()
        InvestmentProjectTeamMemberFactory(investment_project=project_other_1)
        InvestmentProjectTeamMemberFactory(investment_project=project_other_1)
        InvestmentProjectFactory()
        # Matching projects
        project_1 = InvestmentProjectFactory()
        InvestmentProjectTeamMemberFactory(adviser=adviser, investment_project=project_1)
        InvestmentProjectTeamMemberFactory(investment_project=project_1)
        project_2 = InvestmentProjectFactory(created_by=adviser)
        project_3 = InvestmentProjectFactory(client_relationship_manager=adviser)
        project_4 = InvestmentProjectFactory(project_manager=adviser)
        project_5 = InvestmentProjectFactory(project_assurance_adviser=adviser)
        # Should only be returned once
        project_6 = InvestmentProjectFactory(
            created_by=adviser,
            client_relationship_manager=adviser,
            project_assurance_adviser=adviser,
            project_manager=adviser,
        )
        InvestmentProjectTeamMemberFactory(adviser=adviser, investment_project=project_6)
        es_with_collector.flush_and_refresh()
        url = reverse('api-v3:search:investment_project')
        response = self.api_client.post(
            url,
            data={
                'adviser': adviser.pk,
            },
        )
        assert response.status_code == status.HTTP_200_OK
        response_data = response.json()
        assert response_data['count'] == 6
        results = response_data['results']
        expected_ids = {
            str(project_1.pk), str(project_2.pk), str(project_3.pk),
            str(project_4.pk), str(project_5.pk), str(project_6.pk),
        }
        assert {result['id'] for result in results} == expected_ids
    @pytest.mark.parametrize(
        'query,num_results',
        (
            (
                {
                    'estimated_land_date_before': '2017-06-13',
                },
                1,
            ),
            (
                {
                    'estimated_land_date_after': '2017-06-13',
                },
                2,
            ),
            (
                {
                    'estimated_land_date_after': '2017-06-13',
                    'estimated_land_date_before': '2030-06-13',
                },
                1,
            ),
            (
                # Contradictory bounds: no project can match.
                {
                    'estimated_land_date_before': '2017-06-13',
                    'estimated_land_date_after': '2030-06-13',
                },
                0,
            ),
        ),
    )
    def test_search_investment_project_estimated_land_date_json(
        self,
        setup_data,
        query,
        num_results,
    ):
        """Tests the estimated land date before/after filters."""
        url = reverse('api-v3:search:investment_project')
        response = self.api_client.post(url, query)
        assert response.status_code == status.HTTP_200_OK
        assert response.data['count'] == num_results
        results = response.data['results']
        assert len(results) == num_results
        # Every returned project must satisfy each bound in the query.
        for result in results:
            estimated_land_date = dateutil_parse(result['estimated_land_date'])
            for filter_key, date in query.items():
                date = dateutil_parse(date)
                if filter_key == 'estimated_land_date_before':
                    assert estimated_land_date <= date
                if filter_key == 'estimated_land_date_after':
                    assert estimated_land_date >= date
    @pytest.mark.parametrize(
        'query,expected_results',
        (
            (
                {
                    'actual_land_date_before': '2010-12-13',
                },
                [
                    'abc defg',
                ],
            ),
            (
                # Bound equals 'won project's actual land date: bounds are inclusive.
                {
                    'actual_land_date_before': '2022-11-13',
                },
                [
                    'abc defg',
                    'won project',
                ],
            ),
            (
                {
                    'actual_land_date_after': '2010-12-13',
                },
                [
                    'delayed project',
                    'won project',
                ],
            ),
            (
                {
                    'actual_land_date_after': '2022-11-13',
                },
                [
                    'delayed project',
                    'won project',
                ],
            ),
            (
                {
                    'actual_land_date_after': '2010-12-13',
                    'actual_land_date_before': '2025-06-13',
                },
                [
                    'won project',
                ],
            ),
            (
                # Contradictory bounds: no project can match.
                {
                    'actual_land_date_before': '2010-12-13',
                    'actual_land_date_after': '2025-06-13',
                },
                [],
            ),
        ),
    )
    def test_search_investment_project_actual_land_date_json(
        self,
        setup_data,
        query,
        expected_results,
    ):
        """Tests the actual land date filter."""
        url = reverse('api-v3:search:investment_project')
        response = self.api_client.post(url, query)
        assert response.status_code == status.HTTP_200_OK
        assert response.data['count'] == len(expected_results)
        results = response.data['results']
        # Compare as Counters: result order is not guaranteed.
        assert Counter(result['name'] for result in results) == Counter(expected_results)
    @pytest.mark.parametrize(
        'query,num_results',
        (
            (
                {
                    'created_on_before': '2016-09-13T09:44:31.062870Z',
                },
                2,
            ),
            (
                {
                    'created_on_before': '2016-09-12T00:00:00.000000Z',
                },
                2,
            ),
            (
                {
                    'created_on_after': '2017-06-13T09:44:31.062870Z',
                },
                3,
            ),
            (
                {
                    'created_on_after': '2016-09-12T00:00:00.000000Z',
                },
                4,
            ),
            (
                {
                    'created_on_after': '2017-06-13T09:44:31.062870Z',
                    'created_on_before': '2048-02-01T05:44:31.062870Z',
                },
                2,
            ),
            (
                # Contradictory bounds: no project can match.
                {
                    'created_on_before': '2017-06-13T09:44:31.062870Z',
                    'created_on_after': '2048-02-01T05:44:31.062870Z',
                },
                0,
            ),
        ),
    )
    def test_search_investment_project_created_on_json(self, created_on_data, query, num_results):
        """Tests the created_on before/after filters against the created_on_data fixture."""
        url = reverse('api-v3:search:investment_project')
        response = self.api_client.post(url, query)
        assert response.status_code == status.HTTP_200_OK
        assert response.data['count'] == num_results
        results = response.data['results']
        assert len(results) == num_results
        # Every returned project must satisfy each bound in the query.
        for result in results:
            created_on = dateutil_parse(result['created_on']).replace(tzinfo=utc)
            for filter_key, date in query.items():
                date = dateutil_parse(date)
                if filter_key == 'created_on_before':
                    assert created_on <= date
                if filter_key == 'created_on_after':
                    assert created_on >= date
def test_search_investment_project_invalid_date_json(self, setup_data):
"""Tests detailed investment project search."""
url = reverse('api-v3:search:investment_project')
response = self.api_client.post(
url,
data={
'estimated_land_date_before': 'this is definitely not a valid date',
},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {'estimated_land_date_before': ['Date is in incorrect format.']}
def test_search_investment_project_status(self, setup_data):
"""Tests investment project search status filter."""
url = reverse('api-v3:search:investment_project')
response = self.api_client.post(
url,
data={
'status': ['delayed', 'won'],
},
)
assert response.status_code == status.HTTP_200_OK
assert response.data['count'] == 2
assert len(response.data['results']) == 2
statuses = {result['status'] for result in response.data['results']}
assert statuses == {'delayed', 'won'}
def test_search_investment_project_investor_country(self, setup_data):
"""Tests investor company country filter."""
url = reverse('api-v3:search:investment_project')
response = self.api_client.post(
url,
data={
'investor_company_country': constants.Country.japan.value.id,
},
)
assert response.status_code == status.HTTP_200_OK
assert response.data['count'] == 1
assert len(response.data['results']) == 1
assert response.data['results'][0]['name'] == 'delayed project'
    def test_search_investment_project_country_investment_originates_from_filter(self, setup_data):
        """Tests country investment originates from filter."""
        url = reverse('api-v3:search:investment_project')
        # NOTE(review): 'abc defg' has no explicit country_investment_originates_from
        # in setup_data, only a US investor company -- presumably the search document
        # falls back to the investor company's country; confirm against the mapping.
        response = self.api_client.post(
            url,
            data={
                'country_investment_originates_from': constants.Country.united_states.value.id,
            },
        )
        assert response.status_code == status.HTTP_200_OK
        assert response.data['count'] == 1
        assert len(response.data['results']) == 1
        assert response.data['results'][0]['name'] == 'abc defg'
    def test_search_investment_project_investor_country_when_investment_origin_set(
        self, setup_data,
    ):
        """Tests investor company country filter when investment origin also set."""
        url = reverse('api-v3:search:investment_project')
        # NOTE(review): 'delayed project' has a Japanese investor company but
        # country_investment_originates_from=Ireland; filtering by Ireland here
        # matches it, so this filter appears to prefer the origin country when
        # set -- confirm against the search mapping.
        response = self.api_client.post(
            url,
            data={
                'investor_company_country': constants.Country.ireland.value.id,
            },
        )
        assert response.status_code == status.HTTP_200_OK
        assert response.data['count'] == 1
        assert len(response.data['results']) == 1
        assert response.data['results'][0]['name'] == 'delayed project'
def test_search_investment_project_investment_origin(
self, setup_data,
):
"""Tests country investment originates from filter."""
url = reverse('api-v3:search:investment_project')
response = self.api_client.post(
url,
data={
'country_investment_originates_from': constants.Country.canada.value.id,
},
)
assert response.status_code == status.HTTP_200_OK
assert response.data['count'] == 1
assert len(response.data['results']) == 1
assert response.data['results'][0]['name'] == 'new project'
    @pytest.mark.parametrize(
        'query,expected_results',
        (
            (
                {
                    'level_of_involvement_simplified': 'unspecified',
                },
                [
                    'new project',
                ],
            ),
            (
                {
                    'level_of_involvement_simplified': ['unspecified', 'involved'],
                },
                [
                    'new project',
                    'abc defg',
                    'won project',
                ],
            ),
            (
                {
                    'level_of_involvement_simplified': ['not_involved', 'involved'],
                },
                [
                    'abc defg',
                    'delayed project',
                    'won project',
                ],
            ),
            (
                {
                    'level_of_involvement_simplified': 'involved',
                },
                [
                    'abc defg',
                    'won project',
                ],
            ),
            (
                {
                    'level_of_involvement_simplified': 'not_involved',
                },
                [
                    'delayed project',
                ],
            ),
            (
                {
                    'level_of_involvement_simplified': ['unspecified', 'not_involved'],
                },
                [
                    'new project',
                    'delayed project',
                ],
            ),
            (
                # The likelihood_to_land filter is also exercised here.
                {
                    'likelihood_to_land': LikelihoodToLand.low.value.id,
                },
                [
                    'new project',
                ],
            ),
            (
                {
                    'likelihood_to_land': [
                        LikelihoodToLand.low.value.id,
                        LikelihoodToLand.medium.value.id,
                    ],
                },
                [
                    'new project', 'delayed project',
                ],
            ),
            (
                # No filter: all four setup_data projects are returned.
                {
                },
                [
                    'abc defg',
                    'delayed project',
                    'won project',
                    'new project',
                ],
            ),
        ),
    )
    def test_search_involvement_json(
        self,
        setup_data,
        query,
        expected_results,
    ):
        """Tests the simplified level-of-involvement and likelihood-to-land filters."""
        url = reverse('api-v3:search:investment_project')
        response = self.api_client.post(url, query)
        assert response.status_code == status.HTTP_200_OK
        assert response.data['count'] == len(expected_results)
        results = response.data['results']
        # Compare as Counters: result order is not guaranteed.
        assert Counter(result['name'] for result in results) == Counter(expected_results)
@pytest.mark.parametrize(
'query,expected_error',
(
(
{
'level_of_involvement_simplified': 'unspecified1',
},
{
'level_of_involvement_simplified': ['"unspecified1" is not a valid choice.'],
},
),
(
{
'level_of_involvement_simplified': ['unspecified5', 'great_involvement'],
},
{
'level_of_involvement_simplified': {
'0': ['"unspecified5" is not a valid choice.'],
'1': ['"great_involvement" is not a valid choice.'],
},
},
),
(
{
'level_of_involvement_simplified': ['not_involved', | |
= causal_effect_scale / ate * y1
y0 = causal_effect_scale / ate * y0
if ret_counterfactuals:
return y0, y1
else:
return y0 * (1 - t) + y1 * t
def set_seed(self, seed=SEED):
torch.manual_seed(seed)
np.random.seed(seed)
    def sample(self, w=None, transform_w=True, untransform=True, seed=None, dataset=TRAIN, overlap=1,
               causal_effect_scale=None, deg_hetero=1.0, ret_counterfactuals=False):
        """
        Sample from generative model.

        :param w: covariates (confounders); sampled from the model if None
        :param transform_w: whether to transform the w (if given)
        :param untransform: whether to transform the data back to the raw scale
        :param seed: random seed
        :param dataset: train or test for sampling w from
        :param overlap: if 1, leave treatment untouched;
            if 0, push p(T = 1 | w) to 0 for all w where p(T = 1 | w) < 0.5 and
            and push p(T = 1 | w) to 1 for all w where p(T = 1 | w) >= 0.5
            if 0 < overlap < 1, do a linear interpolation of the above
        :param causal_effect_scale: scale of the causal effect (size of ATE)
        :param deg_hetero: degree of heterogeneity (between 0 and 1)
            When deg_hetero=1, y1 and y0 remain unchanged. When deg_hetero=0,
            y1 - y0 is the same for all individuals.
        :param ret_counterfactuals: return counterfactuals if True
        :return: (w, t, y), or (w, t, (y0, y1)) if ret_counterfactuals is True
        """
        if seed is not None:
            self.set_seed(seed)
        # Internally everything works on the transformed scale; w is either
        # freshly sampled (already transformed) or transformed on request.
        if w is None:
            w = self.sample_w(untransform=False, dataset=dataset)
        elif transform_w:
            w = self.w_transform.transform(w)
        t = self.sample_t(w, untransform=False, overlap=overlap)
        if ret_counterfactuals:
            # Both potential outcomes are returned as a (y0, y1) pair.
            y0, y1 = self.sample_y(
                t, w, untransform=False, causal_effect_scale=causal_effect_scale,
                deg_hetero=deg_hetero, ret_counterfactuals=True
            )
            if untransform:
                return (self.w_transform.untransform(w), self.t_transform.untransform(t),
                        (self.y_transform.untransform(y0), self.y_transform.untransform(y1)))
            else:
                return w, t, (y0, y1)
        else:
            y = self.sample_y(t, w, untransform=False,
                              causal_effect_scale=causal_effect_scale,
                              deg_hetero=deg_hetero, ret_counterfactuals=False)
            if untransform:
                return self.w_transform.untransform(w), self.t_transform.untransform(t), self.y_transform.untransform(y)
            else:
                return w, t, y
    def sample_interventional(self, t, w=None, seed=None, causal_effect_scale=None, deg_hetero=1.0):
        """Sample outcomes under the intervention do(T = t).

        :param t: treatment value(s); a scalar is broadcast to the shape of self.t
        :param w: covariates; sampled from the model (transformed scale) if None
        :param seed: random seed
        :param causal_effect_scale: scale of the causal effect (size of ATE)
        :param deg_hetero: degree of heterogeneity (between 0 and 1)
        :return: sampled outcomes y
        :raises ValueError: if w is a plain number (only numpy arrays supported)
        """
        if seed is not None:
            self.set_seed(seed)
        if w is None:
            w = self.sample_w(untransform=False)
        if isinstance(w, Number):
            raise ValueError('Unsupported data type: {} ... only numpy is currently supported'.format(type(w)))
        if isinstance(t, Number):
            # Broadcast a scalar treatment to one value per unit.
            t = np.full_like(self.t, t)
        return self.sample_y(t, w, causal_effect_scale=causal_effect_scale, deg_hetero=deg_hetero)
def ate(self, t1=1, t0=0, w=None, noisy=True, untransform=True, transform_t=True, n_y_per_w=100,
causal_effect_scale=None, deg_hetero=1.0):
return self.ite(t1=t1, t0=t0, w=w, noisy=noisy, untransform=untransform,
transform_t=transform_t, n_y_per_w=n_y_per_w,
causal_effect_scale=causal_effect_scale,
deg_hetero=deg_hetero).mean()
def noisy_ate(self, t1=1, t0=0, w=None, n_y_per_w=100, seed=None, transform_w=False):
if w is not None and transform_w:
w = self.w_transform.transform(w)
# Note: bad things happen if w is not transformed and transform_w is False
if seed is not None:
self.set_seed(seed)
if (isinstance(t1, Number) or isinstance(t0, Number)) and w is not None:
t_shape = list(self.t.shape)
t_shape[0] = w.shape[0]
t1 = np.full(t_shape, t1)
t0 = np.full(t_shape, t0)
total = 0
for _ in range(n_y_per_w):
total += (self.sample_interventional(t=t1, w=w) -
self.sample_interventional(t=t0, w=w)).mean()
return total / n_y_per_w
    def att(self, t1=1, t0=0, w=None, untransform=True, transform_t=True):
        """Average treatment effect on the treated.

        NOTE(review): not implemented yet -- currently a stub that returns None.
        The commented-out code below sketches the intended implementation via ite().
        """
        pass
        # TODO
        # return self.ite(t1=t1, t0=t0, w=w, untransform=untransform,
        #                 transform_t=transform_t).mean()
    def ite(self, t1=1, t0=0, w=None, t=None, untransform=True, transform_t=True, transform_w=True,
            estimand="all", noisy=True, seed=None, n_y_per_w=100,
            causal_effect_scale=None, deg_hetero=1.0):
        """Individual treatment effects y(t1) - y(t0), one value per unit in w.

        :param t1: "treatment" value (scalar broadcast to the shape of self.t)
        :param t0: "control" value
        :param w: covariates; sampled from the model (transformed scale) if None
        :param t: observed treatments, used to subset w for att/atc estimands
        :param untransform: currently unused here -- untransforming happens inside
            sample_interventional -> sample_y (see commented-out code below)
        :param transform_t: apply t_transform to t1/t0 first
        :param transform_w: currently unused (see commented-out code below)
        :param estimand: 'all'/'ate', 'treated'/'att', or 'control'/'atc'
        :param noisy: if True, Monte-Carlo average over n_y_per_w samples;
            otherwise use the model's mean_y
        :param seed: random seed
        :param n_y_per_w: number of Monte-Carlo outcome draws per unit
        :param causal_effect_scale: scale of the causal effect (noisy mode only)
        :param deg_hetero: degree of heterogeneity (noisy mode only)
        :return: numpy vector of individual treatment effects
        :raises ValueError: for an unknown estimand, or non-default
            causal_effect_scale/deg_hetero when noisy is False
        """
        if seed is not None:
            self.set_seed(seed)
        if w is None:
            # w = self.w_transformed
            w = self.sample_w(untransform=False)
            t = self.t
        estimand = estimand.lower()
        if estimand == "all" or estimand == "ate":
            pass
        elif estimand == "treated" or estimand == "att":
            # Restrict to units that were actually treated.
            w = w[t == 1]
        elif estimand == "control" or estimand == "atc":
            w = w[t == 0]
        else:
            raise ValueError("Invalid estimand: {}".format(estimand))
        if transform_t:
            t1 = self.t_transform.transform(t1)
            t0 = self.t_transform.transform(t0)
            # Note: check that this is an identity transformation
        if isinstance(t1, Number) or isinstance(t0, Number):
            # Broadcast scalar treatments to one value per row of w.
            t_shape = list(self.t.shape)
            t_shape[0] = w.shape[0]
            t1 = np.full(t_shape, t1)
            t0 = np.full(t_shape, t0)
        # if transform_w:
        #     w = self.w_transform.transform(w)
        if noisy:
            # Monte-Carlo estimate: accumulate per-unit outcomes under both treatments.
            y1_total = np.zeros(w.shape[0])
            y0_total = np.zeros(w.shape[0])
            for _ in range(n_y_per_w):
                y1_total += to_np_vector(self.sample_interventional(
                    t=t1, w=w,causal_effect_scale=causal_effect_scale, deg_hetero=deg_hetero))
                y0_total += to_np_vector(self.sample_interventional(
                    t=t0, w=w, causal_effect_scale=causal_effect_scale, deg_hetero=deg_hetero))
            y_1 = y1_total / n_y_per_w
            y_0 = y0_total / n_y_per_w
        else:
            if causal_effect_scale is not None or deg_hetero != 1.0:
                raise ValueError('Invalid causal_effect_scale or deg_hetero. '
                                 'Current mean_y only supports defaults.')
            y_1 = to_np_vector(self.mean_y(t=t1, w=w))
            y_0 = to_np_vector(self.mean_y(t=t0, w=w))
        # This is already done in sample_interventional --> sample_y
        # TODO: add this argument to sample_interventional and pass it to sample_y
        # if untransform:
        #     y_1 = self.y_transform.untransform(y_1)
        #     y_0 = self.y_transform.untransform(y_0)
        return y_1 - y_0
def plot_ty_dists(self, joint=True, marginal_hist=True, marginal_qq=True,
dataset=TRAIN, transformed=False, verbose=True,
title=True, name=None, file_ext='pdf', thin_model=None,
thin_true=None, joint_kwargs={}, test=False, seed=None):
"""
Creates up to 3 different plots of the real data and the corresponding model
:param joint: boolean for whether to plot p(t, y)
:param marginal_hist: boolean for whether to plot the p(t) and p(y) histograms
:param marginal_qq: boolean for whether to plot the p(y) Q-Q plot
or use 'both' for plotting both the p(t) and p(y) Q-Q plots
:param dataset: dataset subset to use (train, val, or test)
:param transformed: If True, use transformed version of data.
If False, use original (non-transformed) version of data.
:param title: boolean for whether or not to include title in plots
:param name: name to use in plot titles and saved files defaults to name of class
:param file_ext: file extension to for saving plots (e.g. 'pdf', 'png', etc.)
:param thin_model: thinning interval for the model data
:param thin_true: thinning interval for the real data
:param joint_kwargs: kwargs passed to sns.kdeplot() for p(t, y)
:param test: if True, does not show or save plots
:param seed: seed for sample from generative model
:return:
"""
if name is None:
name = self.__class__.__name__
_, t_model, y_model = to_np_vectors(self.sample(seed=seed, untransform=(not transformed)),
thin_interval=thin_model)
_, t_true, y_true = self.get_data(transformed=transformed, dataset=dataset, verbose=verbose)
t_true, y_true = to_np_vectors((t_true, y_true), thin_interval=thin_true)
plots = []
if joint:
fig1 = compare_joints(t_model, y_model, t_true, y_true,
xlabel1=T_MODEL_LABEL, ylabel1=Y_MODEL_LABEL,
xlabel2=T_TRUE_LABEL, ylabel2=Y_TRUE_LABEL,
xlabel=T, ylabel=Y,
label1=MODEL_LABEL, label2=TRUE_LABEL,
save_fname='{}_ty_joints.{}'.format(name, file_ext),
title=title, name=name, test=test, kwargs=joint_kwargs)
plots += [fig1]
if marginal_hist or marginal_qq:
plots += compare_bivariate_marginals(t_true, t_model, y_true, y_model,
xlabel=T, ylabel=Y,
label1=TRUE_LABEL, label2=MODEL_LABEL,
hist=marginal_hist, qqplot=marginal_qq,
save_hist_fname='{}_ty_marginal_hists.{}'.format(name, file_ext),
save_qq_fname='{}_ty_marginal_qqplots.{}'.format(name, file_ext),
title=title, name=name, test=test)
return plots
def get_univariate_quant_metrics(self, dataset=TRAIN, transformed=False, verbose=True,
thin_model=None, thin_true=None, seed=None, n=None):
"""
Calculates quantitative metrics for the difference between p(t) and
p_model(t) and the difference between p(y) and p_model(y)
:param dataset: dataset subset to evaluate on (train, val, or test)
:param transformed: If True, use transformed version of data.
If False, use original (non-transformed) version of data.
:param thin_model: thinning interval for the model data
:param thin_true: thinning interval for the real data
:param seed: seed for sample from generative model
:return: {
't_ks_pval': ks p-value with null that t_model and t_true are from the same distribution
'y_ks_pval': ks p-value with null that y_model and y_true are from the same distribution
't_wasserstein1_dist': wasserstein1 distance between t_true and t_model
'y_wasserstein1_dist': wasserstein1 distance between y_true and y_model
}
"""
_, t_model, y_model = to_np_vectors(
self.sample(seed=seed, untransform=(not transformed)),
thin_interval=thin_model
)
_, t_true, y_true = self.get_data(transformed=transformed, dataset=dataset, verbose=verbose)
t_true, y_true = to_np_vectors((t_true, y_true), thin_interval=thin_true)
# jitter for numerical stability
t_true = t_true.copy() + np.random.rand(*t_true.shape) * 1e-6
t_model = t_model.copy() + np.random.rand(*t_model.shape) * 1e-6
ks_label = "_ks_pval"
es_label = "_es_pval"
wasserstein_label = "_wasserstein1_dist"
metrics = {
T + ks_label: float(stats.ks_2samp(t_model, t_true).pvalue),
Y + ks_label: float(stats.ks_2samp(y_model, y_true).pvalue),
T + es_label: float(stats.epps_singleton_2samp(t_model, t_true).pvalue),
Y + es_label: float(stats.epps_singleton_2samp(y_model, y_true).pvalue),
T + wasserstein_label: float(stats.wasserstein_distance(t_model, t_true)),
Y + wasserstein_label: float(stats.wasserstein_distance(y_model, y_true)),
}
return metrics
def get_multivariate_quant_metrics(
self,
include_w=True,
dataset=TRAIN,
transformed=False,
norm=2,
k=1,
alphas=None,
n_permutations=1000,
seed=None,
verbose=False,
n=None
):
"""
Computes Wasserstein-1 and Wasserstein-2 distances. Also computes all the
test statistics and p-values for the multivariate two sample tests from
the torch_two_sample package. See that documentation for more info on
the specific tests: https://torch-two-sample.readthedocs.io/en/latest/
:param include_w: If False, test if p(t, y) = p_model(t, y).
If True, test if p(w, t, y) = p(w, t, y).
:param dataset: dataset subset to evaluate on (train, val, or test)
:param transformed: If True, use transformed version of data.
If False, use original (non-transformed) version of data.
:param norm: norm used for Friedman-Rafsky test and kNN test
:param k: number of nearest neighbors to use for kNN test
:param alphas: list of kernel parameters for MMD test
:param n_permutations: number of permutations for each test
:param seed: seed for sample | |
#!/bin/python
import subprocess
import os
from optparse import OptionParser
import linecache
import json
import time
import datetime
import socket
import sys
# --- Command-line parsing and global configuration --------------------------
usage = "Usage: %prog [options]"
parser = OptionParser(usage=usage)
# -d/--directory: working directory for the agent (defaults to cwd below).
parser.add_option("-d", "--directory",
                  action="store", dest="homepath", help="Directory to run from")
(options, args) = parser.parse_args()

date = time.strftime("%Y%m%d")  # used to name the daily CSV data file
hostname=socket.gethostname().partition(".")[0]  # short host name (no domain)
if options.homepath is None:
    homepath = os.getcwd()
else:
    homepath = options.homepath
datadir = 'data/'  # relative directory holding stat files and CSV output
newInstanceAvailable = False  # set by update_docker() when a new container appears
def listtocsv(lists):
    """Write `lists` as one comma-separated line to the global `csvFile`.

    `csvFile` is an open file object defined elsewhere in this script.
    An empty input list produces no output line (same as the original).
    """
    # str.join replaces the original index-based concatenation loop:
    # identical output, linear instead of quadratic string building.
    finallog = ','.join(str(item) for item in lists)
    if finallog != "":
        csvFile.write("%s\n" % (finallog))
def getindex(colName):
    """Map a metric column name to its numeric metric-group id.

    Exact names are looked up first; interface-specific network fields
    (e.g. "InOctets-eth0") match by prefix/substring. Unknown names yield
    None, mirroring the original fall-through behaviour.
    """
    exact_groups = {
        "CPU": 4001,
        "DiskRead": 4002,
        "DiskWrite": 4002,
        "NetworkIn": 4003,
        "NetworkOut": 4003,
        "MemUsed": 4004,
        "SwapUsed": 4008,
        "SwapTotal": 4008,
    }
    if colName in exact_groups:
        return exact_groups[colName]
    substring_groups = (
        (("InOctets", "OutOctets"), 4005),
        (("InDiscards", "OutDiscards"), 4006),
        (("InErrors", "OutErrors"), 4007),
    )
    for needles, groupid in substring_groups:
        if any(needle in colName for needle in needles):
            return groupid
    return None
# Most recent header->value pairs parsed from the collected stats.
metricResults = {}

def toJson(header, values):
    """Zip a CSV header line and a CSV value line into `metricResults`.

    Either argument being empty is a no-op. Raises IndexError when the
    value line has fewer fields than the header (as the original did).
    """
    global metricResults
    if header == "" or values == "":
        return
    valueFields = values.split(",")
    for position, fieldName in enumerate(header.split(",")):
        metricResults[fieldName] = valueFields[position]
def updateResults():
    # Persist the latest parsed metrics to previous_results.json so the
    # next collection round can compute deltas against them
    # (see getPreviousResults() / calculateDelta()). No-op when empty.
    print "In Function updateResults()"
    global metricResults
    if not metricResults:
        return
    with open(os.path.join(homepath,datadir+"previous_results.json"),'w') as f:
        json.dump(metricResults,f)
def initPreviousResults():
    """Parse each container's stat<id>.txt dump, build a CSV header plus one
    row of metric values, seed previous_results.json with them, and run
    getmetrics_docker.sh to trigger a fresh collection round.
    """
    print "In Function initPreviousResults()"
    global numlines
    global date
    global hostname
    log = ''
    fieldnames = ''
    # NOTE(review): `fieldnames` is a local here although calculateDelta()
    # reads a global of the same name -- confirm the global is assigned
    # elsewhere in this script.
    # NOTE(review): the loop stops at len(dockers)-1; `dockers` ends with an
    # empty string from split("\n"), presumably skipped on purpose.
    for i in range(len(dockers)-1):
        try:
            filename = "stat%s.txt"%dockers[i]
            statsFile = open(os.path.join(homepath,datadir+filename),'r')
            data = statsFile.readlines()
        except IOError as e:
            print "I/O error({0}): {1}: {2}".format(e.errno, e.strerror, e.filename)
            continue
        finally:
            # NOTE(review): if open() itself failed, statsFile is unbound (or
            # still bound to a previous iteration's file) at this point.
            statsFile.close()
        # The dump mixes raw HTTP response headers with the JSON payload;
        # take the first line that parses as a docker stats JSON object.
        for eachline in data:
            if isJson(eachline) == True:
                metricData = json.loads(eachline)
                break
        #Generating the header line for the data file
        if(numlines < 1):
            fields = ["timestamp","CPU","DiskRead","DiskWrite","NetworkIn","NetworkOut","MemUsed","SwapTotal","SwapUsed"]
            if i == 0:
                fieldnames = fields[0]
            host = dockers[i]
            for j in range(1,len(fields)):
                if(fieldnames != ""):
                    fieldnames = fieldnames + ","
                groupid = getindex(fields[j])
                # Header entries look like: CPU[<container>_<host>]:4001
                nextfield = fields[j] + "[" +host+"_"+hostname+"]"+":"+str(groupid)
                fieldnames = fieldnames + nextfield
        else:
            # Reuse the header line already written to today's CSV file.
            fieldnames = linecache.getline(os.path.join(homepath,datadir+date+".csv"),1)
        # Docker reports the sample time as an RFC3339 string; keep seconds
        # precision and convert to epoch milliseconds.
        timestamp = metricData['read'][:19]
        timestamp = int(time.mktime(datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S").timetuple())*1000)
        try:
            networkInterfaceMetrics = []
            if 'network' in metricData or 'networks' in metricData:
                # Old single-interface stats format: metricData['network'].
                # For the newer 'networks' format this lookup raises KeyError,
                # which is handled by the per-interface branch below.
                networkRx = round(float(float(metricData['network']['rx_bytes'])/(1024*1024)),4) #MB
                networkTx = round(float(float(metricData['network']['tx_bytes'])/(1024*1024)),4) #MB
            else:
                networkRx = 0
                networkTx = 0
        except KeyError,e:
            try:
                # Newer per-interface format: metricData['networks'][iface].
                networkMetrics = metricData['networks']
                networkRx = 0
                networkTx = 0
                for key in networkMetrics:
                    # Append New Field Headers for specific Interface
                    if (numlines < 1 or newInstanceAvailable == True):
                        nextfield = "InOctets-" + key + "[" + host + "_" + hostname + "]" + ":" + str(
                            getindex("InOctets"))
                        fieldnames = fieldnames + "," + nextfield
                        nextfield = "OutOctets-" + key + "[" + host + "_" + hostname + "]" + ":" + str(
                            getindex("OutOctets"))
                        fieldnames = fieldnames + "," + nextfield
                        nextfield = "InDiscards-" + key + "[" + host + "_" + hostname + "]" + ":" + str(
                            getindex("InDiscards"))
                        fieldnames = fieldnames + "," + nextfield
                        nextfield = "OutDiscards-" + key + "[" + host + "_" + hostname + "]" + ":" + str(
                            getindex("OutDiscards"))
                        fieldnames = fieldnames + "," + nextfield
                        nextfield = "InErrors-" + key + "[" + host + "_" + hostname + "]" + ":" + str(
                            getindex("InErrors"))
                        fieldnames = fieldnames + "," + nextfield
                        nextfield = "OutErrors-" + key + "[" + host + "_" + hostname + "]" + ":" + str(
                            getindex("OutErrors"))
                        fieldnames = fieldnames + "," + nextfield
                    # Per-interface values, appended in the same order as the
                    # headers above (octets, discards, errors; rx then tx).
                    metricVal = float(networkMetrics[key]['rx_bytes'])
                    networkInterfaceMetrics.append(round(float(metricVal / (1024 * 1024)), 4))
                    metricVal = float(networkMetrics[key]['tx_bytes'])
                    networkInterfaceMetrics.append(round(float(metricVal / (1024 * 1024)), 4))
                    metricVal = float(networkMetrics[key]['rx_dropped'])
                    networkInterfaceMetrics.append(round(float(metricVal / (1024 * 1024)), 4))
                    metricVal = float(networkMetrics[key]['tx_dropped'])
                    networkInterfaceMetrics.append(round(float(metricVal / (1024 * 1024)), 4))
                    metricVal = float(networkMetrics[key]['rx_errors'])
                    networkInterfaceMetrics.append(round(float(metricVal / (1024 * 1024)), 4))
                    metricVal = float(networkMetrics[key]['tx_errors'])
                    networkInterfaceMetrics.append(round(float(metricVal / (1024 * 1024)), 4))
                    #Adding up values for all interfaces to get the total
                    networkRx += float(networkMetrics[key]['rx_bytes'])
                    networkTx += float(networkMetrics[key]['tx_bytes'])
                networkRx = round(float(networkRx/(1024*1024)),4) #MB
                networkTx = round(float(networkTx/(1024*1024)),4) #MB
            except KeyError, e:
                print "Couldn't fetch network information for container: " + dockers[i]
                networkRx = "NaN"
                networkTx = "NaN"
        try:
            cpu = round(float(metricData['cpu_stats']['cpu_usage']['total_usage'])/10000000,4) #Convert nanoseconds to jiffies
        except KeyError, e:
            print "Couldn't fetch cpu information for container: " + dockers[i]
            cpu = "NaN"
        try:
            memUsed = round(float(float(metricData['memory_stats']['usage'])/(1024*1024)),4) #MB
        except KeyError, e:
            print "Couldn't fetch memory information for container: " + dockers[i]
            memUsed = "NaN"
        try:
            if len(metricData['blkio_stats']['io_service_bytes_recursive']) == 0:
                diskRead = "NaN"
                diskWrite = "NaN"
            else:
                diskRead = round(float(float(metricData['blkio_stats']['io_service_bytes_recursive'][0]['value'])/(1024*1024)),4) #MB
                diskWrite = round(float(float(metricData['blkio_stats']['io_service_bytes_recursive'][1]['value'])/(1024*1024)),4) #MB
        except (KeyError, IndexError) as e:
            print "Couldn't fetch disk information for container: " + dockers[i]
            diskRead = "NaN"
            diskWrite = "NaN"
        try:
            swapTotal = round(float(float(metricData['memory_stats']['stats']['total_swap'])/(1024*1024)),4) #MB
            swapUsed = round(float(float(metricData['memory_stats']['stats']['swap'])/(1024*1024)),4) #MB
        except KeyError, e:
            # NOTE(review): this handler indexes dockerInstances[i] while all
            # sibling handlers use dockers[i] -- looks like a typo; the two
            # lists may differ in order and length.
            print "Couldn't fetch swap information for container: " + dockerInstances[i]
            swapUsed = "NaN"
            swapTotal = "NaN"
        # One CSV row accumulates across all containers; the timestamp is
        # added once, before the first container's values.
        if i == 0:
            log = log + str(timestamp)
        log = log + "," + str(cpu) + "," + str(diskRead) + "," + str(diskWrite) + "," + str(networkRx) + "," + str(networkTx) + "," + str(memUsed) + "," + str(swapTotal)+ "," + str(swapUsed)
        if networkInterfaceMetrics:
            log = log + "," + ",".join(map(str, networkInterfaceMetrics))
    # Seed the previous-results snapshot used for delta computation, then
    # kick off a fresh collection so the next pass has new stat files.
    toJson(fieldnames,log)
    updateResults()
    time.sleep(1)
    proc = subprocess.Popen([os.path.join(homepath,datadir+"getmetrics_docker.sh")], cwd=homepath, stdout=subprocess.PIPE, shell=True)
    (out,err) = proc.communicate()
def getPreviousResults():
    # Load the metric snapshot written by updateResults() on a previous run;
    # raises IOError if the snapshot file does not exist yet.
    print "In Function getPreviousResults()"
    with open(os.path.join(homepath,datadir+"previous_results.json"),'r') as f:
        return json.load(f)
def isJson(jsonString):
    # True only for a JSON object carrying a non-empty docker 'read'
    # timestamp; used to pick the JSON payload line out of the raw HTTP dump.
    print "In Function isJson()"
    try:
        jsonObject = json.loads(jsonString)
        if jsonObject['read'] != "":
            return True
    except ValueError, e:
        return False
    except TypeError, e:
        return False
    # NOTE(review): a valid JSON object *without* a 'read' key raises an
    # uncaught KeyError above instead of returning False -- confirm intended.
    return False
def checkDelta(fd):
    # True when field `fd` holds a cumulative counter whose reported value
    # must be converted into a per-interval delta by calculateDelta().
    # Prefix matching covers interface-qualified names like "InOctets-eth0".
    print "In Function checkDelta()"
    deltaFields = ["CPU", "DiskRead", "DiskWrite", "NetworkIn", "NetworkOut","InOctets", "OutOctets", "InErrors", "OutErrors", "InDiscards", "OutDiscards"]
    for eachfield in deltaFields:
        if(eachfield == fd or fd.startswith(eachfield)):
            return True
    return False
# Previous CPU counter per field, used for CPU deltas; populated elsewhere
# in this script.
precpu={}

def calculateDelta():
    """Turn the current absolute counters in metricResults into a list of
    per-interval delta values, ordered by the CSV header fields: CPU deltas
    come from `precpu`, other cumulative counters from the snapshot in
    previous_results.json; gauge fields are passed through unchanged.
    """
    print "In Function calculateDelta()"
    global fieldnames
    global metricResults
    finallogList = []
    # NOTE(review): `fieldnames` is read as a global here but is only
    # assigned as a local inside initPreviousResults() in this chunk --
    # confirm it is set at module scope elsewhere.
    if fieldnames == "":
        return finallogList
    fieldsList = fieldnames.split(",")
    previousResult = getPreviousResults()
    currentResult = metricResults
    for key in fieldsList:
        # The metric name is everything before '[': "CPU[abc_host]:4001" -> "CPU".
        if((key.split('[')[0]) == "CPU"):
            if key not in precpu:
                deltaValue = "NaN"
                finallogList.append(deltaValue)
                continue
            previousCPU = precpu[key]
            if str(currentResult[key]) == "NaN" or str(previousCPU) == "NaN":
                deltaValue = "NaN"
            else:
                deltaValue = round((float(currentResult[key]) - float(previousCPU)),4)
                # Negative deltas (counter reset / container restart) clamp to 0.
                if deltaValue < 0:
                    deltaValue = 0
            finallogList.append(deltaValue)
        elif(checkDelta(key.split('[')[0]) == True):
            if key not in currentResult or key not in previousResult:
                deltaValue = "NaN"
            elif str(currentResult[key]) == "NaN" or str(previousResult[key]) == "NaN":
                deltaValue = "NaN"
            else:
                deltaValue = float(currentResult[key]) - float(previousResult[key])
                # Negative deltas (counter reset / container restart) clamp to 0.
                if deltaValue < 0:
                    deltaValue = 0
            finallogList.append(deltaValue)
        else:
            # Gauge fields (e.g. MemUsed) are emitted as-is.
            if key not in currentResult:
                currentValue = "NaN"
                finallogList.append(currentValue)
            else:
                finallogList.append(currentResult[key])
    return finallogList
def removeStatFiles():
    # Delete the per-container stat<id>.txt files left over from the
    # previous collection round.
    print "In Function removeStatFiles()"
    global dockerInstances
    for i in range(len(dockerInstances)):
        statfile = "stat%s.txt"%dockerInstances[i]
        if os.path.isfile(os.path.join(homepath,datadir+statfile)) == True:
            os.remove(os.path.join(homepath,datadir+statfile))
# Container ids known from previous runs (persisted in totalInstances.json).
dockerInstances = []

def update_docker():
    """Discover the currently running containers, regenerate
    getmetrics_docker.sh to poll each of them over the docker unix socket,
    and flag (via newInstanceAvailable) containers that appeared since the
    last run, persisting the updated container list."""
    print "In Function update_docker()"
    global dockers
    global newInstanceAvailable
    global dockerInstances
    # Ids of all running containers ("docker ps" minus its header line).
    proc = subprocess.Popen(["docker ps | awk '{if(NR!=1) print $1}'"], stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    dockers = out.split("\n")
    # Rewrite the collection script: one backgrounded stats request per
    # container against /var/run/docker.sock, then wait for all of them.
    cronfile = open(os.path.join(homepath,datadir+"getmetrics_docker.sh"),'w')
    cronfile.write("#!/bin/bash\nDATADIR='data/'\ncd $DATADIR\n")
    cronfile.write("now=$(date +%M)\n")
    containerCount = 0
    for container in dockers:
        if container == "":
            continue
        containerCount+=1
        command = "echo -e \"GET /containers/"+container+"/stats?stream=0 HTTP/1.1\\r\\nHost: localhost\\r\\n\" | nc -U -i 10 /var/run/docker.sock > stat"+container+".txt & PID"+str(containerCount)+"=$!"
        cronfile.write(command+"\n")
    for i in range(1,containerCount+1):
        cronfile.write("wait $PID"+str(i)+"\n")
    # On every quarter hour the backup files are truncated; otherwise the
    # new sample is appended.
    cronfile.write(
        "if [ $now -eq \"00\" ] || [ $now -eq \"15\" ] || [ $now -eq \"30\" ] || [ $now -eq \"45\" ];\nthen\n")
    for container in dockers:
        if container == "":
            continue
        command = " cat stat" + container + ".txt > stat" + container + "_backup.txt\n"
        cronfile.write(command)
    cronfile.write("else\n")
    for container in dockers:
        if container == "":
            continue
        command = " cat stat" + container + ".txt >> stat" + container + "_backup.txt\n"
        cronfile.write(command)
    cronfile.write("fi\n")
    cronfile.close()
    os.chmod(os.path.join(homepath,datadir+"getmetrics_docker.sh"),0755)
    if os.path.isfile(os.path.join(homepath,datadir+"totalInstances.json")) == False:
        # First run: record every running container as already known.
        towritePreviousInstances = {}
        for containers in dockers:
            if containers != "":
                dockerInstances.append(containers)
        towritePreviousInstances["overallDockerInstances"] = dockerInstances
        with open(os.path.join(homepath,datadir+"totalInstances.json"),'w') as f:
            json.dump(towritePreviousInstances,f)
    else:
        with open(os.path.join(homepath,datadir+"totalInstances.json"),'r') as f:
            dockerInstances = json.load(f)["overallDockerInstances"]
        # Drop the empty trailing entry produced by split("\n").
        dockers = filter(None, dockers)
        for eachDocker in dockers:
            print ("Searching for",eachDocker)
            if eachDocker not in dockerInstances:
                newInstanceAvailable = True
        if newInstanceAvailable or (len(dockers) != len(dockerInstances)):
            newInstanceAvailable = True
            print ("Making the call to the server for update instance information.",len(dockers),len(dockerInstances))
            writeInsatanceFile("currentInstances", dockers)
            writeInsatanceFile("previousInstances", dockerInstances)
            towritePreviousInstances = {}
            towritePreviousInstances["overallDockerInstances"] = dockers
            dockerInstances = dockers
            with open(os.path.join(homepath,datadir+"totalInstances.json"),'w') as f:
                json.dump(towritePreviousInstances,f)
def writeInsatanceFile(filename, instanceList):
global hostname
jsonData = {}
print "In Function writeInsatanceFile()"
print | |
<gh_stars>0
#
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import hashlib
from datetime import datetime
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Count, Case, When, IntegerField, F, BooleanField
from django.utils.text import format_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now, pytz
from bridge.vars import JOB_STATUS, USER_ROLES, JOB_ROLES, JOB_WEIGHT, SAFE_VERDICTS, UNSAFE_VERDICTS, ASSOCIATION_TYPE
from bridge.utils import logger, BridgeException, file_get_or_create, get_templated_text
from users.notifications import Notify
from jobs.models import Job, JobHistory, FileSystem, UserRole, JobFile
from reports.models import ReportComponent, ReportSafe, ReportUnsafe, ReportUnknown, ReportAttr
from marks.models import MarkSafeReport, MarkSafeTag, MarkUnsafeReport, MarkUnsafeTag, MarkUnknownReport
# List of available types of 'safe' column class.
# Each entry corresponds to a 'safe:<entry>' key in TITLES below.
SAFES = [
    'missed_bug',
    'incorrect',
    'unknown',
    'inconclusive',
    'unassociated',
    'total'
]
# List of available types of 'unsafe' column class.
# Each entry corresponds to an 'unsafe:<entry>' key in TITLES below.
UNSAFES = [
    'bug',
    'target_bug',
    'false_positive',
    'unknown',
    'inconclusive',
    'unassociated',
    'total'
]
# Dictionary of titles of static columns.
# Keys use '<column>' or '<column>:<subcolumn>' notation; values are lazily
# translated so the active locale is applied at render time.
TITLES = {
    'name': _('Title'),
    'author': _('Last change author'),
    'date': _('Last change date'),
    'status': _('Decision status'),

    'safe': _('Safes'),
    'safe:missed_bug': _('Missed target bugs'),
    'safe:incorrect': _('Incorrect proof'),
    'safe:unknown': _('Unknown'),
    'safe:inconclusive': _('Incompatible marks'),
    'safe:unassociated': _('Without marks'),
    'safe:total': _('Total'),

    'unsafe': _('Unsafes'),
    'unsafe:bug': _('Bugs'),
    'unsafe:target_bug': _('Target bugs'),
    'unsafe:false_positive': _('False positives'),
    'unsafe:unknown': _('Unknown'),
    'unsafe:inconclusive': _('Incompatible marks'),
    'unsafe:unassociated': _('Without marks'),
    'unsafe:total': _('Total'),

    'problem': _('Unknowns'),
    'problem:total': _('Total'),
    'resource': _('Consumed resources'),
    'resource:total': _('Total'),
    'tag': _('Tags'),
    'tag:safe': _('Safes'),
    'tag:unsafe': _('Unsafes'),
    'identifier': _('Identifier'),
    'format': _('Format'),
    'version': _('Version'),
    'parent_id': format_lazy('{0}/{1}', _('Parent'), _('Identifier')),
    'role': _('Your role'),
    'priority': _('Priority'),
    'start_date': _('Decision start date'),
    'finish_date': _('Decision finish date'),
    'solution_wall_time': _('Decision wall time'),
    'operator': _('Operator'),

    'tasks': _('Verification tasks'),
    'tasks:pending': _('Pending'),
    'tasks:processing': _('Processing'),
    'tasks:finished': _('Finished'),
    'tasks:error': _('Error'),
    'tasks:cancelled': _('Cancelled'),
    'tasks:total': _('Total'),
    'tasks:solutions': _('Number of decisions'),
    'tasks:total_ts': _('Total to be solved'),
    'tasks:start_ts': _('Start solution date'),
    'tasks:finish_ts': _('Finish solution date'),
    'tasks:progress_ts': _('Solution progress'),
    'tasks:expected_time_ts': _('Expected solution time'),

    'subjobs': _('Subjobs'),
    'subjobs:total_sj': _('Total to be solved'),
    'subjobs:start_sj': _('Start solution date'),
    'subjobs:finish_sj': _('Finish solution date'),
    'subjobs:progress_sj': _('Solution progress'),
    'subjobs:expected_time_sj': _('Expected solution time'),
}
def months_choices():
    """Return (month number, month name) pairs for a form choice field."""
    # The year/day are arbitrary; only the month name is used.
    return [(month, datetime(2016, month, 1).strftime('%B')) for month in range(1, 13)]
def years_choices():
    """Return the last four years (three previous plus the current one), ascending."""
    this_year = datetime.now().year
    return [this_year - offset for offset in (3, 2, 1, 0)]
def is_readable(filename):
    """Return True when `filename` has one of the known plain-text extensions."""
    extension = os.path.splitext(filename)[1]
    if not extension:
        return False
    return extension[1:] in {'txt', 'json', 'xml', 'c', 'aspect', 'i', 'h', 'tmpl'}
def get_job_parents(user, job):
    """Return the ancestor chain of `job`, root first, as pk/name dicts.

    The pk is None for ancestors the user is not allowed to view, so the
    caller can render the name without a link.
    """
    ancestors = []
    current = job.parent
    while current is not None:
        ancestors.append(current)
        current = current.parent
    parents = []
    for ancestor in reversed(ancestors):
        visible = JobAccess(user, ancestor).can_view()
        parents.append({'pk': ancestor.pk if visible else None, 'name': ancestor.name})
    return parents
def get_job_children(user, job):
    """Return pk/name dicts for `job`'s direct children the user may view,
    ordered by change date."""
    return [
        {'pk': child.pk, 'name': child.name}
        for child in job.children.order_by('change_date')
        if JobAccess(user, child).can_view()
    ]
class JobAccess:
    """Permission checks for `user` with respect to `job`.

    Role flags are resolved once at construction; each can_* method is then
    a cheap predicate. `job` may be None for the global checks
    (can_create, can_view_jobs).
    """

    def __init__(self, user, job=None):
        self.user = user
        self.job = job
        self._is_author = False
        self._job_role = None
        self._user_role = user.extended.role
        self._is_manager = (self._user_role == USER_ROLES[2][0])
        self._is_expert = (self._user_role == USER_ROLES[3][0])
        self._is_service = (self._user_role == USER_ROLES[4][0])
        self._is_operator = False
        try:
            if self.job is not None:
                # The operator is the user who started the job's decision.
                self._is_operator = (user == self.job.reportroot.user)
        except ObjectDoesNotExist:
            pass
        self.__get_prop(user)

    def klever_core_access(self):
        """Whether requests on behalf of Klever Core are allowed for this job."""
        if self.job is None:
            return False
        return self._is_manager or self._is_service

    def can_decide(self):
        """Whether the user may start the job's decision."""
        if self.job is None or self.job.status in [JOB_STATUS[1][0], JOB_STATUS[2][0], JOB_STATUS[6][0]]:
            return False
        return self._is_manager or self._is_author or self._job_role in [JOB_ROLES[3][0], JOB_ROLES[4][0]]

    def can_upload_reports(self):
        """Whether the user may upload reports for the job."""
        if self.job is None or self.job.status in [JOB_STATUS[1][0], JOB_STATUS[2][0], JOB_STATUS[6][0]]:
            return False
        return self._is_manager or self._is_author or self._job_role in [JOB_ROLES[3][0], JOB_ROLES[4][0]]

    def can_view(self):
        """Whether the user may see the job at all."""
        if self.job is None:
            return False
        return self._is_manager or self._is_author or self._job_role != JOB_ROLES[0][0] or self._is_expert

    def can_view_jobs(self, filters=None):
        """Return the set of ids of jobs (optionally pre-filtered) the user may view."""
        queryset = Job.objects.all()
        if isinstance(filters, dict):
            queryset = queryset.filter(**filters)
        elif filters is not None:
            queryset = queryset.filter(filters)
        queryset = queryset.only('id')
        all_jobs = set(j_id for j_id, in queryset.values_list('id'))
        if self._is_manager or self._is_expert:
            return all_jobs
        author_of = set(jh.job_id for jh in JobHistory.objects.filter(version=1, change_author=self.user))
        jobs_with_no_access = self.__get_jobs_with_roles([JOB_ROLES[0][0]])
        # Authors keep access to their jobs even if a role would forbid it.
        return all_jobs - (jobs_with_no_access - author_of)

    def can_create(self):
        """Whether the user may create jobs (guests and services may not)."""
        return self._user_role not in [USER_ROLES[0][0], USER_ROLES[4][0]]

    def can_edit(self):
        """Whether the user may edit the job (authors and managers, job idle)."""
        if self.job is None:
            return False
        return self.job.status not in [JOB_STATUS[1][0], JOB_STATUS[2][0], JOB_STATUS[6][0]] \
            and (self._is_author or self._is_manager)

    def can_stop(self):
        """Whether the user may stop the job's decision while it is running."""
        if self.job is None:
            return False
        if self.job.status in [JOB_STATUS[1][0], JOB_STATUS[2][0]] and (self._is_operator or self._is_manager):
            return True
        return False

    def can_delete(self):
        """Whether the user may delete the job (requires all children deletable)."""
        if self.job is None:
            return False
        for ch in self.job.children.all():
            if not JobAccess(self.user, ch).can_delete():
                return False
        # Bug fix: compare with the status *code* (JOB_STATUS[3][0]) rather
        # than the (code, title) tuple JOB_STATUS[3]; the original comparison
        # was always False, so managers could never use this shortcut.
        if self._is_manager and self.job.status == JOB_STATUS[3][0]:
            return True
        if self.job.status in [JOB_STATUS[1][0], JOB_STATUS[2][0]]:
            return False
        return self._is_author or self._is_manager

    def can_download(self):
        """Whether the job can be downloaded (not while it is being decided)."""
        return self.job is not None and self.job.status != JOB_STATUS[2][0]

    def can_collapse(self):
        """Whether the user may collapse the job's reports (full-weight jobs only)."""
        if self.job is None:
            return False
        return self.job.status not in {JOB_STATUS[1][0], JOB_STATUS[2][0], JOB_STATUS[6][0]} \
            and (self._is_author or self._is_manager) and self.job.weight == JOB_WEIGHT[0][0]

    def can_clear_verifications(self):
        """Whether there are verification input files the user may clear."""
        if self.job is None or self.job.status in {JOB_STATUS[1][0], JOB_STATUS[2][0], JOB_STATUS[6][0]}:
            return False
        if not (self._is_author or self._is_manager):
            return False
        try:
            return ReportComponent.objects.filter(root=self.job.reportroot, verification=True)\
                .exclude(verifier_input='').count() > 0
        except ObjectDoesNotExist:
            return False

    def can_dfc(self):
        """Whether files comparison ("dfc") is available for the job."""
        return self.job is not None and self.job.status not in [JOB_STATUS[0][0], JOB_STATUS[1][0]]

    def __get_jobs_with_roles(self, roles):
        """Ids of jobs where the user's effective role is one of `roles`."""
        jobs = set()
        # Explicit per-user roles on the latest job version take precedence.
        for j_id, in UserRole.objects.filter(user=self.user, job__version=F('job__job__version'), role__in=roles) \
                .values_list('job__job_id'):
            jobs.add(j_id)
        # Fall back to the job's global role for jobs without an explicit one.
        for j_id, role in JobHistory.objects.exclude(job_id__in=jobs)\
                .filter(version=F('job__version'), global_role__in=roles) \
                .values_list('job_id', 'global_role'):
            jobs.add(j_id)
        return jobs

    def __get_prop(self, user):
        """Resolve authorship (from version 1) and the user's role on the last version."""
        if self.job is not None:
            try:
                first_version = self.job.versions.get(version=1)
                last_version = self.job.versions.get(version=self.job.version)
            except ObjectDoesNotExist:
                return
            self._is_author = (first_version.change_author == user)
            last_v_role = last_version.userrole_set.filter(user=user)
            if len(last_v_role) > 0:
                self._job_role = last_v_role[0].role
            else:
                self._job_role = last_version.global_role
def get_job_by_identifier(identifier):
    """Resolve a job whose identifier starts with `identifier`.

    Raises BridgeException when there is no match or more than one match.
    """
    matches = Job.objects.filter(identifier__startswith=identifier)
    number_of_matches = len(matches)
    if number_of_matches == 0:
        raise BridgeException(_('The job with specified identifier was not found'))
    if number_of_matches > 1:
        raise BridgeException(_('Several jobs match the specified identifier, '
                                'please increase the length of the job identifier'))
    return matches[0]
def get_job_by_name_or_id(name_or_id):
    """Resolve a job by exact name, falling back to an identifier prefix.

    Raises BridgeException when the fallback finds no match or several.
    """
    try:
        return Job.objects.get(name=name_or_id)
    except ObjectDoesNotExist:
        matches = Job.objects.filter(identifier__startswith=name_or_id)
        number_of_matches = len(matches)
        if number_of_matches == 0:
            raise BridgeException(_('The job with specified identifier or name was not found'))
        if number_of_matches > 1:
            raise BridgeException(_('Several jobs match the specified identifier, '
                                    'please increase the length of the job identifier'))
        return matches[0]
class FileData:
    """Flattens a job's file tree into `self.filedata`.

    Entries are dicts with id/title/parent/type/hash_sum, ordered depth-first
    so every item directly follows its parent, with folders listed before
    files among siblings (per the query's is_file/name ordering).
    """

    def __init__(self, job):
        self.filedata = []
        self.__get_filedata(job)
        self.__order_by_lvl()

    def __get_filedata(self, job):
        # is_file is 0 for folders so they sort before files; ties by name.
        queryset = job.filesystem_set\
            .annotate(is_file=Case(When(file=None, then=0), default=1, output_field=IntegerField()))\
            .order_by('is_file', 'name').select_related('file')
        for item in queryset:
            self.filedata.append({
                'id': item.pk,
                'title': item.name,
                'parent': item.parent_id,
                'type': item.is_file,
                'hash_sum': item.file.hash_sum if item.is_file else None
            })

    def __order_by_lvl(self):
        # Split roots from the rest, then emit each root followed by its
        # descendants in pre-order, preserving the query's sibling order.
        roots = [fd for fd in self.filedata if fd['parent'] is None]
        non_roots = [fd for fd in self.filedata if fd['parent'] is not None]

        def descendants_of(node):
            collected = []
            if node['type'] == 1:
                # Files have no children.
                return collected
            for candidate in non_roots:
                if candidate['parent'] == node['id']:
                    collected.append(candidate)
                    collected.extend(descendants_of(candidate))
            return collected

        ordered = []
        for root in roots:
            ordered.append(root)
            ordered.extend(descendants_of(root))
        self.filedata = ordered
class SaveFileData:
    """Validates a client-supplied file tree description and persists it as
    FileSystem rows for `job`."""

    def __init__(self, filedata, job):
        self.filedata = filedata
        self.job = job
        # filedata grouped level by level, roots first (filled by __check_data).
        self.filedata_by_lvl = []
        self.__check_data()
        # Mapping of hash sums to previously uploaded file objects
        # (__get_files is defined further down in this class).
        self._files = self.__get_files()
        self.__save_file_data()
    def __save_file_data(self):
        """Create FileSystem rows level by level so parents exist before children."""
        saved_files = {}
        for lvl in self.filedata_by_lvl:
            for lvl_elem in lvl:
                fs_elem = FileSystem(job=self.job)
                if lvl_elem['parent']:
                    # The parent was saved on a previous (shallower) level.
                    fs_elem.parent = saved_files[lvl_elem['parent']]
                if lvl_elem['type'] == '1':
                    # NOTE(review): 'type' is compared against the *string*
                    # '1' here, while FileData produces integer types --
                    # presumably this input arrives as strings from the
                    # client; confirm.
                    if lvl_elem['hash_sum'] not in self._files:
                        raise ValueError('The file was not uploaded before')
                    fs_elem.file = self._files[lvl_elem['hash_sum']]
                # Non-ASCII names keep only their last 30 characters.
                if not all(ord(c) < 128 for c in lvl_elem['title']):
                    t_size = len(lvl_elem['title'])
                    if t_size > 30:
                        lvl_elem['title'] = lvl_elem['title'][(t_size - 30):]
                fs_elem.name = lvl_elem['title']
                fs_elem.save()
                saved_files[lvl_elem['id']] = fs_elem
        return None
    def __check_data(self):
        """Group self.filedata into levels (roots first) and validate titles,
        hash sums, and per-folder name uniqueness."""
        num_of_elements = 0
        element_of_lvl = []
        cnt = 0
        # Breadth-first grouping: each pass collects the children of the
        # previous level via __get_lower_level(); the counter guards against
        # cycles in the supplied tree.
        while num_of_elements < len(self.filedata):
            cnt += 1
            if cnt > 1000:
                raise ValueError('The file is too deep, maybe there is a loop in the files tree')
            num_of_elements += len(element_of_lvl)
            element_of_lvl = self.__get_lower_level(element_of_lvl)
            if len(element_of_lvl):
                self.filedata_by_lvl.append(element_of_lvl)
        for lvl in self.filedata_by_lvl:
            names_with_parents = set()
            for fd in lvl:
                if len(fd['title']) == 0:
                    raise ValueError("The file/folder name can't be empty")
                # Non-ASCII names keep only their last 30 characters.
                if not all(ord(c) < 128 for c in fd['title']):
                    title_size = len(fd['title'])
                    if title_size > 30:
                        fd['title'] = fd['title'][(title_size - 30):]
                if fd['type'] == '1' and fd['hash_sum'] is None:
                    raise ValueError('The file was not uploaded before')
                # Uniqueness is enforced per (parent, title) pair within a level.
                if fd['parent'] is not None:
                    rel_path = "%s/%s" % (fd['parent'], fd['title'])
                else:
                    rel_path = fd['title']
                if rel_path in names_with_parents:
                    raise ValueError("The same names in one folder found")
                names_with_parents.add(rel_path)
def | |
in view)
self.assertEqual(view['x'], 1)
self.assertEqual(view['y'], 2)
self.assertRaises(KeyError, view.__getitem__, 'z')
self.assertEqual(tuple(sorted(view)), ('x', 'y'))
self.assertEqual(len(view), 2)
copy = view.copy()
self.assertIsNot(copy, mapping)
self.assertIsInstance(copy, collections.ChainMap)
self.assertEqual(copy, mapping)
self.assertEqual(view.get('x'), 1)
self.assertEqual(view.get('y'), 2)
self.assertIsNone(view.get('z'))
self.assertEqual(tuple(sorted(view.items())), (('x', 1), ('y', 2)))
self.assertEqual(tuple(sorted(view.keys())), ('x', 'y'))
self.assertEqual(tuple(sorted(view.values())), (1, 2))
def test_contains(self):
view = self.mappingproxy(dict.fromkeys('abc'))
self.assertTrue('a' in view)
self.assertTrue('b' in view)
self.assertTrue('c' in view)
self.assertFalse('xxx' in view)
def test_views(self):
mapping = {}
view = self.mappingproxy(mapping)
keys = view.keys()
values = view.values()
items = view.items()
self.assertEqual(list(keys), [])
self.assertEqual(list(values), [])
self.assertEqual(list(items), [])
mapping['key'] = 'value'
self.assertEqual(list(keys), ['key'])
self.assertEqual(list(values), ['value'])
self.assertEqual(list(items), [('key', 'value')])
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_len(self):
        """len() of a mappingproxy equals len() of the wrapped dict."""
        for expected in range(6):
            data = dict.fromkeys('abcde'[:expected])
            self.assertEqual(len(data), expected)
            view = self.mappingproxy(data)
            self.assertEqual(len(view), expected)
def test_iterators(self):
keys = ('x', 'y')
values = (1, 2)
items = tuple(zip(keys, values))
view = self.mappingproxy(dict(items))
self.assertEqual(set(view), set(keys))
self.assertEqual(set(view.keys()), set(keys))
self.assertEqual(set(view.values()), set(values))
self.assertEqual(set(view.items()), set(items))
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_reversed(self):
        """reversed() on a mappingproxy yields keys in reverse insertion
        order and reflects deletions made after the proxy was created."""
        d = {'a': 1, 'b': 2, 'foo': 0, 'c': 3, 'd': 4}
        mp = self.mappingproxy(d)
        del d['foo']
        r = reversed(mp)
        self.assertEqual(list(r), list('dcba'))
        self.assertRaises(StopIteration, next, r)
    def test_copy(self):
        """copy() returns a plain dict snapshot: later mutations of the
        wrapped dict show through the proxy but not through the copy."""
        original = {'key1': 27, 'key2': 51, 'key3': 93}
        view = self.mappingproxy(original)
        copy = view.copy()
        self.assertEqual(type(copy), dict)
        self.assertEqual(copy, original)
        original['key1'] = 70
        self.assertEqual(view['key1'], 70)
        self.assertEqual(copy['key1'], 27)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_union(self):
        """The | operator needs a mapping operand (TypeError otherwise);
        a successful union returns a new dict and mutates no operand."""
        mapping = {'a': 0, 'b': 1, 'c': 2}
        view = self.mappingproxy(mapping)
        with self.assertRaises(TypeError):
            view | [('r', 2), ('d', 2)]
        with self.assertRaises(TypeError):
            [('r', 2), ('d', 2)] | view
        with self.assertRaises(TypeError):
            view |= [('r', 2), ('d', 2)]
        other = {'c': 3, 'p': 0}
        self.assertDictEqual(view | other, {'a': 0, 'b': 1, 'c': 3, 'p': 0})
        self.assertDictEqual(other | view, {'c': 2, 'p': 0, 'a': 0, 'b': 1})
        self.assertEqual(view, {'a': 0, 'b': 1, 'c': 2})
        self.assertDictEqual(mapping, {'a': 0, 'b': 1, 'c': 2})
        self.assertDictEqual(other, {'c': 3, 'p': 0})
class ClassCreationTests(unittest.TestCase):
    class Meta(type):
        """Metaclass helper for the tests below: __prepare__ pre-populates
        the class namespace with ``y = 1`` plus any class-statement
        keyword arguments."""
        def __init__(cls, name, bases, ns, **kw):
            # Swallow the extra keywords; type.__init__ takes only 3 args.
            super().__init__(name, bases, ns)
        @staticmethod
        def __new__(mcls, name, bases, ns, **kw):
            # Swallow the extra keywords; type.__new__ takes only 3 args.
            return super().__new__(mcls, name, bases, ns)
        @classmethod
        def __prepare__(mcls, name, bases, **kw):
            ns = super().__prepare__(name, bases)
            ns["y"] = 1
            ns.update(kw)
            return ns
    def test_new_class_basics(self):
        """types.new_class() with just a name creates an object subclass."""
        C = types.new_class("C")
        self.assertEqual(C.__name__, "C")
        self.assertEqual(C.__bases__, (object,))
    def test_new_class_subclass(self):
        """Bases passed to types.new_class() become real base classes."""
        C = types.new_class("C", (int,))
        self.assertTrue(issubclass(C, int))
    def test_new_class_meta(self):
        """The 'metaclass' keyword is honoured and the extra keywords reach
        Meta.__prepare__, which injects y=1 and the keywords themselves."""
        Meta = self.Meta
        settings = {"metaclass": Meta, "z": 2}
        # We do this twice to make sure the passed in dict isn't mutated
        for i in range(2):
            C = types.new_class("C" + str(i), (), settings)
            self.assertIsInstance(C, Meta)
            self.assertEqual(C.y, 1)
            self.assertEqual(C.z, 2)
    def test_new_class_exec_body(self):
        """The exec_body callback runs against the prepared namespace."""
        Meta = self.Meta
        def func(ns):
            ns["x"] = 0
        C = types.new_class("C", (), {"metaclass": Meta, "z": 2}, func)
        self.assertIsInstance(C, Meta)
        self.assertEqual(C.x, 0)
        self.assertEqual(C.y, 1)
        self.assertEqual(C.z, 2)
    def test_new_class_metaclass_keywords(self):
        """Extra keywords in kwds are forwarded to the metaclass callable."""
        #Test that keywords are passed to the metaclass:
        def meta_func(name, bases, ns, **kw):
            return name, bases, ns, kw
        res = types.new_class("X",
                              (int, object),
                              dict(metaclass=meta_func, x=0))
        self.assertEqual(res, ("X", (int, object), {}, {"x": 0}))
    def test_new_class_defaults(self):
        """Explicitly passing kwds={} and exec_body=None matches defaults."""
        # Test defaults/keywords:
        C = types.new_class("C", (), {}, None)
        self.assertEqual(C.__name__, "C")
        self.assertEqual(C.__bases__, (object,))
    def test_new_class_meta_with_base(self):
        """All new_class() arguments can be given by keyword; a metaclass
        with keywords and a real base class work together."""
        Meta = self.Meta
        def func(ns):
            ns["x"] = 0
        C = types.new_class(name="C",
                            bases=(int,),
                            kwds=dict(metaclass=Meta, z=2),
                            exec_body=func)
        self.assertTrue(issubclass(C, int))
        self.assertIsInstance(C, Meta)
        self.assertEqual(C.x, 0)
        self.assertEqual(C.y, 1)
        self.assertEqual(C.z, 2)
    def test_new_class_with_mro_entry(self):
        """A non-class base with __mro_entries__ is substituted in
        __bases__, while the original object is kept in __orig_bases__."""
        class A: pass
        class C:
            def __mro_entries__(self, bases):
                return (A,)
        c = C()
        D = types.new_class('D', (c,), {})
        self.assertEqual(D.__bases__, (A,))
        self.assertEqual(D.__orig_bases__, (c,))
        self.assertEqual(D.__mro__, (D, A, object))
    def test_new_class_with_mro_entry_none(self):
        """An __mro_entries__ returning () drops that pseudo-base from
        __bases__, while __orig_bases__ still records it."""
        class A: pass
        class B: pass
        class C:
            def __mro_entries__(self, bases):
                return ()
        c = C()
        D = types.new_class('D', (A, c, B), {})
        self.assertEqual(D.__bases__, (A, B))
        self.assertEqual(D.__orig_bases__, (A, c, B))
        self.assertEqual(D.__mro__, (D, A, B, object))
    def test_new_class_with_mro_entry_error(self):
        """__mro_entries__ must return a tuple; returning a class object
        makes new_class() raise TypeError."""
        class A: pass
        class C:
            def __mro_entries__(self, bases):
                return A
        c = C()
        with self.assertRaises(TypeError):
            types.new_class('D', (c,), {})
    def test_new_class_with_mro_entry_multiple(self):
        """Each pseudo-base expands in place to all classes it returns."""
        class A1: pass
        class A2: pass
        class B1: pass
        class B2: pass
        class A:
            def __mro_entries__(self, bases):
                return (A1, A2)
        class B:
            def __mro_entries__(self, bases):
                return (B1, B2)
        D = types.new_class('D', (A(), B()), {})
        self.assertEqual(D.__bases__, (A1, A2, B1, B2))
    def test_new_class_with_mro_entry_multiple_2(self):
        """Pseudo-base expansion preserves order around plain classes."""
        class A1: pass
        class A2: pass
        class A3: pass
        class B1: pass
        class B2: pass
        class A:
            def __mro_entries__(self, bases):
                return (A1, A2, A3)
        class B:
            def __mro_entries__(self, bases):
                return (B1, B2)
        class C: pass
        D = types.new_class('D', (A(), C, B()), {})
        self.assertEqual(D.__bases__, (A1, A2, A3, C, B1, B2))
    # Many of the following tests are derived from test_descr.py
    def test_prepare_class(self):
        """types.prepare_class() selects the most derived metaclass (A
        here) and returns the namespace object from its __prepare__."""
        # Basic test of metaclass derivation
        expected_ns = {}
        class A(type):
            def __new__(*args, **kwargs):
                return type.__new__(*args, **kwargs)
            def __prepare__(*args):
                return expected_ns
        B = types.new_class("B", (object,))
        C = types.new_class("C", (object,), {"metaclass": A})
        # The most derived metaclass of D is A rather than type.
        meta, ns, kwds = types.prepare_class("D", (B, C), {"metaclass": type})
        self.assertIs(meta, A)
        self.assertIs(ns, expected_ns)
        self.assertEqual(len(kwds), 0)
    def test_bad___prepare__(self):
        """A __prepare__ returning a non-mapping raises TypeError whose
        message names the metaclass (or '<metaclass>' for non-types)."""
        # __prepare__() must return a mapping.
        class BadMeta(type):
            @classmethod
            def __prepare__(*args):
                return None
        with self.assertRaisesRegex(TypeError,
                                    r'^BadMeta\.__prepare__\(\) must '
                                    r'return a mapping, not NoneType$'):
            class Foo(metaclass=BadMeta):
                pass
        # Also test the case in which the metaclass is not a type.
        class BadMeta:
            @classmethod
            def __prepare__(*args):
                return None
        with self.assertRaisesRegex(TypeError,
                                    r'^<metaclass>\.__prepare__\(\) must '
                                    r'return a mapping, not NoneType$'):
            class Bar(metaclass=BadMeta()):
                pass
    def test_resolve_bases(self):
        """types.resolve_bases() applies __mro_entries__ substitution and
        returns the original tuple object when nothing changes."""
        class A: pass
        class B: pass
        class C:
            def __mro_entries__(self, bases):
                if A in bases:
                    return ()
                return (A,)
        c = C()
        self.assertEqual(types.resolve_bases(()), ())
        self.assertEqual(types.resolve_bases((c,)), (A,))
        self.assertEqual(types.resolve_bases((C,)), (C,))
        self.assertEqual(types.resolve_bases((A, C)), (A, C))
        self.assertEqual(types.resolve_bases((c, A)), (A,))
        self.assertEqual(types.resolve_bases((A, c)), (A,))
        x = (A,)
        y = (C,)
        z = (A, C)
        t = (A, C, B)
        for bases in [x, y, z, t]:
            self.assertIs(types.resolve_bases(bases), bases)
    def test_metaclass_derivation(self):
        """Metaclass calculation picks the most derived metaclass among
        the bases and any declared metaclass, and that metaclass's
        __prepare__/__new__ run (in super-call order)."""
        # issue1294232: correct metaclass calculation
        new_calls = [] # to check the order of __new__ calls
        class AMeta(type):
            def __new__(mcls, name, bases, ns):
                new_calls.append('AMeta')
                return super().__new__(mcls, name, bases, ns)
            @classmethod
            def __prepare__(mcls, name, bases):
                return {}
        class BMeta(AMeta):
            def __new__(mcls, name, bases, ns):
                new_calls.append('BMeta')
                return super().__new__(mcls, name, bases, ns)
            @classmethod
            def __prepare__(mcls, name, bases):
                ns = super().__prepare__(name, bases)
                ns['BMeta_was_here'] = True
                return ns
        A = types.new_class("A", (), {"metaclass": AMeta})
        self.assertEqual(new_calls, ['AMeta'])
        new_calls.clear()
        B = types.new_class("B", (), {"metaclass": BMeta})
        # BMeta.__new__ calls AMeta.__new__ with super:
        self.assertEqual(new_calls, ['BMeta', 'AMeta'])
        new_calls.clear()
        C = types.new_class("C", (A, B))
        # The most derived metaclass is BMeta:
        self.assertEqual(new_calls, ['BMeta', 'AMeta'])
        new_calls.clear()
        # BMeta.__prepare__ should've been called:
        self.assertIn('BMeta_was_here', C.__dict__)
        # The order of the bases shouldn't matter:
        C2 = types.new_class("C2", (B, A))
        self.assertEqual(new_calls, ['BMeta', 'AMeta'])
        new_calls.clear()
        self.assertIn('BMeta_was_here', C2.__dict__)
        # Check correct metaclass calculation when a metaclass is declared:
        D = types.new_class("D", (C,), {"metaclass": type})
        self.assertEqual(new_calls, ['BMeta', 'AMeta'])
        new_calls.clear()
        self.assertIn('BMeta_was_here', D.__dict__)
        E = types.new_class("E", (C,), {"metaclass": AMeta})
        self.assertEqual(new_calls, ['BMeta', 'AMeta'])
        new_calls.clear()
        self.assertIn('BMeta_was_here', E.__dict__)
    def test_metaclass_override_function(self):
        """A non-class 'metaclass' callable is used directly — no metaclass
        calculation — so its return value becomes the 'class'."""
        # Special case: the given metaclass isn't a class,
        # so there is no metaclass calculation.
        class A(metaclass=self.Meta):
            pass
        marker = object()
        def func(*args, **kwargs):
            return marker
        X = types.new_class("X", (), {"metaclass": func})
        Y = types.new_class("Y", (object,), {"metaclass": func})
        Z = types.new_class("Z", (A,), {"metaclass": func})
        self.assertIs(marker, X)
        self.assertIs(marker, Y)
        self.assertIs(marker, Z)
def test_metaclass_override_callable(self):
# The given metaclass is a class,
# but not a descendant of type.
new_calls = [] # to check the order of __new__ calls
prepare_calls = [] # to track __prepare__ calls
class ANotMeta:
def __new__(mcls, *args, **kwargs):
new_calls.append('ANotMeta')
return super().__new__(mcls)
@classmethod
def __prepare__(mcls, name, bases):
prepare_calls.append('ANotMeta')
return {}
class BNotMeta(ANotMeta):
def __new__(mcls, *args, **kwargs):
new_calls.append('BNotMeta')
return super().__new__(mcls)
@classmethod
def __prepare__(mcls, name, bases):
prepare_calls.append('BNotMeta')
return super().__prepare__(name, bases)
A = types.new_class("A", (), {"metaclass": ANotMeta})
self.assertIs(ANotMeta, type(A))
self.assertEqual(prepare_calls, ['ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['ANotMeta'])
new_calls.clear()
B = types.new_class("B", (), {"metaclass": BNotMeta})
self.assertIs(BNotMeta, type(B))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
C = types.new_class("C", (A, B))
self.assertIs(BNotMeta, type(C))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
C2 = types.new_class("C2", (B, A))
self.assertIs(BNotMeta, type(C2))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
# This is a TypeError, because of a metaclass conflict:
# BNotMeta is neither a subclass, nor a superclass of type
with self.assertRaises(TypeError):
D = types.new_class("D", (C,), {"metaclass": type})
E = types.new_class("E", (C,), {"metaclass": ANotMeta})
self.assertIs(BNotMeta, type(E))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
F = types.new_class("F", (object(), C))
self.assertIs(BNotMeta, type(F))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
F2 = types.new_class("F2", (C, object()))
self.assertIs(BNotMeta, type(F2))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
# TypeError: | |
None:
source_hash_id: Optional[str] = params.get("content_hash_id")
try:
existing_file = self.get_nondeleted_recycled_file(Files.hash_id == source_hash_id)
self.check_file_permissions([existing_file], current_user, ['readable'],
permit_recycled=True)
except RecordNotFound:
raise ValidationError(f"The requested file or directory to clone from "
f"({source_hash_id}) could not be found.",
"content_hash_id")
if existing_file.mime_type == DirectoryTypeProvider.MIME_TYPE:
raise ValidationError(f"The specified clone source ({source_hash_id}) "
f"is a folder and that is not supported.", "mime_type")
file.mime_type = existing_file.mime_type
file.doi = existing_file.doi
file.annotations = existing_file.annotations
file.annotations_date = existing_file.annotations_date
file.custom_annotations = existing_file.custom_annotations
file.upload_url = existing_file.upload_url
file.excluded_annotations = existing_file.excluded_annotations
file.content_id = existing_file.content_id
if 'description' not in params:
file.description = existing_file.description
# Create operation
else:
buffer, url = self._get_content_from_params(params)
# Figure out file size
buffer.seek(0, io.SEEK_END)
size = buffer.tell()
buffer.seek(0)
# Check max file size
if size > self.file_max_size:
raise ValidationError(
'Your file could not be processed because it is too large.')
# Save the URL
file.upload_url = url
mime_type = params.get('mime_type')
# Detect mime type
if mime_type:
file.mime_type = mime_type
else:
mime_type = file_type_service.detect_mime_type(buffer)
buffer.seek(0) # Must rewind
file.mime_type = mime_type
# Get the provider based on what we know now
provider = file_type_service.get(file)
# if no provider matched try to convert
# if it is a bioc-xml file
if isinstance(provider, BiocTypeProvider):
# then convert it to BiocJSON
provider.convert(buffer)
file_name, ext = os.path.splitext(file.filename)
# if ext is not bioc then set it bioc.
if ext.lower() != '.bioc':
file.filename = file_name + '.bioc'
if provider == file_type_service.default_provider:
file_name, extension = os.path.splitext(file.filename)
if extension.isupper():
file.mime_type = 'application/pdf'
provider = file_type_service.get(file)
provider.convert(buffer)
# Check if the user can even upload this type of file
if not provider.can_create():
raise ValidationError(f"The provided file type is not accepted.")
# Validate the content
try:
provider.validate_content(buffer)
buffer.seek(0) # Must rewind
except ValueError as e:
raise ValidationError(f"The provided file may be corrupt: {str(e)}")
# Get the DOI
file.doi = provider.extract_doi(buffer)
buffer.seek(0) # Must rewind
# Save the file content if there's any
if size:
file.content_id = FileContent.get_or_create(buffer)
buffer.seek(0) # Must rewind
try:
buffer.close()
except Exception:
pass
# ========================================
# Annotation options
# ========================================
if params.get('fallback_organism'):
db.session.add(params['fallback_organism'])
file.fallback_organism = params['fallback_organism']
if params.get('annotation_configs'):
file.annotation_configs = params['annotation_configs']
# ========================================
# Commit and filename conflict resolution
# ========================================
# Filenames could conflict, so we may need to generate a new filename
# Trial 1: First attempt
# Trial 2: Try adding (N+1) to the filename and try again
# Trial 3: Try adding (N+1) to the filename and try again (in case of a race condition)
# Trial 4: Give up
# Trial 3 only does something if the transaction mode is in READ COMMITTED or worse (!)
for trial in range(4):
if 1 <= trial <= 2: # Try adding (N+1)
try:
file.filename = file.generate_non_conflicting_filename()
except ValueError:
raise ValidationError(
'Filename conflicts with an existing file in the same folder.',
"filename")
elif trial == 3: # Give up
raise ValidationError(
'Filename conflicts with an existing file in the same folder.',
"filename")
try:
db.session.begin_nested()
db.session.add(file)
db.session.commit()
break
except IntegrityError as e:
# Warning: this could catch some other integrity error
db.session.rollback()
db.session.commit()
# ========================================
# Return new file
# ========================================
return self.get_file_response(file.hash_id, current_user)
    @use_args(lambda request: BulkFileRequestSchema(),
              locations=['json', 'form', 'files', 'mixed_form_json'])
    @use_args(lambda request: BulkFileUpdateRequestSchema(partial=True),
              locations=['json', 'form', 'files', 'mixed_form_json'])
    def patch(self, targets, params):
        """File update endpoint.

        Applies *params* to every file in targets['hash_ids'], and — when the
        (first) target is a map — synchronizes the MapLinks table with
        params['hashes_of_linked'].
        """
        # do NOT write any code before those two lines - it will cause some unit tests to fail
        current_user = g.current_user
        missing_hash_ids = self.update_files(targets['hash_ids'], params, current_user)
        linked_files = params.pop('hashes_of_linked', [])
        files = self.get_nondeleted_recycled_files(Files.hash_id.in_(targets['hash_ids']))
        map_id = None
        to_add, new_ids = [], []
        if files:
            # NOTE(review): only the first target is checked for being a map;
            # presumably link updates are only sent for single-map requests — confirm.
            file = files[0]
            if file.mime_type == FILE_MIME_TYPE_MAP:
                map_id = file.id
                new_files = self.get_nondeleted_recycled_files(Files.hash_id.in_(linked_files))
                new_ids = [file.id for file in new_files]
                # Possibly could be optimized with some get_or_create or insert_if_not_exist
                to_add = [MapLinks(map_id=map_id, linked_id=file.id) for file in new_files if not
                          db.session.query(MapLinks).filter_by(map_id=map_id, linked_id=file.id
                                                               ).scalar()]
        response = self.get_bulk_file_response(targets['hash_ids'], current_user,
                                               missing_hash_ids=missing_hash_ids)
        # Add changes to the MapLinks after then response generation, as it might raise exceptions
        try:
            if to_add:
                db.session.bulk_save_objects(to_add)
            if map_id:
                # Drop links that the map no longer references.
                delete_count = db.session.query(MapLinks).filter(MapLinks.map_id == map_id,
                                                                 MapLinks.linked_id.notin_(new_ids)
                                                                 ).delete(synchronize_session=False)
                if to_add or delete_count:
                    db.session.commit()
        except SQLAlchemyError:
            db.session.rollback()
            raise
        return response
    # noinspection DuplicatedCode
    @use_args(lambda request: BulkFileRequestSchema())
    def delete(self, targets):
        """File delete endpoint.

        Soft-deletes the targeted files (stamps recycling/deletion dates and
        users); non-empty folders and project root folders are rejected.
        """
        current_user = g.current_user
        hash_ids = targets['hash_ids']
        files = self.get_nondeleted_recycled_files(Files.hash_id.in_(hash_ids))
        self.check_file_permissions(files, current_user, ['writable'], permit_recycled=True)
        # ========================================
        # Apply
        # ========================================
        for file in files:
            children = self.get_nondeleted_recycled_files(and_(
                Files.parent_id == file.id,
                Files.recycling_date.is_(None),
            ))
            # For now, we won't let people delete non-empty folders (although this code
            # is subject to a race condition) because the app doesn't handle deletion that well
            # yet and the children would just become orphan files that would still be
            # accessible but only by URL and with no easy way to delete them
            if len(children):
                raise ValidationError('Only empty folders can be deleted.', 'hash_ids')
            if file.calculated_project.root_id == file.id:
                raise ValidationError(f"You cannot delete the root directory "
                                      f"for a project (the folder for the project "
                                      f"'{file.calculated_project.name}' was specified).")
            # Soft delete: record who recycled/deleted the file and when.
            if not file.recycled:
                file.recycling_date = datetime.now()
                file.recycler = current_user
                file.modifier = current_user
            if not file.deleted:
                file.deletion_date = datetime.now()
                file.deleter = current_user
                file.modifier = current_user
        db.session.commit()
        # ========================================
        # Return changed files
        # ========================================
        return jsonify(MultipleFileResponseSchema().dump(dict(
            mapping={},
            missing=[],
        )))
def _get_content_from_params(self, params: dict) -> Tuple[io.BufferedIOBase, Optional[str]]:
url = params.get('content_url')
buffer = params.get('content_value')
# Fetch from URL
if url is not None:
try:
buffer = read_url(
urllib.request.Request(url, headers={
'User-Agent': self.url_fetch_user_agent,
}),
max_length=self.file_max_size,
timeout=self.url_fetch_timeout,
prefer_direct_downloads=True
)
except Exception:
raise ValidationError('Your file could not be downloaded, either because it is '
'inaccessible or another problem occurred. Please double '
'check the spelling of the URL. You can also download '
'the file to your computer from the original website and '
'upload the file manually.', "content_url")
return buffer, url
# Fetch from upload
elif buffer is not None:
return buffer, None
else:
return typing.cast(io.BufferedIOBase, io.BytesIO()), None
class FileSearchView(FilesystemBaseView):
    """Search endpoint: lists public files, or maps linked to a file."""
    decorators = [auth.login_required]
    @use_args(FileSearchRequestSchema)
    @use_args(PaginatedRequestSchema)
    def post(self, params: dict, pagination: dict):
        """Return a paginated FileListSchema payload.

        params['type'] selects the mode:
        * 'public': non-recycled, non-deleted files flagged public,
          optionally filtered by params['mime_types'].
        * 'linked': maps linking to params['linked_hash_id'] (the caller
          must be able to read that file).
        Any other type raises NotImplementedError.
        """
        current_user = g.current_user
        if params['type'] == 'public':
            # First we query for public files without getting parent directory
            # or project information
            query = db.session.query(Files.id) \
                .filter(Files.recycling_date.is_(None),
                        Files.deletion_date.is_(None),
                        Files.public.is_(True)) \
                .order_by(*params['sort'])
            if 'mime_types' in params:
                query = query.filter(Files.mime_type.in_(params['mime_types']))
            result = query.paginate(pagination['page'], pagination['limit'])
            # Now we get the full file information for this slice of the results
            files = self.get_nondeleted_recycled_files(Files.id.in_(result.items))
            total = result.total
        elif params['type'] == 'linked':
            hash_id = params['linked_hash_id']
            file = self.get_nondeleted_recycled_file(Files.hash_id == hash_id,
                                                     lazy_load_content=True)
            self.check_file_permissions([file], current_user, ['readable'], permit_recycled=True)
            # TODO: Sort?
            query = db.session.query(MapLinks.map_id) \
                .filter(MapLinks.linked_id == file.id)
            result = query.paginate(pagination['page'], pagination['limit'])
            # Now we get the full file information for this slice of the results
            files = self.get_nondeleted_recycled_files(Files.id.in_(result.items))
            # NOTE(review): unlike the 'public' branch this reports the size of
            # the current page, not result.total — confirm this is intentional.
            total = len(files)
        else:
            raise NotImplementedError()
        return jsonify(FileListSchema(context={
            'user_privilege_filter': g.current_user.id,
        }, exclude=(
            'results.children',
        )).dump({
            'total': total,
            'results': files,
        }))
class FileDetailView(FilesystemBaseView):
    """Read and update a single file identified by its hash ID."""
    decorators = [auth.login_required]
    def get(self, hash_id: str):
        """Fetch a single file."""
        current_user = g.current_user
        return self.get_file_response(hash_id, current_user)
    @use_args(lambda request: FileUpdateRequestSchema(partial=True),
              locations=['json', 'form', 'files', 'mixed_form_json'])
    def patch(self, params: dict, hash_id: str):
        """Update a single file."""
        current_user = g.current_user
        self.update_files([hash_id], params, current_user)
        # Respond with the updated file, same shape as GET.
        return self.get(hash_id)
class FileContentView(FilesystemBaseView):
    """Serves the raw stored bytes of a single file."""
    decorators = [auth.login_required]
    def get(self, hash_id: str):
        """Fetch a single file's content.

        Requires read permission on the file; responds with a cacheable
        file download whose ETag is the hex SHA-256 of the content.
        """
        current_user = g.current_user
        file = self.get_nondeleted_recycled_file(Files.hash_id == hash_id, lazy_load_content=True)
        self.check_file_permissions([file], current_user, ['readable'], permit_recycled=True)
        # Lazy loaded
        if file.content:
            content = file.content.raw_file
            etag = file.content.checksum_sha256.hex()
        else:
            content = b''
            # BUGFIX: use hexdigest() so the ETag is a hex string like the
            # branch above (digest() returned raw bytes, giving an
            # inconsistent ETag type for empty files).
            etag = hashlib.sha256(content).hexdigest()
        return make_cacheable_file_response(
            request,
            content,
            etag=etag,
            filename=file.filename,
            mime_type=file.mime_type
        )
class MapContentView(FilesystemBaseView):
    """Serves the graph.json payload stored inside a map file's archive."""
    decorators = [auth.login_required]
    def get(self, hash_id: str):
        """Fetch a content (graph.json) from a map."""
        current_user = g.current_user
        file = self.get_nondeleted_recycled_file(Files.hash_id == hash_id, lazy_load_content=True)
        self.check_file_permissions([file], current_user, ['readable'], permit_recycled=True)
        if file.mime_type != FILE_MIME_TYPE_MAP:
            raise ValidationError(f'Cannot retrieve map content from file with mime type: '
                                  f'{file.mime_type}')
        try:
            # Map content is stored as a zip archive containing graph.json.
            zip_file = zipfile.ZipFile(io.BytesIO(file.content.raw_file))
            json_graph = zip_file.read('graph.json')
        except (KeyError, zipfile.BadZipFile):
            # KeyError: no graph.json member; BadZipFile: corrupt archive.
            raise ValidationError(
                'Cannot retrieve contents of the file - it might be corrupted')
        etag = hashlib.sha256(json_graph).hexdigest()
        return make_cacheable_file_response(
            request,
            json_graph,
            etag=etag,
            filename=file.filename,
            mime_type=file.mime_type
        )
class FileExportView(FilesystemBaseView):
decorators = [auth.login_required]
# Move that to constants if accepted
@use_args(FileExportRequestSchema)
def post(self, params: dict, hash_id: str):
"""Export a file."""
current_user = g.current_user
file = self.get_nondeleted_recycled_file(Files.hash_id == hash_id, lazy_load_content=True)
self.check_file_permissions([file], current_user, ['readable'], permit_recycled=True)
file_type_service = get_file_type_service()
file_type = file_type_service.get(file)
if params['export_linked'] and params['format'] in | |
# -*- coding: utf-8 -*-
#
# Module for all kinds of parsing/input sanitization gymnastics
#
# Builtin/3rd party package imports
import os
import numpy as np
# Local imports
from syncopy.shared.filetypes import FILE_EXT
from syncopy.shared.errors import (SPYIOError, SPYTypeError, SPYValueError,
SPYWarning)
__all__ = []
def io_parser(fs_loc, varname="", isfile=True, ext="", exists=True):
    """
    Parse file-system location strings for reading/writing files/directories

    Parameters
    ----------
    fs_loc : str
        String pointing to (hopefully valid) file-system location
        (absolute/relative path of file or directory).
    varname : str
        Local variable name used in caller, see Examples for details.
    isfile : bool
        Indicates whether `fs_loc` points to a file (`isfile = True`) or
        directory (`isfile = False`)
    ext : str or 1darray-like
        Valid filename extension(s). Can be a single string (e.g., `ext = "lfp"`)
        or a list/1darray of valid extensions (e.g., `ext = ["lfp", "mua"]`).
    exists : bool
        If `exists = True` ensure that file-system location specified by `fs_loc` exists
        (typically used when reading from `fs_loc`), otherwise (`exists = False`)
        check for already present conflicting files/directories (typically used when
        creating/writing to `fs_loc`).

    Returns
    -------
    fs_path : str
        Absolute path of `fs_loc` (with trailing separator for files).
    fs_name : str (only if `isfile = True`)
        Name (including extension) of input file (without path).

    Examples
    --------
    To test whether `"/path/to/dataset.lfp"` points to an existing file, one
    might use

    >>> io_parser("/path/to/dataset.lfp")
    '/path/to/', 'dataset.lfp'

    The following call ensures that a folder called "mydata" can be safely
    created in the current working directory

    >>> io_parser("mydata", isfile=False, exists=False)
    '/path/to/cwd/mydata'

    Suppose a routine wants to save data to a file with potential
    extensions `".lfp"` or `".mua"`. The following call may be used to ensure
    the user input `dsetname = "relative/dir/dataset.mua"` is a valid choice:

    >>> abs_path, filename = io_parser(dsetname, varname="dsetname", ext=["lfp", "mua"], exists=False)
    >>> abs_path
    '/full/path/to/relative/dir/'
    >>> filename
    'dataset.mua'
    """
    # Start by resolving potential conflicts
    if not isfile and len(ext) > 0:
        msg = "filename extension(s) specified but `isfile = False`. Exiting..."
        SPYWarning(msg)
        return

    # Make sure `fs_loc` is actually a string
    if not isinstance(fs_loc, str):
        raise SPYTypeError(fs_loc, varname=varname, expected=str)

    # Avoid headaches, use absolute paths...
    fs_loc = os.path.abspath(os.path.expanduser(fs_loc))

    # Ensure that filesystem object does/does not exist
    if exists and not os.path.exists(fs_loc):
        raise SPYIOError(fs_loc, exists=False)
    if not exists and os.path.exists(fs_loc):
        raise SPYIOError(fs_loc, exists=True)

    # First, take care of directories...
    if not isfile:
        isdir = os.path.isdir(fs_loc)
        if isdir and not exists:
            raise SPYIOError(fs_loc, exists=isdir)
        elif not isdir and exists:
            raise SPYValueError(legal="directory", actual="file")
        else:
            return fs_loc

    # ...now files: separate filename from its path
    file_name = os.path.basename(fs_loc)

    # If wanted, parse filename extension(s)
    if len(ext):
        # Extract filename extension and get rid of its dot
        file_ext = os.path.splitext(file_name)[1].replace(".", "")
        # BUGFIX: compare against the actual allowed extensions instead of
        # substring-matching on ``str(ext)``, which wrongly accepted partial
        # extensions (e.g. "lf" against ["lfp", "mua"]).
        valid_exts = [ext] if isinstance(ext, str) else [str(ex) for ex in ext]
        # In here, having no extension counts as an error
        if len(file_ext) == 0 or file_ext not in valid_exts:
            if isinstance(ext, (list, np.ndarray)):
                ext = "'" + "or '".join(ex + "' " for ex in ext)
            raise SPYValueError(ext, varname="filename-extension", actual=file_ext)

    # Now make sure file does or does not exist
    isfile = os.path.isfile(fs_loc)
    if isfile and not exists:
        raise SPYIOError(fs_loc, exists=isfile)
    elif not isfile and exists:
        raise SPYValueError(legal="file", actual="directory")
    else:
        # BUGFIX: derive the directory via os.path.dirname (keeping the
        # trailing separator of the old behavior) instead of str.split,
        # which broke when the filename also occurred earlier in the path
        # (e.g. "/data/foo/foo").
        return os.path.join(os.path.dirname(fs_loc), ""), file_name
def scalar_parser(var, varname="", ntype=None, lims=None):
    """
    Parse scalars

    Parameters
    ----------
    var : scalar
        Scalar quantity to verify
    varname : str
        Local variable name used in caller, see Examples for details.
    ntype : None or str
        Expected numerical type of `var`. Possible options include any valid
        builtin type as well as `"int_like"` (`var` is expected to have
        no significant digits after its decimal point, e.g., 3.0, -12.0 etc.).
        If `ntype` is `None` the numerical type of `var` is not checked.
    lims : None or two-element list_like
        Lower (`lims[0]`) and upper (`lims[1]`) bounds for legal values of `var`.
        Note that the code checks for non-strict inequality, i.e., `var = lims[0]` or
        `var = lims[1]` are both considered to be valid values of `var`.
        Using `lims = [-np.inf, np.inf]` may be employed to ensure that `var` is
        finite and non-NaN. For complex scalars bounds-checking is performed
        element-wise, that is both real and imaginary part of `var` have to be
        inside the bounds provided by `lims` (see Examples for details).
        If `lims` is `None` bounds-checking is not performed.

    Returns
    -------
    Nothing : None

    Examples
    --------
    Assume `freq` is supposed to be a scalar with integer-like values between
    10 and 1000. The following calls confirm the validity of `freq`

    >>> freq = 440
    >>> scalar_parser(freq, varname="freq", ntype="int_like", lims=[10, 1000])
    >>> freq = 440.0
    >>> scalar_parser(freq, varname="freq", ntype="int_like", lims=[10, 1000])

    Conversely, these values of `freq` yield errors

    >>> freq = 440.5 # not integer-like
    >>> scalar_parser(freq, varname="freq", ntype="int_like", lims=[10, 1000])
    >>> freq = 2 # outside bounds
    >>> scalar_parser(freq, varname="freq", ntype="int_like", lims=[10, 1000])
    >>> freq = '440' # not a scalar
    >>> scalar_parser(freq, varname="freq", ntype="int_like", lims=[10, 1000])

    For complex scalars bounds-checking is performed element-wise on both
    real and imaginary part:

    >>> scalar_parser(complex(2,-1), lims=[-3, 5]) # valid
    >>> scalar_parser(complex(2,-1), lims=[-3, 1]) # invalid since real part is greater than 1

    See also
    --------
    array_parser : similar functionality for parsing array-like objects
    """
    # Make sure `var` is a scalar-like number
    if not np.issubdtype(type(var), np.number):
        raise SPYTypeError(var, varname=varname, expected="scalar")

    # If required, parse type ("int_like" is a bit of a special case here...)
    if ntype is not None:
        if ntype == "int_like":
            if np.round(var) != var:
                raise SPYValueError(ntype, varname=varname, actual=str(var))
        else:
            # BUGFIX: look the expected type up in the `builtins` module.
            # ``__builtins__`` is a CPython implementation detail that is a
            # dict (not a module) inside imported modules, so the previous
            # ``getattr(__builtins__, ntype)`` raised AttributeError there.
            import builtins
            if type(var) != getattr(builtins, ntype):
                raise SPYTypeError(var, varname=varname, expected=ntype)

    # If required perform bounds-check: transform scalar to NumPy array
    # to be able to handle complex scalars too
    if lims is not None:
        if isinstance(var, complex):
            val = np.array([var.real, var.imag])
            legal = "both real and imaginary part to be "
        else:
            val = np.array([var])
            legal = "value to be "
        if np.any(val < lims[0]) or np.any(val > lims[1]) or not np.isfinite(var):
            legal += "greater or equals {lb:s} and less or equals {ub:s}"
            raise SPYValueError(legal.format(lb=str(lims[0]), ub=str(lims[1])),
                                varname=varname, actual=str(var))
    return
def array_parser(var, varname="", ntype=None, hasinf=None, hasnan=None,
lims=None, dims=None, issorted=None):
"""
Parse array-like objects
Parameters
----------
var : array_like
Array object to verify
varname : str
Local variable name used in caller, see Examples for details.
ntype : None or str
Expected data type of `var`. Possible options are any valid
builtin type, all NumPy dtypes as as well as `"numeric"` (a catch-all
to ensure `var` only contains numeric elements) and "int_like"`
(all elements of `var` are expected to have no significant digits
after the decimal point, e.g., 3.0, -12.0 etc.).
If `ntype` is `None` the data type of `var` is not checked.
hasinf : None or bool
If `hasinf` is `False` the input array `var` is considered invalid
if it contains non-finite elements (`np.inf`), vice-versa if `hasinf`
is `True`. If `hasinf` is `None` elements of `var` are not probed
for finiteness.
hasnan : None or bool
If `hasnan` is `False` the input array `var` is considered invalid
if it contains undefined elements (`np.nan`), vice-versa if `hasnan`
is `True`. If `hasnan` is `None` elements of `var` are not probed
for well-posedness.
lims : None or two-element list_like
Lower (`lims[0]`) and upper (`lims[1]`) bounds for legal values of `var`'s
elements. Note that the code checks for non-strict inequality,
i.e., `var[i] = lims[0]` or `var[i] = lims[1]` are both considered
to be valid elements of `var`.
For complex arrays bounds-checking is performed on both real and
imaginary parts of each component of `var`. That is, all elements of
`var` have to satisfy `lims[0] <= var[i].real <= lims[1]` as well as
`lims[0] <= var[i].imag <= lims[1]` (see Examples for details).
Note that `np.inf` and `np.nan` entries are ignored during bounds-
checking. Use the keywords `hasinf` and `hasnan` to probe an array
for infinite and non-numeric entries, respectively.
If `lims` is `None` bounds-checking is not performed.
dims : None or int or tuple
Expected number of dimensions (if `dims` is an integer) or shape
(if `dims` is a tuple) of `var`. By default, | |
as for :func:`rst_content`
suffix: as for :func:`rst_content`
heading_underline_char: as for :func:`rst_content`
method: as for :func:`rst_content`
overwrite: overwrite the file if it exists already?
mock: pretend to write, but don't
"""
content = self.rst_content(
prefix=prefix,
suffix=suffix,
heading_underline_char=heading_underline_char,
method=method
)
write_if_allowed(self.target_rst_filename, content,
overwrite=overwrite, mock=mock)
# =============================================================================
# AutodocIndex
# =============================================================================
class AutodocIndex(object):
"""
Class to make an RST file that indexes others.
Example:
.. code-block:: python
import logging
from cardinal_pythonlib.logs import *
from cardinal_pythonlib.sphinxtools import *
main_only_quicksetup_rootlogger(level=logging.INFO)
# Example where one index contains another:
subidx = AutodocIndex(
index_filename="~/Documents/code/cardinal_pythonlib/docs/source/autodoc/_index2.rst",
highest_code_dir="~/Documents/code/cardinal_pythonlib",
project_root_dir="~/Documents/code/cardinal_pythonlib",
autodoc_rst_root_dir="~/Documents/code/cardinal_pythonlib/docs/source/autodoc",
source_filenames_or_globs="~/Documents/code/cardinal_pythonlib/docs/*.py",
)
idx = AutodocIndex(
index_filename="~/Documents/code/cardinal_pythonlib/docs/source/autodoc/_index.rst",
highest_code_dir="~/Documents/code/cardinal_pythonlib",
project_root_dir="~/Documents/code/cardinal_pythonlib",
autodoc_rst_root_dir="~/Documents/code/cardinal_pythonlib/docs/source/autodoc",
source_filenames_or_globs="~/Documents/code/cardinal_pythonlib/cardinal_pythonlib/*.py",
)
idx.add_index(subidx)
print(idx.index_content())
idx.write_index_and_rst_files(overwrite=True, mock=True)
# Example with a flat index:
flatidx = AutodocIndex(
index_filename="~/Documents/code/cardinal_pythonlib/docs/source/autodoc/_index.rst",
highest_code_dir="~/Documents/code/cardinal_pythonlib/cardinal_pythonlib",
project_root_dir="~/Documents/code/cardinal_pythonlib",
autodoc_rst_root_dir="~/Documents/code/cardinal_pythonlib/docs/source/autodoc",
source_filenames_or_globs="~/Documents/code/cardinal_pythonlib/cardinal_pythonlib/*.py",
)
print(flatidx.index_content())
flatidx.write_index_and_rst_files(overwrite=True, mock=True)
""" # noqa
def __init__(self,
             index_filename: str,
             project_root_dir: str,
             autodoc_rst_root_dir: str,
             highest_code_dir: str,
             python_package_root_dir: str = None,
             source_filenames_or_globs: Union[str, Iterable[str]] = None,
             index_heading_underline_char: str = "-",
             source_rst_heading_underline_char: str = "~",
             title: str = DEFAULT_INDEX_TITLE,
             introductory_rst: str = "",
             recursive: bool = True,
             skip_globs: List[str] = None,
             toctree_maxdepth: int = 1,
             method: AutodocMethod = AutodocMethod.BEST,
             rst_prefix: str = "",
             rst_suffix: str = "",
             source_rst_title_style_python: bool = True,
             pygments_language_override: Dict[str, str] = None) -> None:
    """
    Args:
        index_filename:
            filename of the index ``.RST`` (ReStructured Text) file to
            create
        project_root_dir:
            top-level directory for the whole project
        autodoc_rst_root_dir:
            directory within which all automatically generated ``.RST``
            files (each to document a specific source file) will be placed.
            A directory hierarchy within this directory will be created,
            reflecting the structure of the code relative to
            ``highest_code_dir`` (q.v.).
        highest_code_dir:
            the "lowest" directory such that all code is found within it;
            the directory structure within ``autodoc_rst_root_dir`` is to
            ``.RST`` files what the directory structure is of the source
            files, relative to ``highest_code_dir``.
        python_package_root_dir:
            if your Python modules live in a directory other than
            ``project_root_dir``, specify it here
        source_filenames_or_globs:
            optional string, or list of strings, each describing a file or
            glob-style file specification; these are the source filenames
            to create automatic RST for. If you don't specify them here,
            you can use :func:`add_source_files`. To add sub-indexes, use
            :func:`add_index` and :func:`add_indexes`.
        index_heading_underline_char:
            the character used to underline the title in the index file
        source_rst_heading_underline_char:
            the character used to underline the heading in each of the
            source files
        title:
            title for the index
        introductory_rst:
            extra RST for the index, which goes between the title and the
            table of contents
        recursive:
            use :func:`glob.glob` in recursive mode?
        skip_globs:
            list of file names or file specifications to skip; e.g.
            ``['__init__.py']``
        toctree_maxdepth:
            ``maxdepth`` parameter for the ``toctree`` command generated in
            the index file
        method:
            see :class:`FileToAutodocument`
        rst_prefix:
            optional RST content (e.g. copyright comment) to put early on
            in each of the RST files
        rst_suffix:
            optional RST content to put late on in each of the RST files
        source_rst_title_style_python:
            make the individual RST files use titles in the style of Python
            modules, ``x.y.z``, rather than path style (``x/y/z``); path
            style will be used for non-Python files in any case.
        pygments_language_override:
            if specified, a dictionary mapping file extensions to Pygments
            languages (for example: a ``.pro`` file will be autodetected as
            Prolog, but you might want to map that to ``none`` for Qt
            project files).
    """
    # Validate all simple arguments up front, in one place. (Previously,
    # isinstance checks for `method` and `recursive` were repeated/deferred
    # until after the attributes had already been assigned and used.)
    assert index_filename
    assert project_root_dir
    assert autodoc_rst_root_dir
    assert isinstance(toctree_maxdepth, int)
    assert isinstance(method, AutodocMethod)
    assert isinstance(recursive, bool)
    self.index_filename = abspath(expanduser(index_filename))
    self.title = title
    self.introductory_rst = introductory_rst
    self.project_root_dir = abspath(expanduser(project_root_dir))
    self.autodoc_rst_root_dir = abspath(expanduser(autodoc_rst_root_dir))
    self.highest_code_dir = abspath(expanduser(highest_code_dir))
    # Default the Python package root to the project root if not given.
    self.python_package_root_dir = (
        abspath(expanduser(python_package_root_dir))
        if python_package_root_dir else self.project_root_dir
    )
    self.index_heading_underline_char = index_heading_underline_char
    self.source_rst_heading_underline_char = source_rst_heading_underline_char  # noqa
    self.recursive = recursive
    self.skip_globs = skip_globs if skip_globs is not None else DEFAULT_SKIP_GLOBS  # noqa
    self.toctree_maxdepth = toctree_maxdepth
    self.method = method
    self.rst_prefix = rst_prefix
    self.rst_suffix = rst_suffix
    self.source_rst_title_style_python = source_rst_title_style_python
    self.pygments_language_override = pygments_language_override or {}  # type: Dict[str, str]  # noqa
    # Sanity-check the directory layout: the index file, the code
    # directory, and the autodoc RST directory must all live within the
    # project root.
    assert isdir(self.project_root_dir), (
        f"Not a directory: project_root_dir={self.project_root_dir!r}")
    assert relative_filename_within_dir(
        filename=self.index_filename,
        directory=self.project_root_dir
    ), (
        f"Index file {self.index_filename!r} is not within "
        f"project directory {self.project_root_dir!r}"
    )
    assert relative_filename_within_dir(
        filename=self.highest_code_dir,
        directory=self.project_root_dir
    ), (
        f"Highest code directory {self.highest_code_dir!r} is not within "
        f"project directory {self.project_root_dir!r}"
    )
    assert relative_filename_within_dir(
        filename=self.autodoc_rst_root_dir,
        directory=self.project_root_dir
    ), (
        f"Autodoc RST root directory {self.autodoc_rst_root_dir!r} is not "
        f"within project directory {self.project_root_dir!r}"
    )
    self.files_to_index = []  # type: List[Union[FileToAutodocument, AutodocIndex]]  # noqa
    if source_filenames_or_globs:
        self.add_source_files(source_filenames_or_globs)
def __repr__(self) -> str:
    """Return an automatically generated ``repr`` (via :func:`auto_repr`)."""
    return auto_repr(self)
def add_source_files(
        self,
        source_filenames_or_globs: Union[str, List[str]],
        method: AutodocMethod = None,
        recursive: bool = None,
        source_rst_title_style_python: bool = None,
        pygments_language_override: Dict[str, str] = None) -> None:
    """
    Adds source files to the index.

    Args:
        source_filenames_or_globs: string containing a filename or a
            glob, describing the file(s) to be added, or a list of such
            strings
        method: optional method to override ``self.method``
        recursive: use :func:`glob.glob` in recursive mode? (If ``None``,
            the default, uses the version from the constructor.)
        source_rst_title_style_python: optional to override
            ``self.source_rst_title_style_python``
        pygments_language_override: optional to override
            ``self.pygments_language_override``
    """
    if not source_filenames_or_globs:
        return
    # Any option left as None falls back to the instance-level default
    # chosen at construction time.
    method = self.method if method is None else method
    recursive = self.recursive if recursive is None else recursive
    if source_rst_title_style_python is None:
        source_rst_title_style_python = self.source_rst_title_style_python
    if pygments_language_override is None:
        pygments_language_override = self.pygments_language_override
    # Expand globs, filter exclusions, and sort; then register one
    # FileToAutodocument per resulting source file.
    sorted_filenames = self.get_sorted_source_files(
        source_filenames_or_globs,
        recursive=recursive
    )
    self.files_to_index.extend(
        FileToAutodocument(
            source_filename=fname,
            project_root_dir=self.project_root_dir,
            python_package_root_dir=self.python_package_root_dir,
            target_rst_filename=self.specific_file_rst_filename(fname),
            method=method,
            source_rst_title_style_python=source_rst_title_style_python,
            pygments_language_override=pygments_language_override,
        )
        for fname in sorted_filenames
    )
def get_sorted_source_files(
        self,
        source_filenames_or_globs: Union[str, List[str]],
        recursive: bool = True) -> List[str]:
    """
    Returns a sorted list of filenames to process, from a filename,
    a glob string, or a list of filenames/globs.

    Args:
        source_filenames_or_globs: filename/glob, or list of them
        recursive: use :func:`glob.glob` in recursive mode?

    Returns:
        sorted list of files to process
    """
    # Normalize a single string to a one-element list.
    if isinstance(source_filenames_or_globs, str):
        specs = [source_filenames_or_globs]
    else:
        specs = source_filenames_or_globs
    collected = []  # type: List[str]
    for spec in specs:
        expanded = expanduser(spec)
        log.debug("Looking for: {!r}", expanded)
        for candidate in glob.glob(expanded, recursive=recursive):
            log.debug("Trying: {!r}", candidate)
            if self.should_exclude(candidate):
                log.info("Skipping file {!r}", candidate)
                continue
            collected.append(candidate)
    return sorted(collected)
@staticmethod
def filename_matches_glob(filename: str, globtext: str) -> bool:
    """
    The ``glob.glob`` function doesn't do exclusion very well. We don't
    want to have to specify root directories for exclusion patterns. We
    don't want to have to trawl a massive set of files to find exclusion
    files. So let's implement a glob match.

    Args:
        filename: filename
        globtext: glob

    Returns:
        does the filename match the glob?

    See also:

    - https://stackoverflow.com/questions/20638040/glob-exclude-pattern
    """
    # Try the full path first, then the basename alone; matching on the
    # basename also handles directory-style patterns.
    for candidate in (filename, basename(filename)):
        if fnmatch(candidate, globtext):
            log.debug("{!r} matches {!r}", candidate, globtext)
            return True
    return False
def should_exclude(self, filename: str) -> bool:
    """
    Should we exclude this file from consideration?

    Args:
        filename: filename to test against each pattern in
            ``self.skip_globs``

    Returns:
        ``True`` if the filename matches any skip glob
    """
    # Idiomatic any() over the skip globs; short-circuits on first match,
    # exactly like the original loop-with-early-return.
    return any(self.filename_matches_glob(filename, skip_glob)
               for skip_glob in self.skip_globs)
def add_index(self, index: "AutodocIndex") -> None:
    """
    Add a sub-index file to this index.

    Args:
        index: index file to add, as an instance of :class:`AutodocIndex`;
            it is appended to ``self.files_to_index`` alongside individual
            files
    """
    self.files_to_index.append(index)
def add_indexes(self, indexes: List["AutodocIndex"]) -> None:
    """
    Adds multiple sub-indexes to this index.

    Args:
        indexes: list of sub-indexes, each added via :func:`add_index`
    """
    for subindex in indexes:
        self.add_index(subindex)
def specific_file_rst_filename(self, source_filename: str) -> str:
    """
    Gets the RST filename corresponding to a source filename.
    See the help for the constructor for more details.

    Args:
        source_filename: source filename within current project

    Returns:
        RST filename

    Note in particular: the way we structure the directories means that we
    won't get clashes between files with identical names in two different
    directories. However, we must also incorporate the original source
    filename, in particular for C++ where ``thing.h`` and ``thing.cpp``
    must not generate the same RST filename. So we just add ``.rst``.
    """
    # Mirror the source file's position under highest_code_dir into the
    # autodoc RST tree, keeping the full original filename plus ".rst".
    rel_to_code_root = relative_filename_within_dir(
        source_filename, self.highest_code_dir)
    rst_filename = join(self.autodoc_rst_root_dir,
                        dirname(rel_to_code_root),
                        basename(source_filename) + EXT_RST)
    log.debug("Source {!r} -> RST {!r}", source_filename, rst_filename)
    return rst_filename
def write_index_and_rst_files(self, overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
"""
| |
+ b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b | |
*in_* A)) @ (4, DEDUCE)
with (x *in_* A) @ 5:
(x *in_* B) @ (6, PUT_THEOREM, "element_of_subset", A, 5, 0)
((x *in_* (A *cap* B)) == ((x *in_* A) & (x *in_* B))) @ (7, BY_THEOREM, "cap")
(x *in_* (A *cap* B)) @ (8, TAUTOLOGY, 5, 6, 7)
((x *in_* A) >> (x *in_* (A *cap* B))) @ (8, DEDUCE)
((x *in_* (A *cap* B)) == (x *in_* A)) @ (9, TAUTOLOGY, 4, 8)
All(x_, (x_ *in_* (A *cap* B)) == (x_ *in_* A)) @ (10, CLOSING, 9)
((A *cap* B) == A) @ (11, BY_THEOREM, "extensionality", 10)
((A *inc* B) >> ((A *cap* B) == A)) @ (12, DEDUCE)
All(A_, B_, (A_ *inc* B_) >> ((A_ *cap* B_) == A_)) @ ("cap_subset", CLOSING, 12)
# separation
# Theorem "separation": a subclass of a set is itself a set. Proved by
# applying "replacement" to the identity function: Identity(a)[b] is a set,
# and (by "image_of_identity" and "cap_subset") it equals a.
clear()
with ((a *inc* b) & Set(b)) @ 0:
    (a *inc* b) @ (1, TAUTOLOGY, 0)
    Set(b) @ (2, TAUTOLOGY, 0)
    Function(Identity(a)) @ (3, BY_THEOREM, "identity_is_function")
    Set(Identity(a)[b]) @ (4, BY_THEOREM, "replacement", 3, 2)
    (Identity(a)[b] == (a *cap* b)) @ (5, BY_THEOREM, "image_of_identity")
    ((a *cap* b) == a) @ (6, BY_THEOREM, "cap_subset", 1)
    (a == Identity(a)[b]) @ (7, BY_EQUIVALENCE, 5, 6)
    Set(a) @ (8, REPLACE, 4, 7)
(((a *inc* b) & Set(b)) >> Set(a)) @ (9, DEDUCE)
All(a_, b_, ((a_ *inc* b_) & Set(b_)) >> Set(a_)) @ ("separation", CLOSING, 12)

# empty is set
# Theorem "empty_is_set": the empty class is a set, because the axiom of
# infinity provides a set containing it.
clear()
((Set(a) & (Empty() *in_* a)) & All(x_, (x_ *in_* a) >> (Succ(x_) *in_* a))) @ (0, LET, a, "infinity")
(Empty() *in_* a) @ (1, TAUTOLOGY, 0)
Set(Empty()) @ ("empty_is_set", PUT_THEOREM, "set_condition", a, 1)

# inductive
# Definition "inductive": a set containing Empty() and closed under Succ.
clear()
Inductive = make_property("inductive")
All(a_, Inductive(a_) == ((Set(a_) & (Empty() *in_* a_)) & All(x_, (x_ *in_* a_) >> (Succ(x_) *in_* a_)))) @ ("inductive", DEFINE_PROPERTY, "inductive")

# inductive exist
# Theorem "inductive_exist": the set given by the axiom of infinity is
# inductive, so an inductive set exists.
clear()
((Set(a) & (Empty() *in_* a)) & All(x_, (x_ *in_* a) >> (Succ(x_) *in_* a))) @ (0, LET, a, "infinity")
Inductive(a) @ (1, BICONDITION, "inductive", 0)
Exist(a_, Inductive(a_)) @ ("inductive_exist", FOUND, a, 1)

# self inclusion
# Theorem "self_inclusion": every class includes itself.
clear()
((x *in_* A) >> (x *in_* A)) @ (0, TAUTOLOGY)
All(x_, (x_ *in_* A) >> (x_ *in_* A)) @ (1, CLOSING, 0)
(A *inc* A) @ (2, BICONDITION, "inclusion", 1)
All(A_, A_ *inc* A_) @ ("self_inclusion", CLOSING, 2)
# nonempty
# Theorem "nonempty": A != Empty() iff A has an element. Forward direction by
# contradiction (if nothing is in A, extensionality forces A == Empty());
# reverse direction directly.
clear()
with (A != Empty()) @ 0:
    with All(x_, ~ (x_ *in_* A)) @ 1:
        (~ (x *in_* A)) @ (2, PUT, x, 1)
        ((x *in_* Empty()) == false) @ (3, BY_THEOREM, "empty")
        ((x *in_* A) == (x *in_* Empty())) @ (4, TAUTOLOGY, 2, 3)
        All(x_, (x_ *in_* A) == (x_ *in_* Empty())) @ (5, CLOSING, 4)
        (A == Empty()) @ (6, BY_THEOREM, "extensionality", 5)
        false @ (7, TAUTOLOGY, 0, 6)
    (All(x_, ~ (x_ *in_* A)) >> false) @ (8, DEDUCE)
    ((~ Exist(x_, (x_ *in_* A))) == All(x_, ~ (x_ *in_* A))) @ (9, DUAL)
    Exist(x_, (x_ *in_* A)) @ (10, TAUTOLOGY, 8, 9)
((A != Empty()) >> Exist(x_, (x_ *in_* A))) @ (11, DEDUCE)
with Exist(x_, (x_ *in_* A)) @ 12:
    (y *in_* A) @ (13, LET, y, 12)
    ((y *in_* Empty()) == false) @ (14, BY_THEOREM, "empty")
    with (A == Empty()) @ 15:
        (y *in_* Empty()) @ (16, REPLACE, 13, 15)
        false @ (17, TAUTOLOGY, 16, 14)
    ((A == Empty()) >> false) @ (18, DEDUCE)
    (A != Empty()) @ (19, TAUTOLOGY, 18)
(Exist(x_, x_ *in_* A) >> (A != Empty())) @ (20, DEDUCE)
((A != Empty()) == Exist(x_, (x_ *in_* A))) @ (21, TAUTOLOGY, 11, 20)
All(A_, (A_ != Empty()) == Exist(x_, (x_ *in_* A_))) @ ("nonempty", CLOSING, 21)

# nonempty condition
# Corollary: membership witnesses non-emptiness.
# NOTE(review): unlike the surrounding proofs, this result is registered
# under the numeric label 4 rather than a theorem name, and there is no
# preceding clear() -- confirm this is intentional.
with (x *in_* A) @ 0:
    Exist(x_, x_ *in_* A) @ (1, FOUND, x, 0)
    (A != Empty()) @ (2, BICONDITION, "nonempty", 1)
((x *in_* A) >> (A != Empty())) @ (3, DEDUCE)
All(x_, A_, (x_ *in_* A_) >> (A_ != Empty())) @ (4, CLOSING, 3)
# naturals
# Definition and theorem "naturals": Naturals() is the class of all sets
# belonging to every inductive set. The theorem simplifies the defining
# condition: membership in Naturals() is equivalent to belonging to every
# inductive set (the Set(x) conjunct is derivable, since an inductive set
# exists and set_condition applies).
clear()
UniquelyExist(C, All(x_, (x_ *in_* C) == (Set(x_) & All(A_, Inductive(A_) >> (x_ *in_* A_))))) @ (0, DEFINE_CLASS, C)
Naturals = make_function("naturals")
All(x_, (x_ *in_* Naturals()) == (Set(x_) & All(A_, Inductive(A_) >> (x_ *in_* A_)))) @ (1, DEFINE_FUNCTION, "naturals", 0)
with All(A_, Inductive(A_) >> (x *in_* A_)) @ 2:
    Inductive(a) @ (3, LET, a, "inductive_exist")
    (Inductive(a) >> (x *in_* a)) @ (4, PUT, a, 2)
    (x *in_* a) @ (5, TAUTOLOGY, 3, 4)
    Set(x) @ (6, PUT_THEOREM, "set_condition", a, 5)
    (Set(x) & All(A_, Inductive(A_) >> (x *in_* A_))) @ (7, TAUTOLOGY, 6, 2)
    (x *in_* Naturals()) @ (8, BICONDITION, 1, 7)
(All(A_, Inductive(A_) >> (x *in_* A_)) >> (x *in_* Naturals())) @ (9, DEDUCE)
with (x *in_* Naturals()) @ 10:
    (Set(x) & All(A_, Inductive(A_) >> (x *in_* A_))) @ (11, BICONDITION, 1, 10)
    All(A_, Inductive(A_) >> (x *in_* A_)) @ (12, TAUTOLOGY, 11)
((x *in_* Naturals()) >> All(A_, Inductive(A_) >> (x *in_* A_))) @ (13, DEDUCE)
((x *in_* Naturals()) == All(A_, Inductive(A_) >> (x *in_* A_))) @ (14, TAUTOLOGY, 13, 9)
All(x_, (x_ *in_* Naturals()) == All(A_, Inductive(A_) >> (x_ *in_* A_))) @ ("naturals", CLOSING, 14)

# empty in naturals
# Theorem "empty_in_naturals": Empty() belongs to every inductive set, hence
# to Naturals().
clear()
with Inductive(A) @ 0:
    ((Set(A) & (Empty() *in_* A)) & All(x_, (x_ *in_* A) >> (Succ(x_) *in_* A))) @ (1, BICONDITION, "inductive", 0)
    (Empty() *in_* A) @ (2, TAUTOLOGY, 1)
(Inductive(A) >> (Empty() *in_* A)) @ (3, DEDUCE)
All(A_, Inductive(A_) >> (Empty() *in_* A_)) @ (4, CLOSING, 3)
(Empty() *in_* Naturals()) @ ("empty_in_naturals", BICONDITION, "naturals", 4)

# successor in naturals
# Theorem "successor_in_naturals": Naturals() is closed under Succ, because
# each inductive set is.
clear()
with (x *in_* Naturals()) @ 0:
    All(A_, Inductive(A_) >> (x *in_* A_)) @ (1, BICONDITION, "naturals", 0)
    with Inductive(A) @ 2:
        (x *in_* A) @ (3, BY_THEOREM, 1, 2)
        ((Set(A) & (Empty() *in_* A)) & All(x_, (x_ *in_* A) >> (Succ(x_) *in_* A))) @ (4, BICONDITION, "inductive", 2)
        All(x_, (x_ *in_* A) >> (Succ(x_) *in_* A)) @ (5, TAUTOLOGY, 4)
        (Succ(x) *in_* A) @ (6, BY_THEOREM, 5, 3)
    (Inductive(A) >> (Succ(x) *in_* A)) @ (7, DEDUCE)
    All(A_, Inductive(A_) >> (Succ(x) *in_* A_)) @ (8, CLOSING, 7)
    (Succ(x) *in_* Naturals()) @ (9, BICONDITION, "naturals", 8)
((x *in_* Naturals()) >> (Succ(x) *in_* Naturals())) @ (10, DEDUCE)
All(x_, (x_ *in_* Naturals()) >> (Succ(x_) *in_* Naturals())) @ ("successor_in_naturals", CLOSING, 10)

# naturals is smallest
# Theorem "naturals_is_smallest": Naturals() is included in every inductive
# set (it is the smallest inductive class).
clear()
with Inductive(A) @ 0:
    with (x *in_* Naturals()) @ 1:
        All(A_, Inductive(A_) >> (x *in_* A_)) @ (2, BICONDITION, "naturals", 1)
        (x *in_* A) @ (3, BY_THEOREM, 2, 0)
    ((x *in_* Naturals()) >> (x *in_* A)) @ (4, DEDUCE)
    All(x_, (x_ *in_* Naturals()) >> (x_ *in_* A)) @ (5, CLOSING, 4)
    (Naturals() *inc* A) @ (6, BICONDITION, "inclusion", 5)
(Inductive(A) >> (Naturals() *inc* A)) @ (7, DEDUCE)
All(A_, Inductive(A_) >> (Naturals() *inc* A_)) @ ("naturals_is_smallest", CLOSING, 7)

# naturals is set
# Theorem "naturals_is_set": Naturals() is included in some inductive SET,
# so by "separation" it is a set.
clear()
Inductive(a) @ (0, LET, a, "inductive_exist")
((Set(a) & (Empty() *in_* a)) & All(x_, (x_ *in_* a) >> (Succ(x_) *in_* a))) @ (1, BICONDITION, "inductive", 0)
Set(a) @ (2, TAUTOLOGY, 1)
(Naturals() *inc* a) @ (3, BY_THEOREM, "naturals_is_smallest", 0)
Set(Naturals()) @ ("naturals_is_set", PUT_THEOREM, "separation", a, 2, 3)

# naturals is inductive
# Theorem "naturals_is_inductive": combining the three previous results,
# Naturals() itself satisfies the inductive property.
clear()
((Set(Naturals()) & (Empty() *in_* Naturals())) & All(x_, (x_ *in_* Naturals()) >> (Succ(x_) *in_* Naturals()))) @ (0, TAUTOLOGY, "empty_in_naturals", "successor_in_naturals", "naturals_is_set")
Inductive(Naturals()) @ ("naturals_is_inductive", BICONDITION, "inductive", 0)
# bi-inclusion
# Theorem "bi-inclusion": mutual inclusion implies equality (antisymmetry of
# *inc*, via extensionality).
clear()
with ((A *inc* B) & (B *inc* A)) @ 0:
    (A *inc* B) @ (1, TAUTOLOGY, 0)
    (B *inc* A) @ (2, TAUTOLOGY, 0)
    with (x *in_* A) @ 3:
        (x *in_* B) @ (4, PUT_THEOREM, "element_of_subset", A, 1, 3)
    ((x *in_* A) >> (x *in_* B)) @ (7, DEDUCE)
    with (x *in_* B) @ 5:
        (x *in_* A) @ (6, PUT_THEOREM, "element_of_subset", B, 2, 5)
    ((x *in_* B) >> (x *in_* A)) @ (8, DEDUCE)
    ((x *in_* A) == (x *in_* B)) @ (9, TAUTOLOGY, 7, 8)
    All(x_, (x_ *in_* A) == (x_ *in_* B)) @ (10, CLOSING, 9)
    (A == B) @ (11, BY_THEOREM, "extensionality", 10)
(((A *inc* B) & (B *inc* A)) >> (A == B)) @ (12, DEDUCE)
All(A_, B_, ((A_ *inc* B_) & (B_ *inc* A_)) >> (A_ == B_)) @ ("bi-inclusion", CLOSING, 12)
def induction(target, C0, C1, initial, iteration):
initial = proof_history[initial]
assert initial.is_proved()
initial @ -11
iteration = proof_history[iteration]
assert iteration.is_proved()
iteration @ -12
assert target.type_ == TYPE_ALL
n0 = target.bound
cursor = target.statement
assert cursor.type_ == TYPE_IMPLY
def Prop(x):
return target.statement.conclusion.substitute(n0, x)
UniquelyExist(C0, All(x_, (x_ *in_* C0) == ((Set(x_) & ((x_ *in_* Naturals()) & Prop(x_)))))) @ (-13, DEFINE_CLASS, C0)
All(x_, (x_ *in_* C1) == (Set(x_) | |
log_msg(f"Data translated by backchannel to send to agent for operation: {agent_operation}", data)
(resp_status, resp_text) = await self.admin_POST(agent_operation, data)
resp_json = json.loads(resp_text)
if resp_status == 200:
if operation == "send-request":
# set a thread_id for the test harness
if "piid" in resp_json:
resp_json["thread_id"] = resp_json["piid"]
else:
raise Exception(f"No piid(thread_id) found in response for operation: {agent_operation} {resp_text}")
# The response doesn't have a state. Get it from the present_proof_states webhook message
if "piid" in resp_json:
wh_id = resp_json["piid"]
else:
wh_id = rec_id
#(wh_status, wh_text) = await self.make_agent_GET_request_response(topic, wh_id)
await asyncio.sleep(1)
present_proof_states_msg = pop_resource_latest("present-proof-states-msg")
#present_proof_states_msg = json.loads(wh_text)
if "StateID" in present_proof_states_msg["message"]:
resp_json["state"] = present_proof_states_msg["message"]["StateID"]
else:
raise Exception(f"Could not retieve State from webhook message: {present_proof_states_msg}")
resp_text = json.dumps(resp_json)
log_msg(resp_status, resp_text)
if resp_status == 200: resp_text = self.agent_state_translation(op["topic"], operation, resp_text)
return (resp_status, resp_text)
def add_did_exchange_state_to_response(self, operation, raw_response):
    """
    Inject a DID-exchange ``state`` field into a JSON response.

    ``send-response`` maps to ``response-sent`` and ``send-message`` to
    ``request-sent``; any other operation leaves the payload unchanged.

    Args:
        operation: backchannel operation name
        raw_response: JSON string to augment

    Returns:
        the (possibly augmented) response as a JSON string
    """
    payload = json.loads(raw_response)
    state_by_operation = {
        'send-response': 'response-sent',
        'send-message': 'request-sent',
    }
    if operation in state_by_operation:
        payload['state'] = state_by_operation[operation]
    return json.dumps(payload)
async def make_agent_GET_request(
    self, op, rec_id=None, text=False, params=None
) -> (int, str):
    """
    Dispatch a GET-style backchannel request to the afgo agent, keyed on
    ``op["topic"]``.

    Args:
        op: operation dict; ``op["topic"]`` selects the branch and, for some
            topics, ``op["operation"]`` refines it
        rec_id: record identifier (connection id, schema id, credential
            definition id, credential name, ...) where applicable
        text: unused in this method -- TODO confirm caller contract
        params: query parameters forwarded to the admin GET for the
            connection/did-exchange branch

    Returns:
        tuple ``(status_code, response_text)``; unimplemented topics yield
        ``(501, ...)``
    """
    if op["topic"] == "status":
        # 418 ("I'm a teapot") signals an inactive agent to the test harness.
        status = 200 if self.ACTIVE else 418
        status_msg = "Active" if self.ACTIVE else "Inactive"
        return (status, json.dumps({"status": status_msg}))

    if op["topic"] == "version":
        # Report the cached afgo version if known; "unknown" otherwise.
        if self.afgo_version is not None:
            status = 200
            status_msg = self.afgo_version
        else:
            status = 200
            status_msg = "unknown"
        return (status, status_msg)

    elif op["topic"] == "connection" or op["topic"] == "did-exchange":
        # With a rec_id, fetch one connection; otherwise list them all.
        if rec_id:
            connection_id = rec_id
            agent_operation = "/connections/" + connection_id
        else:
            agent_operation = "/connections"

        log_msg('GET Request agent operation: ', agent_operation)

        (resp_status, resp_text) = await self.admin_GET(agent_operation, params=params)
        if resp_status != 200:
            return (resp_status, resp_text)

        log_msg('GET Request response details: ', resp_status, resp_text)

        resp_json = json.loads(resp_text)

        if len(resp_json) != 0:
            if rec_id:
                # Single connection: flatten the afgo result into the
                # {connection_id, state, connection} shape the tests expect.
                connection_info = { "connection_id": resp_json["result"]["ConnectionID"], "state": resp_json["result"]["State"], "connection": resp_json }
                resp_text = json.dumps(connection_info)
            else:
                # Connection list: reshape each entry the same way.
                resp_json = resp_json["results"]
                connection_infos = []
                for connection in resp_json:
                    connection_info = {"connection_id": connection["ConnectionID"], "state": connection["State"], "connection": connection}
                    connection_infos.append(connection_info)
                resp_text = json.dumps(connection_infos)

        # translate the state from that the agent gave to what the tests expect
        resp_text = self.agent_state_translation(op["topic"], None, resp_text)
        return (resp_status, resp_text)

    elif op["topic"] == "did":
        # Publish the pre-generated orb DID (read from the data mount) to the
        # agent's VDR, then import its private key into the KMS.
        agent_operation = "/vdr/did"
        agent_name = os.getenv("AGENT_NAME")
        orb_did_path = f"/data-mount/orb-dids/{agent_name}.json"
        orb_did_name = os.getenv("AFGO_ORBDID_NAME")
        if orb_did_name is None or len(orb_did_name) == 0:
            orb_did_name = "<default orb did>"

        with open(orb_did_path) as orb_did_file:
            orb_did = orb_did_file.read()
        orb_did_json = json.loads(orb_did)

        (resp_status, resp_text) = await self.admin_POST(agent_operation, data={"did": orb_did_json, "name": orb_did_name})
        if resp_status != 200:
            if resp_status == 400:  # we've already posted the DID to the agent, so we can just return the did
                return (200, json.dumps({"did":orb_did_json["id"]}))
            return (resp_status, "")

        # import the ed25519 private key for orb did
        priv_key_path = os.getenv("AFGO_ORBDID_PRIVKEY")
        with open(priv_key_path) as priv_key_file:
            priv_key = priv_key_file.read()
        priv_key_json = json.loads(priv_key)

        (resp_status, resp_text) = await self.admin_POST("/kms/import", data=priv_key_json)
        return (resp_status, json.dumps({"did":orb_did_json["id"]}))

    elif op["topic"] == "schema":
        schema_id = rec_id
        if schema_id is None:
            agent_operation = "/schemas/schemas"
        else:
            agent_operation = "/schemas/" + schema_id

        # afgo does not have a DID schema endpoint, so return a dummy schema
        # without calling the agent. (agent_operation is computed but unused.)
        schema = { "id": "did:", "name": "", "version": self.afgo_version }
        return (200, json.dumps(schema))

    elif op["topic"] == "credential-definition":
        cred_def_id = rec_id
        if cred_def_id is None:
            agent_operation = "/credential-definitions/"
        else:
            agent_operation = "/credential-definitions/" + cred_def_id

        # No afgo equivalent; the admin GET is disabled and a placeholder id
        # is returned instead.
        #(resp_status, resp_text) = await self.admin_GET(agent_operation)
        #if resp_status != 200:
        #    return (resp_status, resp_text)
        resp_json = {"id": None }

        return (200, json.dumps(resp_json))

    elif op["topic"] == "issue-credential" or op["topic"] == "issue-credential-v2":
        # The credential state comes from the webhook message, not a GET.
        (wh_status, wh_text) = await self.make_agent_GET_request_response(op["topic"], rec_id)
        issue_credential_states_msg = json.loads(wh_text)
        if "StateID" in issue_credential_states_msg["message"]:
            resp_json = {"state": issue_credential_states_msg["message"]["StateID"]}
        else:
            raise Exception(f"Could not retieve State from webhook message: {issue_credential_states_msg}")
        return (200, json.dumps(resp_json))

    elif op["topic"] == "credential":
        operation = op["operation"]
        if operation == 'revoked':
            agent_operation = "/credential/" + operation + "/" + rec_id
        else:
            agent_operation = f"/verifiable/credential/name/{rec_id}"

        # No equivalent GET in afgo; afgo only provides an /actions GET.
        (resp_status, resp_text) = await self.admin_GET(agent_operation)
        if resp_status == 200:
            resp_json = json.loads(resp_text)
            # take the name (which was saved as the cred_id) and make it the referent
            if "name" in resp_json:
                resp_json["referent"] = resp_json["name"]
                resp_text = json.dumps(resp_json)
            else:
                raise Exception(f"No name/id found in response for: {agent_operation}")
        return (resp_status, resp_text)

    elif op["topic"] == "proof" or op["topic"] == "proof-v2":
        # As for issue-credential: state is read from the webhook message.
        (wh_status, wh_text) = await self.make_agent_GET_request_response(op["topic"], rec_id)
        present_proof_states_msg = json.loads(wh_text)
        if "StateID" in present_proof_states_msg["message"]:
            resp_json = {"state": present_proof_states_msg["message"]["StateID"]}
        else:
            raise Exception(f"Could not retieve State from webhook message: {present_proof_states_msg}")
        return (wh_status, json.dumps(resp_json))

    elif op["topic"] == "revocation":
        operation = op["operation"]
        # The endpoint differs by afgo version; resolve it first.
        agent_operation, admin_data = await self.get_agent_operation_afgo_version_based(op["topic"], operation, rec_id, data=None)

        (resp_status, resp_text) = await self.admin_GET(agent_operation)
        return (resp_status, resp_text)

    # Any other topic is unsupported.
    return (501, '501: Not Implemented\n\n'.encode('utf8'))
async def handle_issue_credential_GET(self, op, rec_id=None, data=None):
    """Unimplemented stub; currently does nothing and returns ``None``."""
    pass
async def make_agent_DELETE_request(
    self, op, rec_id=None, data=None, text=False, params=None
) -> (int, str):
    """
    Dispatch a DELETE backchannel request to the afgo agent.

    Only the ``credential`` topic (with a ``rec_id``) is supported; any
    other request yields a 501 response.

    Args:
        op: operation dict; ``op["topic"]`` selects the branch
        rec_id: credential record identifier to delete
        data: unused
        text: unused
        params: unused

    Returns:
        tuple ``(status_code, response_text)``
    """
    topic = op["topic"]
    if topic != "credential" or not rec_id:
        return (501, '501: Not Implemented\n\n'.encode('utf8'))
    agent_operation = "/credential/" + rec_id
    log_msg(agent_operation)
    (resp_status, resp_text) = await self.admin_DELETE(agent_operation)
    if resp_status == 200:
        # translate the agent's state wording into what the tests expect
        resp_text = self.agent_state_translation(topic, None, resp_text)
    return (resp_status, resp_text)
async def make_agent_GET_request_response(
self, topic, rec_id=None, text=False, params=None, message_name=None
) -> (int, str):
if topic == "connection" and rec_id:
connection_msg = pop_resource(rec_id, "connection-msg")
i = 0
while connection_msg is None and i < MAX_TIMEOUT:
await asyncio.sleep(1)
connection_msg = pop_resource(rec_id, "connection-msg")
i = i + 1
resp_status = 200
if connection_msg:
resp_text = json.dumps(connection_msg)
else:
resp_text = "{}"
return (resp_status, resp_text)
elif topic == "did":
# TODO: change to actual method call
return (200, 'some stats')
elif topic == "did-exchange" and rec_id:
didexchange_msg = pop_resource(rec_id, "didexchange-states-msg")
i = 0
while didexchange_msg is None and i < MAX_TIMEOUT:
await asyncio.sleep(1)
didexchange_msg = pop_resource(rec_id, "didexchange-states-msg")
i = i + 1
resp_status = 200
if didexchange_msg:
resp_text = json.dumps(didexchange_msg)
resp_text = self.agent_state_translation(topic, None, resp_text)
if 'message' in didexchange_msg:
conn_id = didexchange_msg['message']['Properties']['connectionID']
resp_text = json.dumps({ 'connection_id': conn_id, 'data': didexchange_msg })
else:
resp_text = "{}"
return (resp_status, resp_text)
elif topic == "out-of-band" and rec_id:
c_msg = pop_resource(rec_id, "didexchange-msg")
i = 0
while didexchange_msg is None and i < MAX_TIMEOUT:
await asyncio.sleep(1)
didexchange_msg = pop_resource(rec_id, "didexchange-msg")
i = i + 1
resp_status = 200
if didexchange_msg:
resp_text = json.dumps(didexchange_msg)
resp_text = self.agent_state_translation(topic, None, resp_text)
if 'message' in didexchange_msg:
conn_id = didexchange_msg['message']['Properties']['connectionID']
resp_text = json.dumps({ 'connection_id': conn_id, 'data': didexchange_msg })
else:
resp_text = "{}"
return (resp_status, resp_text)
elif (topic == "issue-credential" or topic == "issue-credential-v2") and rec_id:
if message_name is None:
message_name = "issue-credential-states-msg"
await asyncio.sleep(1)
credential_msg = pop_resource_latest(message_name)
i = 0
while credential_msg is None and i < MAX_TIMEOUT:
await asyncio.sleep(1)
credential_msg = pop_resource_latest(message_name)
i = i + 1
# If we couldn't get a state out of the states webhook message, see if we can get the type and determine what the
# state should be. This is a guess as afgo doesn't return states to receivers.
if (message_name == "issue-credential-states-msg") and (credential_msg == None or "message" not in credential_msg):
message_name = "issue-credential-actions-msg"
credential_msg = get_resource_latest(message_name)
i = 0
while credential_msg is None and i < MAX_TIMEOUT:
await asyncio.sleep(1)
# TODO May need to get instead of pop because the msg may be needed elsewhere.
#credential_msg = pop_resource_latest(message_name)
credential_msg = get_resource_latest(message_name)
i = i + 1
if "message" in credential_msg:
op_type = credential_msg["message"]["Message"]["@type"]
state = self.IssueCredentialTypeToStateTranslationDict[op_type]
credential_msg["message"]["StateID"] = state
credential_msg["state"] = state
else:
raise Exception(f"Could not retieve State from webhook message: {issue_credential_actions_msg}")
# There is an issue with the issue command and it needs the credential~attach as well as the thread_id
# Because we are popping the webhook off the stack because we are getting the protocol state we are losing
# the credential details (it isn't contained in every webhook message), and it is no longer being sent by
# the tests. So, if the credential message contains the full credential like filters~attach, then re-add
# it to the stack again keyed by the piid called credential_details_msg. This way if any call has a problem
# getting the cred details from the webhook messages, we have it here | |
    def __init__(self, value: complex) -> None:
        """Store the complex constant wrapped by this leaf node."""
        self.value = value
        super().__init__()
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.value})'
def infix(self) -> str:
"""returns infix representation of the tree"""
return f'({self.value})'
    def evaluate(self, env: Optional[Environment] = None) -> complex:
        """Return the stored complex value; *env* is ignored for constants."""
        return self.value
def mathml(self) -> str:
"""returns the MathML representation of the tree"""
return mathml_tag('row',
mathml_tag('n',
str(self.value.real))
+ mathml_tag('o', '+')
+ mathml_tag('row',
mathml_tag('n', str(self.value.imag))
+ mathml_tag('i', 'i')))
def wolfram(self) -> str:
"""return wolfram language representation of the tree"""
return f'{self.value.real} + {self.value.imag} \\[ImaginaryI]'
class Pi(Constant):
    """The constant pi as a leaf node of an expression tree."""
    __slots__ = ('value',)

    def __init__(self) -> None:
        """Bind the node's value to ``math.pi``."""
        self.value = pi
        super().__init__()

    def __repr__(self) -> str:
        return '{}()'.format(type(self).__name__)

    def infix(self) -> str:
        """Return the infix spelling of the constant."""
        return 'pi'

    def evaluate(self, env: Optional[Environment] = None) -> ConstantType:
        """Return the numeric value of pi; *env* is ignored."""
        return self.value

    def mathml(self) -> str:
        """Return the MathML representation of the constant."""
        return mathml_tag('row', mathml_tag('n', 'PI'))

    def wolfram(self) -> str:
        """Return the Wolfram-language symbol for pi."""
        return 'Pi'

    def simplify(self, env: Optional[Environment] = None) -> Node:
        """A constant is already fully simplified; return self."""
        return self
class E(Constant):
    """Euler's number e as a leaf node of an expression tree."""
    __slots__ = ('value',)

    def __init__(self) -> None:
        """Bind the node's value to ``math.e``."""
        self.value = e
        super().__init__()

    def __repr__(self) -> str:
        return '{}()'.format(type(self).__name__)

    def infix(self) -> str:
        """Return the infix spelling of the constant."""
        return 'e'

    def evaluate(self, env: Optional[Environment] = None) -> ConstantType:
        """Return the numeric value of e; *env* is ignored."""
        return self.value

    def mathml(self) -> str:
        """Return the MathML representation of the constant."""
        return mathml_tag('row', mathml_tag('n', 'E'))

    def wolfram(self) -> str:
        """Return the Wolfram-language symbol for e."""
        return 'E'

    def simplify(self, env: Optional[Environment] = None) -> Node:
        """A constant is already fully simplified; return self."""
        return self
class Boolean(Constant):
    """Boolean constant (True/False) as a leaf node of an expression tree."""
    __slots__ = ('value',)

    def __init__(self, value: bool) -> None:
        """Store the boolean wrapped by this node."""
        self.value = value
        super().__init__()

    def __repr__(self) -> str:
        return '{}({})'.format(type(self).__name__, self.value)

    def infix(self) -> str:
        """Return 'True' or 'False'."""
        return str(self.value)

    def evaluate(self, env: Optional[Environment] = None) -> ConstantType:
        """Return the stored boolean; *env* is ignored."""
        return self.value

    def mathml(self) -> str:
        """Return the MathML representation of the boolean."""
        return mathml_tag('row', mathml_tag('i', str(self.value)))

    def wolfram(self) -> str:
        """Return the Wolfram-language spelling ('True'/'False')."""
        return str(self.value)

    def simplify(self, env: Optional[Environment] = None) -> Node:
        """A constant is already fully simplified; return self."""
        return self
class Variable(Term):
    """Named free variable in an expression tree."""
    __slots__ = ('name',)

    def __init__(self, value: str) -> None:
        """Store the variable's name (must be a string)."""
        assert isinstance(value, str)
        self.name = value
        super().__init__()

    def __repr__(self) -> str:
        return "{}('{}')".format(type(self).__name__, self.name)

    def infix(self) -> str:
        """Return the variable's name."""
        return str(self.name)

    def dependencies(self) -> set[str]:
        """A variable depends only on itself."""
        return {self.name}

    def derivative(self, variable: str) -> Node:
        """d(x)/dx is 1; the derivative to any other variable is 0."""
        return Integer(1 if self.name == variable else 0)

    def evaluate(self, env: Optional[Environment] = None) -> ConstantType:
        """Look the variable up in *env* and evaluate the bound value.

        Raises EvaluationError if the name is unbound or the value is bad.
        """
        try:
            return Nodeify((env or {})[self.name]).evaluate()
        except Exception as ex:
            raise EvaluationError from ex

    def mathml(self) -> str:
        """Return the MathML representation of the variable."""
        return mathml_tag('row', mathml_tag('i', str(self.name)))

    def simplify(self, env: Optional[Environment] = None) -> Node:
        """Substitute the value bound in *env*, if any; otherwise return self."""
        if env and self.name in env:
            return Nodeify(env[self.name])
        return self

    def substitute(self, var: str, sub: Node) -> Node:
        """Replace this variable by *sub* when the names match."""
        return sub if self.name == var else self

    def wolfram(self) -> str:
        """Return the Wolfram-language form (just the name)."""
        return self.name
class ArbitraryOperator(Node, metaclass=ABCMeta):
    """Abstract Base Class for multi-input operator in expression tree.

    Concrete subclasses (e.g. Sum) supply ``symbol`` (infix separator),
    ``wolfram_func`` (Wolfram-language head), ``_eval_func`` (pairwise
    reduction step) and ``_simplify`` (operator-specific rewrite rules).
    """
    __slots__ = 'children',
    # Infix separator placed between rendered children, e.g. '+' for Sum.
    symbol = ''
    # Source text of a tuple of node classes; it is eval()'d lazily inside
    # infix()/mathml() so subclasses can reference classes defined later in
    # the module without NameError at class-creation time.
    _parentheses_needed = '()'
    @property
    @abstractmethod
    def wolfram_func(self) -> str:
        """abstract property, returns function name for wolfram language"""
    def __init__(self, *args: Node) -> None:
        # An operator node is only meaningful with two or more operands.
        assert len(args) > 1
        assert all(isinstance(x, Node) for x in args)
        self.children = tuple(child for child in args)
        super().__init__()
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}{self.children}'
    def dependencies(self) -> set[str]:
        """returns set of all variables present in the tree"""
        # ugly but at least mypy shuts up
        return set('').union(*(child.dependencies() for child in self.children)).difference(set(''))
    @staticmethod
    @abstractmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> ConstantType:
        """calculation function for 2 elements"""
    def evaluate(self, env: Optional[Environment] = None) -> ConstantType:
        """Evaluates the expression tree using the values from env; returns the numeric result."""
        try:
            # Left-fold the pairwise operation over all evaluated children.
            return reduce(self._eval_func, (child.evaluate(env) for child in self.children))
        except Exception as ex:
            raise EvaluationError from ex
    def infix(self) -> str:
        """returns infix representation of the tree"""
        # Children whose class is listed in _parentheses_needed are wrapped
        # in parentheses to preserve precedence.
        return self.symbol.join(child.infix() if not isinstance(child, eval(self._parentheses_needed))
                                else f"({child.infix()})" for child in self.children)
    def list_nodes(self) -> list[Node]:
        """returns a list of all nodes in the tree"""
        return sum((child.list_nodes() for child in self.children), [self])
    def mathml(self) -> str:
        """returns the MathML representation of the tree"""
        # NOTE(review): the isinstance test here is inverted relative to
        # infix() (children NOT listed in _parentheses_needed get fenced);
        # confirm which polarity is intended.
        return mathml_tag('row',
                          mathml_tag('o', self.symbol).join(child.mathml()
                                                            if isinstance(child, eval(self._parentheses_needed))
                                                            else mathml_tag('fenced', mathml_tag('row', child.mathml()))
                                                            for child in self.children))
    def simplify(self, env: Optional[Environment] = None) -> Node:
        """returns a simplified version of the tree"""
        # Fast path: a fully-constant subtree collapses to a single value.
        try:
            return Nodeify(self.evaluate(env)).simplify()
        except EvaluationError:
            pass
        children = list(self.children)
        old_repr = repr(children)
        # Iterate to a fixed point: stop once a pass leaves the child list
        # (compared via repr after canonical sorting) unchanged.
        while True:
            children = [child.simplify(env) for child in children]
            # consolidate constants
            constants: list[Node] = []
            non_constants: list[Node] = []
            for child in children:
                if isinstance(child, (Integer, Rational, Real, Complex)):
                    constants.append(child)
                else:
                    non_constants.append(child)
            if len(constants) > 1:
                children = non_constants + [self.__class__(*constants).simplify(env)]
            else:
                children = non_constants + constants
            # operator specific parts
            children = self._simplify(children, env)
            # break out of loop
            children = [child.simplify(env) for child in children]
            # Sorting by infix form makes the repr comparison order-insensitive.
            children.sort(key=lambda x: x.infix())
            if (new := repr(children)) == old_repr:
                if len(children) > 1:
                    out = self.__class__(*children)
                    try:
                        return Nodeify(out.evaluate(env))
                    except EvaluationError:
                        return out
                else:
                    return children[0]
            else:
                old_repr = new
    def substitute(self, var: str, sub: Node) -> Node:
        """substitute a variable with an expression inside this tree, returns the resulting tree"""
        return self.__class__(*(child.substitute(var, sub) for child in self.children))
    def wolfram(self) -> str:
        """return wolfram language representation of the tree"""
        return f'{self.wolfram_func}[' + ', '.join(child.wolfram() for child in self.children) + ']'
    @staticmethod
    @abstractmethod
    def _simplify(children: list[Node], env: Optional[Environment] = None) -> list[Node]:
        """Simplification rules for operator"""
class Sum(ArbitraryOperator):
"""Addition operator node"""
__slots__ = ()
symbol = '+'
wolfram_func = 'Plus'
_parentheses_needed = '(ArbitraryLogicalOperator, ComparisonOperator)'
    def derivative(self, variable: str) -> Node:
        """returns an expression tree representing the (partial) derivative to the passed variable of this tree"""
        # Differentiation is linear: derive each summand independently.
        return Sum(*(child.derivative(variable) for child in self.children))
    @staticmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> ConstantType:
        """calculation function for 2 elements"""
        # Pairwise step used by ArbitraryOperator.evaluate's reduce() fold.
        return x + y
@staticmethod
def _simplify(children: list[Node], env: Optional[Environment] = None) -> list[Node]:
"""returns a simplified version of the tree"""
def separate(arr: tuple[Node, ...]) -> tuple[Node, tuple[Node, ...]]:
"""separates array into a constant and any non-constant parts"""
if any(isinstance(x, Constant) for x in arr):
constant = next(filter(lambda x: isinstance(x, Constant), arr))
non_constants = arr[:(k := arr.index(constant))] + arr[k + 1:]
else:
constant = Integer(1)
non_constants = arr
return constant, non_constants
if len(children) == 1:
return children
elif len(children) == 0:
return [Integer(0)]
for i, child in enumerate(children):
# eliminate zeroes
if isinstance(child, Constant):
if child.evaluate() == 0:
del children[i]
return children
# consolidate sums
elif isinstance(child, Sum):
del children[i]
return children + list(child.children)
# eliminate negations
elif isinstance(child, Negate):
if child.child in children:
j = children.index(child.child)
del children[max(i, j)], children[min(i, j)]
if len(children) > 0:
return children
else:
return [Integer(0)]
else:
for j, child2 in enumerate(children):
if isinstance(child2, Product):
child2_constant, child2_variable_terms = separate(child2.children)
if isinstance(child.child, Product):
if repr(child.child.children) == repr(child2_variable_terms):
del children[max(i, j)], children[min(i, j)]
return children + [Product(child2_constant - 1, *child.child.children)]
elif len(child2_variable_terms) == 1 and repr(child.child) == repr(
child2_variable_terms[0]):
del children[max(i, j)], children[min(i, j)]
return children + [Product(child2_constant - 1, child.child)]
# join like products
elif isinstance(child, Product):
constant1, non_constants1 = separate(child.children)
for j, child2 | |
# repository: YutakaMizugaki/warriorframework
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import Framework.Utils as Utils
from Framework.Utils import cli_Utils
from Framework.Utils.print_Utils import print_warning
from Framework.Utils.testcase_Utils import pNote
from Framework.Utils.data_Utils import getSystemData, get_session_id, get_credentials
from Framework.Utils.encryption_utils import decrypt
from WarriorCore.Classes.warmock_class import mockready
from WarriorCore.Classes.war_cli_class import WarriorCliClass
from Framework.ClassUtils.WNetwork.warrior_cli_class import WarriorCli
"""This is the cli_actions module that has all cli related keywords """
class CliActions(object):
"""CliActions class which has methods(keywords)
related to actions performed on any command line interface """
def __init__(self):
"""constructor"""
self.resultfile = Utils.config_Utils.resultfile
self.datafile = Utils.config_Utils.datafile
self.logsdir = Utils.config_Utils.logsdir
self.filename = Utils.config_Utils.filename
self.logfile = Utils.config_Utils.logfile
    @mockready
    def connect(self, system_name, session_name=None, prompt=".*(%|#|\$)",
                ip_type="ip", via_host=None, tuple_pty_dimensions=None):
        """
        This is a generic connect that can connect to ssh/telnet based
        on the conn_type provided by the user in the input datafile.
        :Datafile usage:
            Tags or attributes to be used in input datafile for the system or subsystem
            If both tag and attribute is provided the attribute will be used.
            1. ip = IP address of the system.
               Default value for ip type is ip, it can take any type of ip's
               to connect to (like ipv4, ipv6, dns etc)
               Users can provide tag/attribute for any ip_type under the system
               in the input datafile and specify the tag/attribute name
               as the value for ip_type argument, then the connection will be
               established using that value.
            2. username = username for the session.
            3. password = password for the session.
            4. timeout = use if you want to set timeout while connecting,
               used for both ssh and telnet
            5. prompt = for ssh connections, this is the prompt expected when
               the connection is successful, not required for telnet.
            6. conn_type = the type of connection to be created (ssh/telnet).
            7. ssh_port = use this tag to provide ssh port to connect to, if
               not provided default ssh port of 22 will be used.
            8. telnet_port = use this tag to provide a telnet port to connect to
               if not provided default telnet port 23 will be used.
            9. conn_options = extra arguments that will be used when sending
               the ssh/telnet command, default is empty
            10.custom_keystroke = a keystroke that will be sent after the initial
               timeout, in case of server require a keystroke to show any prompt.
               Default is the enter key
            11.pty_dimensions = size of the pseudo-terminal specified as a
               two-entry tuple (rows, columns), eg. (24, 80).
        :Arguments:
            1. system_name (string) = This can be name of the\
                system or a subsystem.
               To connect to a system provided system_name=system_name.
               To connect to a single subsystem provide
               system_name=system_name[subsystem_name].
               To connect to multiple subsystems provide
               system_name=system_name[subsystem1_name,subsystem2_name..etc..].
               To connect to all subsystems under a system provide
               system_name="system_name[all]".
            2. session_name(string) = name of the session to the system.
            3. prompt(string) = prompt expected in the terminal.
            4. ip_type(string) = type of the ip address(ip, ipv4, ipv6, dns, etc).
            5. via_host = Name of the system in the data file to be used as an
               intermediate system for establishing nested connections,
               currently it is applicable only for SSH connections.
            6. tuple_pty_dimensions(tuple) = size of the pseudo-terminal specified as a
               two-entry tuple(rows, columns), eg. (24, 80).
        :Returns:
            1. status(bool)= True / False.
            2. session_id (dict element)= an id is generated for each connection
               and each connection is stored in the framework's data_repository.
               session_id=system_name+subsystem_name+session_name.
            3. response dictionary(dict): an empty dictionary to store the responses of all
               commands sent to the particular system or subsystem.
               This dictionary is available in warrior frameworks global data_repository
               and can be retrieved using the key= "session_id + _td_response".
        """
        wdesc = "Connect to the ssh/telnet port of the system"
        pNote(wdesc)
        # Resolve system_name and subsystem_list
        # Removing duplicate subsystem entry and blank spaces in entry name
        system_name, subsystem_list = Utils.data_Utils.resolve_system_subsystem_list(self.datafile,
                                                                                    system_name)
        output_dict = {}
        status = True
        # One connection attempt per subsystem; a bare system (no subsystem
        # list) gets a single attempt with subsystem_name=None.
        attempt = 1 if subsystem_list is None else len(subsystem_list)
        for i in range(attempt):
            result = False
            subsystem_name = subsystem_list[i] if subsystem_list is not None else None
            # Put system_name in system_name[subsystem] format before calling
            # connect_ssh/connect_telnet.
            call_system_name = system_name
            if subsystem_name:
                call_system_name += "[{}]".format(subsystem_name)
            # conn_type from the datafile selects the transport keyword.
            conn_type = getSystemData(self.datafile, call_system_name, "conn_type")
            if conn_type is not False:
                if conn_type == "ssh":
                    result, output_dict = \
                        self.connect_ssh(call_system_name, session_name, prompt,
                                         ip_type, via_host=via_host,
                                         tuple_pty_dimensions=tuple_pty_dimensions)
                elif conn_type == "telnet":
                    result, output_dict = \
                        self.connect_telnet(call_system_name, session_name, ip_type,
                                            tuple_pty_dimensions=tuple_pty_dimensions)
                else:
                    pNote("<conn_type>={0} provided for '{1}' is not "
                          "supported".format(conn_type, call_system_name), "error")
            else:
                pNote("conn_type not provided for system={0}".format(call_system_name), "warn")
            # Overall status is the AND of all attempts.
            # NOTE(review): output_dict is overwritten on every iteration, so
            # only the last subsystem's dictionary is returned — confirm
            # whether the dictionaries should be merged instead.
            status = status and result
        return status, output_dict
    @mockready
    def disconnect(self, system_name, session_name=None):
        """ Disconnects/Closes session established with the system
        :Arguments:
            1. system_name (string) = This can be name of the\
                system or a subsystem.
               To connect to a system provided system_name=system_name.
               To connect to a single subsystem provide
               system_name=system_name[subsystem_name].
               To connect to multiple subsystems provide
               system_name=system_name[subsystem1_name,subsystem2_name..etc..].
               To connect to all subsystems under a system provide
               system_name="system_name[all]".
            2. session_name(string) = name of the session to the system
        :Returns:
            1. status(bool)= True / False
        """
        wdesc = "Disconnects/Closes session established with the system/subsystem"
        # Resolve system_name and subsystem_list
        # Removing duplicate subsystem entry and blank spaces in entry name
        system_name, subsystem_list = Utils.data_Utils.resolve_system_subsystem_list(self.datafile,
                                                                                    system_name)
        status = True
        # One disconnect attempt per resolved subsystem (or one for a bare
        # system); the overall status is the AND of all attempts.
        attempt = 1 if subsystem_list is None else len(subsystem_list)
        for i in range(attempt):
            Utils.testcase_Utils.pNote(wdesc)
            subsystem_name = subsystem_list[i] if subsystem_list is not None else None
            call_system_name = system_name
            if subsystem_name:
                call_system_name += "[{}]".format(subsystem_name)
            Utils.testcase_Utils.pSubStep(wdesc)
            Utils.testcase_Utils.pNote(system_name)
            Utils.testcase_Utils.pNote(self.datafile)
            # The session object was stored in the data repository under this
            # id when the connection was established.
            session_id = get_session_id(call_system_name, session_name)
            wc_obj = Utils.data_Utils.get_object_from_datarepository(session_id)
            msg1 = "Disconnect successful for system_name={0}, "\
                   "session_name={1}".format(system_name, session_name)
            msg2 = "Disconnection of system_name={0}, "\
                   "session_name={1} Failed".format(system_name, session_name)
            # In mock/simulation mode there is no real session to close.
            if WarriorCliClass.mock or WarriorCliClass.sim:
                result = True
            elif (isinstance(wc_obj, WarriorCli) and
                  wc_obj.conn_obj is not None and
                  wc_obj.conn_obj.target_host is not None):
                # execute smart action to produce user report
                connect_testdata = Utils.data_Utils.get_object_from_datarepository(session_id+"_system",
                                                                                   verbose=False)
                if connect_testdata is not None and connect_testdata is not False:
                    Utils.cli_Utils.smart_action(self.datafile, call_system_name, "",
                                                 wc_obj.conn_obj.target_host,
                                                 "disconnect", connect_testdata)
                wc_obj.disconnect()
                # Success means the underlying session is no longer alive.
                result = False if wc_obj.isalive() else True
            else:
                pNote("session does not exist", "warning")
                result = False
            msg = msg1 if result else msg2
            if not WarriorCliClass.mock and not WarriorCliClass.sim:
                Utils.testcase_Utils.pNote(msg)
            Utils.testcase_Utils.report_substep_status(result)
            status = status and result
        return status
@mockready
def connect_ssh(self, system_name, session_name=None, prompt=".*(%|#|\$)",
ip_type="ip", int_timeout=60, via_host=None,
tuple_pty_dimensions=None):
"""Connects to the ssh port of the the given system or subsystems
:Datafile usage:
Tags or attributes to be used in input datafile for the system or subsystem
If both tag and attribute is provided the attribute will be used.
1. ip = IP address of the system.
Default value for ip type is ip, it can take any type of ip's
to connect to (like ipv4, ipv6, dns etc)
Users can provide tag/attribute for any ip_type under the system
in the input datafile and specify the tag/attribute name
as the value for ip_type argument, then the connection will be
established using that value.
2. username = username for the ssh session
3. password = password for the ssh session
4. timeout = use if you want to set timeout while connecting
5. prompt = the prompt expected when the connection is successful
6. ssh_port = use this tag to provide a ssh port to connect to,
if not provided default ssh port 22 will be used.
7. conn_options = extra arguments that will be used when sending
the ssh/telnet command, default is empty
8. custom_keystroke = a keystroke that will be sent after the initial
timeout, in case of server require a keystroke to show any prompt.
Default is the enter key
9. pty_dimensions = size of the pseudo-terminal specified as a
two-entry tuple(rows, columns), eg. (24, 80).
:Arguments:
1. system_name (string) = This can | |
# -*- coding: utf-8 -*-
import copy
import json
from datetime import timedelta

from django.conf import settings
from django.core import mail
from django.test.utils import override_settings
from django.utils.encoding import force_text
from freezegun import freeze_time
from rest_framework.exceptions import ErrorDetail

from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.utils import generate_addon_guid
from olympia.amo.tests import (
    APITestClient, TestCase, addon_factory, reverse_ns, user_factory,
    version_factory)
from olympia.ratings.models import Rating, RatingFlag
# Cache settings with the default backend swapped for a real in-memory
# backend, for tests that need actual caching via override_settings.
# Use deepcopy, not dict.copy(): the copy is shallow, so mutating the nested
# 'default' dict below would also mutate the live settings.CACHES.
locmem_cache = copy.deepcopy(settings.CACHES)
locmem_cache['default']['BACKEND'] = 'django.core.cache.backends.locmem.LocMemCache' # noqa
class TestRatingViewSetGet(TestCase):
client_class = APITestClient
list_url_name = 'rating-list'
detail_url_name = 'rating-detail'
    def setUp(self):
        """Create one add-on and point self.url at the rating list endpoint."""
        self.addon = addon_factory(
            guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon')
        self.url = reverse_ns(self.list_url_name)
    def test_url_v3(self):
        # The v3 API keeps the legacy /reviews/review/ URL naming.
        assert reverse_ns('rating-list', api_version='v3').endswith(
            '/v3/reviews/review/')
        rating = Rating.objects.create(
            addon=self.addon, body='review 1', user=user_factory())
        detail_url = reverse_ns(
            'rating-detail', api_version='v3', kwargs={'pk': rating.pk})
        assert detail_url.endswith('/v3/reviews/review/%d/' % rating.pk)
    def test_url_default(self):
        # The current default API version (v4) uses /ratings/rating/ URLs.
        assert self.url.endswith('/v4/ratings/rating/')
        rating = Rating.objects.create(
            addon=self.addon, body='review 1', user=user_factory())
        detail_url = reverse_ns(self.detail_url_name, kwargs={'pk': rating.pk})
        assert detail_url.endswith('/v4/ratings/rating/%d/' % rating.pk)
    def test_list_addon(self, **kwargs):
        """List ratings for one add-on, newest first.

        Also reused as a helper by other tests, which pass extra query
        parameters through **kwargs; returns the parsed response payload.
        """
        review1 = Rating.objects.create(
            addon=self.addon, body='review 1', user=user_factory(),
            rating=1)
        review2 = Rating.objects.create(
            addon=self.addon, body='review 2', user=user_factory(),
            rating=2)
        review1.update(created=self.days_ago(1))
        # Add a review belonging to a different add-on, a reply, a deleted
        # review and another older review by the same user as the first review.
        # They should not be present in the list.
        review_deleted = Rating.objects.create(
            addon=self.addon, body='review deleted', user=review1.user,
            rating=3)
        review_deleted.delete()
        Rating.objects.create(
            addon=self.addon, body='reply to review 1', reply_to=review1,
            user=user_factory())
        Rating.objects.create(
            addon=addon_factory(), body='review other addon',
            user=user_factory(), rating=4)
        older_review = Rating.objects.create(
            addon=self.addon, body='review same user/addon older',
            user=review1.user, rating=5)
        # We change `created` manually after the actual creation, so we need to
        # force a full refresh of the denormalized fields, because this
        # normally only happens at creation time.
        older_review.update(created=self.days_ago(42))
        older_review.update_denormalized_fields()
        assert review1.reload().is_latest is True
        assert older_review.reload().is_latest is False
        params = {'addon': self.addon.pk}
        params.update(kwargs)
        response = self.client.get(self.url, params)
        assert response.status_code == 200
        data = json.loads(force_text(response.content))
        assert data['count'] == 2
        assert data['results']
        assert len(data['results']) == 2
        # Most recent review first.
        assert data['results'][0]['id'] == review2.pk
        assert data['results'][1]['id'] == review1.pk
        # Optional response sections only appear when explicitly requested.
        if 'show_permissions_for' not in kwargs:
            assert 'can_reply' not in data
        if 'show_grouped_ratings' not in kwargs:
            assert 'grouped_ratings' not in data
        if 'show_for' not in kwargs:
            assert 'flags' not in data['results'][0]
            assert 'flags' not in data['results'][1]
        return data
def test_list_show_permission_for_anonymous(self):
response = self.client.get(
self.url, {'addon': self.addon.pk,
'show_permissions_for': 666})
assert response.status_code == 400
assert response.data['detail'] == (
'show_permissions_for parameter value should be equal to the user '
'id of the authenticated user')
def test_list_show_permission_for_not_int(self):
response = self.client.get(
self.url, {'addon': self.addon.pk,
'show_permissions_for': 'nope'})
assert response.status_code == 400
assert response.data['detail'] == (
'show_permissions_for parameter value should be equal to the user '
'id of the authenticated user')
def test_list_show_permission_for_not_right_user(self):
self.user = user_factory()
self.client.login_api(self.user)
response = self.client.get(
self.url, {'addon': self.addon.pk,
'show_permissions_for': self.user.pk + 42})
assert response.status_code == 400
assert response.data['detail'] == (
'show_permissions_for parameter value should be equal to the user '
'id of the authenticated user')
def test_list_show_permissions_for_without_addon(self):
self.user = user_factory()
self.client.login_api(self.user)
response = self.client.get(
self.url, {'user': self.user.pk,
'show_permissions_for': self.user.pk})
assert response.status_code == 400
assert response.data['detail'] == (
'show_permissions_for parameter is only valid if the addon '
'parameter is also present')
    def test_list_can_reply(self):
        """An add-on author (even unlisted) gets can_reply=True."""
        self.user = user_factory()
        self.client.login_api(self.user)
        self.addon.addonuser_set.create(user=self.user, listed=False)
        data = self.test_list_addon(show_permissions_for=self.user.pk)
        assert data['can_reply'] is True
    def test_list_can_not_reply(self):
        """A user unrelated to the add-on gets can_reply=False."""
        self.user = user_factory()
        self.client.login_api(self.user)
        data = self.test_list_addon(show_permissions_for=self.user.pk)
        assert data['can_reply'] is False
    def test_list_can_reply_field_absent_in_v3(self):
        """The can_reply field was introduced after v3 and must not leak back."""
        self.user = user_factory()
        self.client.login_api(self.user)
        self.url = reverse_ns('rating-list', api_version='v3')
        data = self.test_list_addon(show_permissions_for=self.user.pk)
        assert 'can_reply' not in data
    def test_list_addon_queries(self):
        """Pin the number of SQL queries for a plain listing (no replies)."""
        version1 = self.addon.current_version
        version2 = version_factory(addon=self.addon)
        review1 = Rating.objects.create(
            addon=self.addon, body='review 1', user=user_factory(),
            rating=1, version=version1)
        review2 = Rating.objects.create(
            addon=self.addon, body='review 2', user=user_factory(),
            rating=2, version=version2)
        review3 = Rating.objects.create(
            addon=self.addon, body='review 3', user=user_factory(),
            rating=2, version=version1)
        review2.update(created=self.days_ago(1))
        review1.update(created=self.days_ago(2))
        assert Rating.unfiltered.count() == 3
        with self.assertNumQueries(7):
            # 7 queries:
            # - Two for opening and releasing a savepoint. Those only happen in
            #   tests, because TransactionTestCase wraps things in atomic().
            # - One for the ratings count (pagination)
            # - One for the ratings themselves
            # - One for the replies (there aren't any, but we don't know
            #   that without making a query)
            # - One for the addon
            # - One for its translations
            response = self.client.get(
                self.url, {'addon': self.addon.pk, 'lang': 'en-US'})
        assert response.status_code == 200
        data = json.loads(force_text(response.content))
        assert data['count'] == 3
        assert data['results']
        assert len(data['results']) == 3
        # Newest first: review3, then review2, then review1.
        assert data['results'][0]['body'] == review3.body
        assert data['results'][0]['addon']['slug'] == self.addon.slug
        assert data['results'][1]['body'] == review2.body
        assert data['results'][1]['addon']['slug'] == self.addon.slug
        assert data['results'][2]['body'] == review1.body
        assert data['results'][2]['addon']['slug'] == self.addon.slug
def test_list_addon_queries_with_replies(self):
    """Replies are prefetched, so the query count stays at 7."""
    first_version = self.addon.current_version
    second_version = version_factory(addon=self.addon)
    oldest = Rating.objects.create(
        addon=self.addon, body='review 1', user=user_factory(),
        rating=1, version=first_version)
    middle = Rating.objects.create(
        addon=self.addon, body='review 2', user=user_factory(),
        rating=2, version=second_version)
    newest = Rating.objects.create(
        addon=self.addon, body='review 3', user=user_factory(),
        rating=2, version=first_version)
    middle.update(created=self.days_ago(1))
    oldest.update(created=self.days_ago(2))
    reply_to_oldest = Rating.objects.create(
        addon=self.addon, body='reply to review 1', reply_to=oldest,
        user=user_factory())
    reply_to_middle = Rating.objects.create(
        addon=self.addon, body='reply to review 2', reply_to=middle,
        user=reply_to_oldest.user)
    assert Rating.unfiltered.count() == 5

    with self.assertNumQueries(7):
        # 7 queries:
        # - Two for opening and releasing a savepoint. Those only happen
        #   in tests, because TransactionTestCase wraps things in atomic().
        # - One for the ratings count
        # - One for the ratings
        # - One for the replies (using prefetch_related())
        # - One for the addon
        # - One for its translations
        response = self.client.get(
            self.url, {'addon': self.addon.pk, 'lang': 'en-US'})
    assert response.status_code == 200
    data = json.loads(force_text(response.content))
    assert data['count'] == 3
    results = data['results']
    assert results
    assert len(results) == 3
    # Most recent first, each paired with its expected reply (if any).
    expected = (
        (newest, None),
        (middle, reply_to_middle),
        (oldest, reply_to_oldest),
    )
    for result, (rating, reply) in zip(results, expected):
        assert result['body'] == rating.body
        assert result['addon']['slug'] == self.addon.slug
        if reply is None:
            assert result['reply'] is None
        else:
            assert result['reply']['body'] == reply.body
def test_list_addon_grouped_ratings(self):
    """Grouped ratings break down the count per score, including zeroes."""
    data = self.test_list_addon(show_grouped_ratings='true')
    expected_counts = {'1': 1, '2': 1, '3': 0, '4': 0, '5': 0}
    for score, count in expected_counts.items():
        assert data['grouped_ratings'][score] == count
def test_list_addon_without_grouped_ratings(self):
    """Explicitly disabling grouped ratings omits the key entirely."""
    response_data = self.test_list_addon(show_grouped_ratings='false')
    assert 'grouped_ratings' not in response_data
def test_list_addon_with_funky_grouped_ratings_param(self):
    """A non-boolean show_grouped_ratings value is rejected with a 400."""
    params = {'addon': self.addon.pk, 'show_grouped_ratings': 'blah'}
    response = self.client.get(self.url, params)
    assert response.status_code == 400
    data = json.loads(force_text(response.content))
    expected_message = 'show_grouped_ratings parameter should be a boolean'
    assert data['detail'] == expected_message
def test_list_addon_unknown(self, **kwargs):
    """Requesting ratings for a non-existent add-on returns a 404.

    Returns the parsed response body so other tests can reuse it.
    """
    # A pk that cannot exist: offset well past the add-on under test.
    params = dict({'addon': self.addon.pk + 42}, **kwargs)
    response = self.client.get(self.url, params)
    assert response.status_code == 404
    return json.loads(force_text(response.content))
def test_list_addon_grouped_ratings_unknown_addon_not_present(self):
    """grouped_ratings must not leak into a 404 response body."""
    response_data = self.test_list_addon_unknown(show_grouped_ratings=1)
    assert 'grouped_ratings' not in response_data
def test_list_addon_guid(self):
    """The `addon` query parameter also accepts a guid."""
    self.test_list_addon(addon=self.addon.guid)
def test_list_addon_slug(self):
    """The `addon` query parameter also accepts a slug."""
    self.test_list_addon(addon=self.addon.slug)
def test_list_with_empty_reviews(self):
    """Body-less ratings are listed by default but can be filtered out."""
    def make_rating(body='review text', user=None):
        # Helper: one rating on self.addon, optionally body-less.
        return Rating.objects.create(
            addon=self.addon, user=user or user_factory(),
            rating=3, body=body)

    self.user = user_factory()
    make_rating()
    make_rating()
    make_rating(body=None)
    make_rating(body=None)
    make_rating(body=None, user=self.user)

    # Reviews with no body are shown by default.
    response = self.client.get(self.url, {'addon': self.addon.pk})
    data = json.loads(force_text(response.content))
    assert data['count'] == 5 == len(data['results'])

    self.client.login_api(self.user)

    # ...unless explicitly filtered out.
    response = self.client.get(
        self.url, {'addon': self.addon.pk, 'filter': 'without_empty_body'})
    data = json.loads(force_text(response.content))
    assert data['count'] == 2 == len(data['results'])

    # The combined filter keeps only your own empty reviews.
    response = self.client.get(
        self.url, {'addon': self.addon.pk,
                   'filter': 'without_empty_body,with_yours'})
    data = json.loads(force_text(response.content))
    assert data['count'] == 3 == len(data['results'])
def test_list_user(self, **kwargs):
    """Filtering by user returns their ratings and replies, newest first.

    Returns the parsed response body so other tests can reuse it.
    """
    self.user = user_factory()
    newer_review = Rating.objects.create(
        addon=self.addon, body='review 1', user=self.user)
    older_review = Rating.objects.create(
        addon=self.addon, body='review 2', user=self.user)
    newer_review.update(created=self.days_ago(1))
    older_review.update(created=self.days_ago(2))
    # Add a review belonging to a different user, a reply and a deleted
    # review. The reply should show up since it's made by the right user,
    # but the rest should be ignored.
    deleted_review = Rating.objects.create(
        addon=self.addon, body='review deleted', user=self.user)
    deleted_review.delete()
    someone_elses_review = Rating.objects.create(
        addon=addon_factory(), body='review from other user',
        user=user_factory())
    reply = Rating.objects.create(
        addon=someone_elses_review.addon, body='reply to other user',
        reply_to=someone_elses_review, user=self.user)
    assert Rating.unfiltered.count() == 5

    response = self.client.get(
        self.url, dict({'user': self.user.pk}, **kwargs))
    assert response.status_code == 200
    data = json.loads(force_text(response.content))
    assert data['count'] == 3
    results = data['results']
    assert results
    assert len(results) == 3
    expected_ids = [reply.pk, newer_review.pk, older_review.pk]
    assert [result['id'] for result in results] == expected_ids
    assert 'can_reply' not in data  # Not enough information to show this.
    return data
def test_list_addon_and_user(self):
self.user = user_factory()
old_review = Rating.objects.create(
addon=self.addon, body='old review', user=self.user)
old_review.update(created=self.days_ago(42))
recent_review = Rating.objects.create(
addon=self.addon, body='recent review', user=self.user)
# None of those extra reviews should show up.
review_deleted = Rating.objects.create(
addon=self.addon, body='review deleted', user=self.user)
review_deleted.delete()
other_review = Rating.objects.create(
addon=addon_factory(), body='review from other user',
user=user_factory())
Rating.objects.create(
addon=other_review.addon, body='reply to other user',
reply_to=other_review, user=self.user) # right user, wrong addon.
Rating.objects.create(
addon=addon_factory(), body='review from other addon',
user=self.user)
assert Rating.unfiltered.count() == 6
# Since we're filtering on both addon and user, only the most recent
# review from self.user on self.addon | |
self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec'])
self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec'])
self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text'])
def can_create_from_shape_impl(self):
    # KLayout hook: this PCell cannot be generated from an existing
    # layout shape, so always decline.
    return False
def produce_impl(self):
    """Build the layout for the H0-cavity test structure.

    Places the photonic-crystal cavity cell, a vertical column of three
    grating couplers, three triangle tapers, and routes waveguides from
    each coupler to the cavity's in/through/coupled ports.
    """
    # This is the main part of the implementation: create the layout
    # fetch the parameters
    dbu = self.layout.dbu          # database unit (microns per layout unit)
    ly = self.layout
    cell = self.cell
    shapes = self.cell.shapes
    LayerSi = self.layer
    LayerSiN = ly.layer(LayerSi)
    a = self.a                     # lattice constant (microns)
    n = self.n                     # number of holes in x/y
    wg_dis = self.wg_dis           # waveguide offset in hole rows
    phc_xdis = self.phc_xdis       # x distance from GC to cavity center
    wg_bend_radius = self.wg_bend_radius
    wg_width = self.t              # NOTE(review): reuses the GC width param `t`
    # Slab length depends on hole-count parity relative to wg_dis;
    # presumably keeps the slab edge aligned with a hole column — TODO
    # confirm (the L3c structure below uses the opposite parity).
    if (wg_dis)%2 == 0:
        length_slab_x = n*a
    else:
        length_slab_x = (n-1)*a
    half_slab_x = length_slab_x/2
    # Cavity cell, shifted down so its embedded waveguide lines up with
    # the middle grating coupler at y = 127 um (presumably the standard
    # 127 um fiber-array pitch — TODO confirm).
    param_phc = {"a": self.a, "n": self.n, "r": self.r, "wg_dis": self.wg_dis,
    "layer": LayerSi, "pinrec": self.pinrec, "devrec": self.devrec}
    pcell_phc = ly.create_cell("H0 cavity with waveguide, no etching", "SiQL_PCells", param_phc )
    t_phc = Trans(Trans.R0,phc_xdis/dbu,(127)/dbu-(math.sqrt(3)/2*a*(wg_dis+1))/dbu)
    instance = cell.insert(pya.CellInstArray(pcell_phc.cell_index(), t_phc))
    # Column of 3 grating couplers at x = 0, pitched 127 um apart.
    param_GC = {"wavelength": self.wavelength, "n_t":self.n_t, "n_e":self.n_e, "angle_e":self.angle_e,
    "grating_length":self.grating_length, "taper_length":self.taper_length, "dc":self.dc, "period":self.period,
    "ff":self.ff, "t":self.t, "theta_c":self.theta_c,
    "layer": LayerSi, "pinrec": self.pinrec, "devrec": self.devrec}
    pcell_GC = ly.create_cell("SWG Fibre Coupler", "SiQL_PCells", param_GC )
    t_GC = Trans(Trans.R0, 0,0)
    instance = cell.insert(pya.CellInstArray(pcell_GC.cell_index(), t_GC, Point(0,127/dbu), Point(0,0), 3, 1))
    # Triangle tapers at the slab edges: one facing in (R0) on the left,
    # two mirrored (R180) on the right for the through/coupled ports.
    param_taper = {"tri_base": self.tri_base, "tri_height":self.tri_height,
    "taper_wg_length":self.taper_wg_length, "silayer":LayerSi,
    "pinrec": self.pinrec, "devrec": self.devrec}
    pcell_taper = ly.create_cell("Waveguide Triangle Tapers","SiQL_PCells",param_taper)
    t_taper1 = Trans(Trans.R0,(phc_xdis-half_slab_x)/dbu,(127)/dbu)
    instance = cell.insert(pya.CellInstArray(pcell_taper.cell_index(), t_taper1))
    pcell_taper2 = ly.create_cell("Waveguide Triangle Tapers","SiQL_PCells",param_taper)
    t_taper2 = Trans(Trans.R180, (phc_xdis+half_slab_x)/dbu,(127)/dbu)
    instance = cell.insert(pya.CellInstArray(pcell_taper2.cell_index(), t_taper2))
    pcell_taper3 = ly.create_cell("Waveguide Triangle Tapers","SiQL_PCells",param_taper)
    t_taper3 = Trans(Trans.R180, (phc_xdis+half_slab_x)/dbu,(127-2*(wg_dis+1)*math.sqrt(3)/2*a)/dbu)
    instance = cell.insert(pya.CellInstArray(pcell_taper3.cell_index(), t_taper3))
    # gc middle to in port
    points = [ [0, 127], [ phc_xdis-half_slab_x-self.taper_wg_length , 127] ]
    layout_waveguide_abs(cell, LayerSi, points, wg_width, wg_bend_radius)
    # gc top to through port
    points2 = [ [0, 254], [ (phc_xdis+half_slab_x+self.taper_wg_length)+wg_bend_radius , 254], [ (phc_xdis+half_slab_x+self.taper_wg_length)+wg_bend_radius , 127], [ (phc_xdis+half_slab_x+self.taper_wg_length) , 127] ]
    layout_waveguide_abs(cell, LayerSi, points2, wg_width, wg_bend_radius)
    # gc bottom to coupled port
    points3 = [ [0, 0], [ (phc_xdis+half_slab_x+self.taper_wg_length)+wg_bend_radius , 0], [ (phc_xdis+half_slab_x+self.taper_wg_length)+wg_bend_radius , 127-2*(wg_dis+1)*a*math.sqrt(3)/2], [ (phc_xdis+half_slab_x+self.taper_wg_length) , 127-2*(wg_dis+1)*a*math.sqrt(3)/2] ]
    layout_waveguide_abs(cell, LayerSi, points3, wg_width, wg_bend_radius)
class L3c_Test_Structure(pya.PCellDeclarationHelper):
    """
    The PCell declaration for the test structure with grating couplers and waveguides and a photonic crystal cavity
    (L3 cavity variant: the cavity cell takes the S1x..S5x hole-shift parameters).
    """
    def __init__(self):
        # Important: initialize the super class
        super(L3c_Test_Structure, self).__init__()
        #taper parameters
        self.param("tri_base", self.TypeDouble, "Taper Triangle Base (microns)", default = 0.363)
        self.param("tri_height", self.TypeDouble, "Taper Triangle Height (microns)", default = 0.426)
        self.param("taper_wg_length", self.TypeDouble, "Taper Length (microns)", default = 5)
        self.param("w", self.TypeDouble, "Waveguide Width", default = 1.0)
        self.param("wg_bend_radius", self.TypeDouble, "Waveguide Bend Radius (microns)", default = 15)
        #photonic crystal cavity
        self.param("a", self.TypeDouble, "lattice constant (microns)", default = 0.720)
        self.param("n", self.TypeInt, "Number of holes in x and y direction", default = 34)
        self.param("r", self.TypeDouble, "hole radius (microns)", default = 0.181)
        self.param("wg_dis", self.TypeInt, "Waveguide distance (number of holes)", default = 3)
        # S1x..S5x: lateral shifts of the holes adjacent to the L3 cavity.
        self.param("S1x", self.TypeDouble, "S1x shift", default = 0.337)
        self.param("S2x", self.TypeDouble, "S2x shift", default = 0.27)
        self.param("S3x", self.TypeDouble, "S3x shift", default = 0.088)
        self.param("S4x", self.TypeDouble, "S4x shift", default = 0.323)
        self.param("S5x", self.TypeDouble, "S5x shift", default = 0.0173)
        self.param("phc_xdis", self.TypeDouble, "Distance from GC to middle of Cavity", default = 35)
        #GC parameters
        self.param("wavelength", self.TypeDouble, "Design Wavelength (micron)", default = 2.9)
        self.param("n_t", self.TypeDouble, "Fiber Mode", default = 1.0)
        self.param("n_e", self.TypeDouble, "Grating Index Parameter", default = 3.1)
        self.param("angle_e", self.TypeDouble, "Taper Angle (deg)", default = 20.0)
        self.param("grating_length", self.TypeDouble, "Grating Length (micron)", default = 32.0)
        self.param("taper_length", self.TypeDouble, "Taper Length (micron)", default = 32.0)
        self.param("dc", self.TypeDouble, "Duty Cycle", default = 0.488193)
        self.param("period", self.TypeDouble, "Grating Period", default = 1.18939)
        self.param("ff", self.TypeDouble, "Fill Factor", default = 0.244319)
        self.param("t", self.TypeDouble, "Waveguide Width (micron)", default = 1.0)
        self.param("theta_c", self.TypeDouble, "Insertion Angle (deg)", default = 8.0)
        #Layer Parameters
        TECHNOLOGY = get_technology_by_name('EBeam')
        self.param("layer", self.TypeLayer, "Layer", default = TECHNOLOGY['Waveguide'])
        self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec'])
        self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec'])
        self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text'])
    def can_create_from_shape_impl(self):
        # KLayout hook: this PCell cannot be built from an existing shape.
        return False
    def produce_impl(self):
        """Build the layout: L3 cavity cell, a column of three grating
        couplers, three triangle tapers and the routing waveguides."""
        # This is the main part of the implementation: create the layout
        # fetch the parameters
        dbu = self.layout.dbu          # database unit (microns per layout unit)
        ly = self.layout
        cell = self.cell
        shapes = self.cell.shapes
        LayerSi = self.layer
        LayerSiN = ly.layer(self.layer)
        LayerPinRecN = ly.layer(self.pinrec)
        LayerDevRecN = ly.layer(self.devrec)
        LayerTextN = ly.layer(self.textl)
        a = self.a
        n = self.n
        wg_dis = self.wg_dis
        phc_xdis = self.phc_xdis
        wg_bend_radius = self.wg_bend_radius
        wg_width = self.w
        # NOTE(review): parity branch is inverted relative to the H0
        # structure's produce_impl — presumably intentional for L3 vs H0
        # hole layouts; confirm.
        if wg_dis%2 == 0:
            length_slab_x = (n-1)*a
        else:
            length_slab_x = n*a
        half_slab_x = length_slab_x/2
        # Cavity cell, shifted down so its waveguide lines up with the
        # middle grating coupler at y = 127 um (fiber-array pitch —
        # TODO confirm).
        param_phc = {"a": self.a, "n": self.n, "r": self.r, "wg_dis": self.wg_dis, "S1x":self.S1x, "S2x":self.S2x, "S3x":self.S3x, "S4x":self.S4x, "S5x":self.S5x,
        "layer": self.layer, "pinrec": self.pinrec, "devrec": self.devrec}
        pcell_phc = ly.create_cell("L3 cavity with waveguide", "SiQL_PCells", param_phc )
        t1 = Trans(Trans.R0,phc_xdis/dbu,(127)/dbu-(math.sqrt(3)/2*a*(wg_dis+1))/dbu)
        instance = cell.insert(pya.CellInstArray(pcell_phc.cell_index(), t1))
        # Column of 3 grating couplers at x = 0, pitched 127 um apart.
        param_GC = {"wavelength": self.wavelength, "n_t":self.n_t, "n_e":self.n_e, "angle_e":self.angle_e,
        "grating_length":self.grating_length, "taper_length":self.taper_length, "dc":self.dc, "period":self.period,
        "ff":self.ff, "t":self.t, "theta_c":self.theta_c,
        "layer": LayerSi, "pinrec": self.pinrec, "devrec": self.devrec}
        pcell_GC = ly.create_cell("SWG Fibre Coupler", "SiQL_PCells", param_GC )
        t_GC = Trans(Trans.R0, 0,0)
        instance = cell.insert(pya.CellInstArray(pcell_GC.cell_index(), t_GC, Point(0,127/dbu), Point(0,0), 3, 1))
        # Tapers at the slab edges: R0 on the left (in), two mirrored
        # R180 on the right (through / coupled ports).
        param_taper = {"tri_base": self.tri_base, "tri_height":self.tri_height,
        "taper_wg_length":self.taper_wg_length, "wg_width": self.w, "silayer":LayerSi,
        "pinrec": self.pinrec, "devrec": self.devrec}
        pcell_taper = ly.create_cell("Waveguide Triangle Tapers","SiQL_PCells",param_taper)
        t_taper1 = Trans(Trans.R0,(phc_xdis-half_slab_x)/dbu,(127)/dbu)
        instance = cell.insert(pya.CellInstArray(pcell_taper.cell_index(), t_taper1))
        pcell_taper2 = ly.create_cell("Waveguide Triangle Tapers","SiQL_PCells",param_taper)
        t_taper2 = Trans(Trans.R180, (phc_xdis+half_slab_x)/dbu,(127)/dbu)
        instance = cell.insert(pya.CellInstArray(pcell_taper2.cell_index(), t_taper2))
        pcell_taper3 = ly.create_cell("Waveguide Triangle Tapers","SiQL_PCells",param_taper)
        t_taper3 = Trans(Trans.R180, (phc_xdis+half_slab_x)/dbu,(127-2*(wg_dis+1)*math.sqrt(3)/2*a)/dbu)
        instance = cell.insert(pya.CellInstArray(pcell_taper3.cell_index(), t_taper3))
        # gc middle to in port
        points = [ [0, 127], [ phc_xdis-half_slab_x-self.taper_wg_length , 127] ]
        layout_waveguide_abs(cell, LayerSi, points, wg_width, wg_bend_radius)
        # gc top to through port
        points2 = [ [0, 254], [ (phc_xdis+half_slab_x+self.taper_wg_length)+wg_bend_radius , 254], [ (phc_xdis+half_slab_x+self.taper_wg_length)+wg_bend_radius , 127], [ (phc_xdis+half_slab_x+self.taper_wg_length) , 127] ]
        layout_waveguide_abs(cell, LayerSi, points2, wg_width, wg_bend_radius)
        # gc bottom to coupled port
        points3 = [ [0, 0], [ (phc_xdis+half_slab_x+self.taper_wg_length)+wg_bend_radius , 0], [ (phc_xdis+half_slab_x+self.taper_wg_length)+wg_bend_radius , 127-2*(wg_dis+1)*a*math.sqrt(3)/2], [ (phc_xdis+half_slab_x+self.taper_wg_length) , 127-2*(wg_dis+1)*a*math.sqrt(3)/2] ]
        layout_waveguide_abs(cell, LayerSi, points3, wg_width, wg_bend_radius)
class GC_to_GC_ref1(pya.PCellDeclarationHelper):
    """
    PCell declaration for a grating-coupler reference structure.

    NOTE(review): the original docstring mentioned a photonic crystal
    cavity, but no cavity cell is instantiated here — presumably a
    copy-paste from the sibling classes; confirm intent. produce_impl is
    currently in a debugging state (see its docstring).
    """
    def __init__(self):
        # Important: initialize the super class
        super(GC_to_GC_ref1, self).__init__()
        #other waveguide parameters
        self.param("wg_radius", self.TypeDouble, "Waveguide Radius (microns)", default = 15)
        # NOTE(review): description string below looks copy-pasted from
        # wg_xdis — it says "x Distance" but the parameter is the width.
        self.param("wg_width", self.TypeDouble, "Waveguide x Distance (microns)", default = 1)
        self.param("wg_xdis", self.TypeDouble, "Waveguide x Distance (microns)", default = 5)
        #Layer Parameters
        TECHNOLOGY = get_technology_by_name('EBeam')
        self.param("layer", self.TypeLayer, "Layer", default = TECHNOLOGY['Waveguide'])
        self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec'])
        self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec'])
        self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text'])
    def can_create_from_shape_impl(self):
        # KLayout hook: this PCell cannot be built from an existing shape.
        return False
    def produce_impl(self):
        """Instantiate a single grating coupler.

        NOTE(review): debugging state — the method prints and returns
        early, so the waveguide-routing code after ``return`` is
        unreachable (and its layout call is commented out anyway).
        """
        # This is the main part of the implementation: create the layout
        # fetch the parameters
        dbu = self.layout.dbu
        ly = self.layout
        cell = self.cell
        shapes = self.cell.shapes
        LayerSi = self.layer
        LayerSiN = ly.layer(self.layer)
        LayerPinRecN = ly.layer(self.pinrec)
        LayerDevRecN = ly.layer(self.devrec)
        LayerTextN = ly.layer(self.textl)
        wg_r = self.wg_radius
        wg_w = self.wg_width
        wg_xdis = self.wg_xdis
        #uses the default parameters for the GC
        param_GC = { "layer": LayerSi, "pinrec": self.pinrec, "devrec": self.devrec}
        pcell_GC = ly.create_cell("SWG Fibre Coupler", "SiQL_PCells", param_GC )
        t_GC = Trans(Trans.R0, 0,0)
        #instance = cell.insert(pya.place_cell(pcell_GC, t_GC, Point(0,127/dbu), Point(0,0), 2, 1))
        #instance=place_cell(cell,pcell_GC,[0.5,0.5])
        cell.insert(pya.CellInstArray(pcell_GC.cell_index(),pya.Trans(pya.Trans.R0, 0, 0)))
        # NOTE(review): debug residue — this print and the early return
        # leave the routing below unreachable.
        print("test")
        return
        points = [ [0, 0], [wg_r+wg_xdis, 0],[wg_r+wg_xdis, 127], [ 0,127]]
        #layout_waveguide_abs(cell, LayerSi, points, wg_w, wg_r)
class PhC_W1wg_reference(pya.PCellDeclarationHelper):
"""
Input: length, width
"""
import numpy
def __init__(self):
# Important: initialize the super class
super(PhC_W1wg_reference, self).__init__()
#phc parameters
self.param("a", self.TypeDouble, "lattice constant (microns)", default = 0.744)
self.param("n", self.TypeInt, "Number of holes in x and y direction", default = 30)
self.param("r", self.TypeDouble, "hole radius (microns)", default = 0.179)
self.param("wg_dis", self.TypeInt, "Waveguide distance (number of holes)", default = 2)
self.param("n_vertices", self.TypeInt, "Vertices of a hole", default = 32)
self.param("etch_condition", self.TypeInt, "Etch = 1, No Etch = 2", default = 1)
self.param("phc_xdis", self.TypeDouble, "Distance to middle of phc", default = 35)
#other waveguide parameters
self.param("wg_radius", self.TypeDouble, "Waveguide Radius (microns)", default = 15)
self.param("wg_width", self.TypeDouble, "Waveguide Radius (microns)", default = 1)
#taper parameters
self.param("tri_base", self.TypeDouble, | |
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
),
),
'HarvestPlantBoosterFamilies.dat': File(
fields=(
),
),
'HarvestPlantBoosters.dat': File(
fields=(
Field(
name='HarvestObjectsKey',
type='ulong',
key='HarvestObjects.dat',
unique=True,
),
Field(
name='Radius',
type='int',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Lifeforce',
type='int',
),
Field(
name='AdditionalCraftingOptionsChance',
type='int',
),
Field(
name='RareExtraChances',
type='int',
),
Field(
name='HarvestPlantBoosterFamilies',
type='int',
),
),
),
'HarvestSeedTypes.dat': File(
fields=(
Field(
name='HarvestObjectsKey',
type='ulong',
key='HarvestObjects.dat',
unique=True,
),
Field(
name='Key1',
type='ulong',
),
Field(
name='GrowthCycles',
type='int',
),
Field(
name='AOFiles',
type='ref|list|ref|string',
file_path=True,
file_ext='.ao',
),
Field(
name='Unknown4',
type='ref|list|int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Tier',
type='int',
),
Field(
name='RequiredNearbySeed_Tier',
type='int',
),
Field(
name='RequiredNearbySeed_Amount',
type='int',
),
Field(
name='WildLifeforceConsumedPercentage',
type='int',
),
Field(
name='VividLifeforceConsumedPercentage',
type='int',
),
Field(
name='PrimalLifeforceConsumedPercentage',
type='int',
),
Field(
name='Text',
type='ref|string',
),
Field(
name='HarvestCraftOptionsKeys',
type='ref|list|ulong',
key='HarvestCraftOptions.dat',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='ref|list|int',
),
Field(
name='AchievementItemsKeys',
type='ref|list|ulong',
key='AchievementItems.dat',
),
Field(
name='OutcomeType',
type='int',
),
),
),
'HarvestSpecialCraftCosts.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='int',
),
),
),
'HarvestSpecialCraftOptions.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Key1',
type='ulong',
),
),
),
'HarvestStorageLayout.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Button',
type='ref|string',
file_path=True,
),
Field(
name='ButtonHighlight',
type='ref|string',
file_path=True,
),
Field(
name='HasButton',
type='bool',
),
),
),
'HeistAreaFormationLayout.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
),
),
'HeistAreas.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True
),
Field(
name='WorldAreasKeys',
type='ref|list|ulong',
key='WorldAreas.dat'
),
Field(
name='Unknown2',
type='int',
),
Field(
name='EnvironmentsKey1',
type='ulong',
key='Environments.dat',
),
Field(
name='EnvironmentsKey2',
type='ulong',
key='Environments.dat',
),
Field(
name='HeistJobsKeys',
type='ref|list|ulong',
key='HeistJobs.dat',
),
Field(
name='Contract_BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat'
),
Field(
name='Blueprint_BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='DGRFile',
type='ref|string',
file_path=True,
file_ext='.dgr',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Flag1',
type='bool',
),
Field(
name='Blueprint_DDSFile',
type='ref|string',
file_path=True,
file_ext='.dds',
),
Field(
name='AchievementItemsKeys',
type='ref|list|ulong',
key='AchievementItems.dat',
),
Field(
name='AchievementItemsKeys2',
type='ref|list|ulong',
key='AchievementItems.dat',
),
Field(
name='ClientStringsKey',
type='ulong',
key='ClientStrings.dat',
),
),
),
'HeistBalancePerLevel.dat': File(
fields=(
Field(
name='Level',
type='int',
),
Field(
name='Unknown1',
type='float',
),
Field(
name='Unknown2',
type='float',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='float',
),
Field(
name='Unknown6',
type='float',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Key2',
type='ulong',
),
Field(
name='Key3',
type='ulong',
),
Field(
name='Key4',
type='ulong',
),
Field(
name='Unknown17',
type='float',
),
Field(
name='Unknown18',
type='float',
),
Field(
name='Unknown19',
type='float',
),
Field(
name='Unknown20',
type='float',
),
Field(
name='Key5',
type='ulong',
),
Field(
name='Key6',
type='ulong',
),
Field(
name='Unknown23',
type='float',
),
Field(
name='Unknown24',
type='float',
),
Field(
name='Unknown25',
type='int'
)
),
),
'HeistBlueprintWindowTypes.dat': File(
fields=(
),
),
'HeistChestRewardTypes.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Art',
type='ref|string',
file_path=True,
),
Field(
name='RewardTypeName',
type='ref|string',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='RewardRoomName',
type='ref|string',
),
Field(
name='MinLevel',
type='int',
),
Field(
name='MaxLevel',
type='int',
),
Field(
name='Weight',
type='int',
),
Field(
name='RewardRoomName2',
type='ref|string',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
Field(
name='Unknown1',
type='int',
),
),
),
'HeistChestTypes.dat': File(
fields=(
),
),
'HeistChests.dat': File(
fields=(
Field(
name='ChestsKey',
type='ulong',
key='Chests.dat',
),
Field(
name='Weight',
type='int',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
Field(
name='HeistChestTypesKey',
type='int',
key='HeistChestTypes.dat'
),
),
),
'HeistChokepointFormation.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='ref|list|int',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
),
),
'HeistConstants.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Value',
type='float',
),
),
),
'HeistContracts.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
unique=True,
),
Field(
name='HeistAreasKey',
type='ulong',
key='HeistAreas.dat'
),
Field(
name='Unknown2',
type='int',
),
),
),
'HeistDoodadNPCs.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='AOFile',
type='ref|string',
file_path=True,
file_ext='.ao',
),
Field(
name='Stance',
type='ref|string',
),
Field(
name='Key1',
type='ulong',
),
),
),
'HeistDoors.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Unknown1',
type='ref|string',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Unknown4',
type='ref|string',
),
Field(
name='Unknown5',
type='ref|list|ref|string',
),
Field(
name='Unknown6',
type='ref|list|ref|string',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Key2',
type='ulong',
),
),
),
'HeistEquipment.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
unique=True,
),
Field(
name='RequiredJob_HeistJobsKey',
type='ulong',
key='HeistJobs.dat',
),
Field(
name='RequiredLevel',
type='int',
),
),
),
'HeistFormationMarkerType.dat': File(
fields=(
),
),
'HeistGeneration.dat': File(
fields=(
Field(
name='Level',
type='int',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='int',
),
Field(
name='Unknown16',
type='int',
),
Field(
name='Unknown17',
type='int',
),
Field(
name='Unknown18',
type='int',
),
Field(
name='Unknown19',
type='int',
),
Field(
name='Unknown20',
type='int',
),
),
),
'HeistIntroAreas.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='DGRFile',
type='ref|string',
file_path=True,
file_ext='.dgr',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown11',
type='byte',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
),
),
'HeistJobs.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True
),
Field(
name='Name',
type='ref|string',
),
Field(
name='RequiredSkillIcon',
type='ref|string',
file_path=True,
),
Field(
name='SkillIcon',
type='ref|string',
file_path=True,
),
Field(
name='Unknown4',
type='float',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='MapIcon',
type='ref|string',
),
Field(
name='Level_StatsKey',
type='ulong',
key='Stats.dat',
),
Field(
name='Alert_StatsKey',
type='ulong',
key='Stats.dat',
),
Field(
name='Alarm_StatsKey',
type='ulong',
key='Stats.dat',
),
Field(
name='Cost_StatsKey',
type='ulong',
key='Stats.dat',
),
Field(
name='ExperienceGain_StatsKey',
type='ulong',
key='Stats.dat',
),
),
),
'HeistJobsExperiencePerLevel.dat': File(
fields=(
Field(
name='HeistJobsKey',
type='ulong',
key='HeistJobs.dat',
),
Field(
name='Tier',
type='int',
),
Field(
name='Experience',
type='int',
),
Field(
name='MinLevel',
type='int',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
),
),
'HeistLockType.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='HeistJobsKey',
type='ulong',
key='HeistJobs.dat',
),
Field(
name='SkillIcon',
type='ref|string',
file_path=True,
),
),
),
'HeistNPCAuras.dat': File(
fields=(
Field(
name='StatsKey',
type='ulong',
key='Stats.dat',
),
Field(
name='Unknown1',
type='ulong',
),
),
),
'HeistNPCBlueprintTypes.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown2',
type='int',
),
),
),
'HeistNPCDialogue.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
Field(
name='Keys1',
type='ref|list|ulong',
),
Field(
name='Unknown4',
type='int',
),
),
),
'HeistNPCStats.dat': File(
fields=(
Field(
name='StatsKey',
type='ulong',
key='Stats.dat'
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Flag1',
type='bool',
),
Field(
name='Flag2',
type='bool',
),
Field(
name='Flag3',
type='bool',
),
),
),
'HeistNPCs.dat': File(
fields=(
Field(
name='NPCsKey',
type='ulong',
key='NPCs.dat',
),
Field(
name='MonsterVarietiesKey',
type='ulong',
key='MonsterVarieties.dat',
),
Field(
name='SkillLevel_HeistJobsKeys',
type='ref|list|ulong',
key='HeistJobs.dat',
),
Field(
name='PortraitFile',
type='ref|string',
file_path=True,
),
Field(
name='HeistNPCStatsKeys',
type='ref|list|ulong',
key='HeistNPCStats.dat',
),
Field(
name='StatValues',
type='ref|list|float',
),
Field(
name='Unknown6',
type='float',
),
Field(
name='SkillLevel_Values',
type='ref|list|int',
),
Field(
name='Name',
type='ref|string',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='SilhouetteFile',
type='ref|string',
file_path=True,
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='HeistNPCsKey',
type='ref|generic',
key='HeistNPCs.dat',
),
Field(
name='StatValues2',
type='ref|list|float',
),
Field(
name='Ally_NPCsKey',
type='ulong',
key='<KEY>',
),
Field(
name='ActiveNPCIcon',
type='ref|string',
file_path=True,
),
Field(
name='HeistJobsKey',
type='ulong',
key='HeistJobs.dat',
),
Field(
name='Equip_AchievementItemsKeys',
type='ref|list|ulong',
key='AchievementItems.dat'
),
Field(
name='AOFile',
type='ref|string',
file_path=True,
file_ext='.ao',
),
),
),
'HeistObjectiveValueDescriptions.dat': File(
fields=(
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
),
),
'HeistObjectives.dat': File(
fields=(
Field(
name='key0',
type='ulong',
),
Field(
name='Unknown1',
type='float',
),
Field(
name='Name',
type='ref|string',
),
),
),
'HeistPatrolPacks.dat': File(
fields=(
Field(
name='MonsterPacksKey',
type='ulong',
key='MonsterPacks.dat',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown7',
type='byte',
),
),
),
'HeistQuestContracts.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
Field(
name='Key2',
type='ulong',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Key3',
type='ulong',
),
Field(
name='Key4',
type='ulong',
),
Field(
name='Flag1',
type='byte',
),
Field(
name='Flag2',
type='byte',
| |
<filename>my_mod/tlm_buffer.py
# coding: UTF-8
"""
tlm buffer
"""
import sys
# from collections import OrderedDict
# import pprint
def GenerateTlmBuffer(settings, other_obc_dbs):
    """Generate telemetry-buffer C sources for every enabled OBC.

    For each enabled entry of ``settings["other_obc_data"]`` this renders a
    ``*_telemetry_buffer.c`` / ``*_telemetry_buffer.h`` pair plus a
    ``*_telemetry_data_definitions.h`` from the telemetry DB found in
    *other_obc_dbs*, writing them under the configured driver path.

    NOTE: ``{{`` / ``}}`` in the emitted snippets are literal braces escaped
    for the later ``str.format`` call (performed by the Output* helpers),
    which expands ``{_obc_name_upper}``-style placeholders.
    """
    DATA_START_ROW = 8  # first row of a tlm DB sheet that holds data entries
    for i in range(len(settings["other_obc_data"])):
        if not settings["other_obc_data"][i]["is_enable"]:
            continue
        obc_name = settings["other_obc_data"][i]["name"]
        driver_type = settings["other_obc_data"][i]["driver_type"]
        driver_name = settings["other_obc_data"][i]["driver_name"]
        max_tlm_num = settings["other_obc_data"][i]["max_tlm_num"]
        tlm_db = other_obc_dbs[obc_name]
        body_c = ""
        body_h = ""
        tlmdef_body_h = ""
        # Forward declarations of the per-telemetry analyze functions.
        for tlm in tlm_db:
            tlm_name = tlm["tlm_name"]
            tlm_name_lower = tlm_name.lower()
            body_c += (
                "static DS_ERR_CODE {_obc_name_upper}_analyze_tlm_"
                + tlm_name_lower
                + "_(const CommonTlmPacket* packet, {_obc_name_upper}_TLM_CODE tlm_id, "
                + driver_type
                + "* "
                + driver_name
                + ");\n"
            )
        body_c += "\n"
        body_c += "static CommonTlmPacket {_obc_name_upper}_ctp_;\n"
        body_c += "\n"
        body_h += "typedef struct " + driver_type + " " + driver_type + ";\n"
        body_h += "\n"
        body_h += "#define {_obc_name_upper}_MAX_TLM_NUM (" + str(max_tlm_num) + ")\n"
        body_h += "\n"
        body_h += "typedef struct\n"
        body_h += "{{\n"
        body_h += " CommonTlmPacket packet; //!< 最新のテレメパケットを保持\n"
        body_h += " uint8_t is_null_packet; //!< 一度でもテレメを受信しているか?(空配列が読み出されるのを防ぐため)\n"
        body_h += "}} {_obc_name_upper}_TlmBufferElem;\n"
        body_h += "\n"
        body_h += "typedef struct\n"
        body_h += "{{\n"
        body_h += " {_obc_name_upper}_TlmBufferElem tlm[{_obc_name_upper}_MAX_TLM_NUM]; //!< TLM ID ごとに保持\n"
        body_h += "}} {_obc_name_upper}_TlmBuffer;\n"
        body_h += "\n"
        tlmdef_body_h += "typedef struct\n"
        tlmdef_body_h += "{{\n"
        # Build one nested struct definition per telemetry packet.
        for tlm in tlm_db:
            tlm_name = tlm["tlm_name"]
            tlm_name_lower = tlm_name.lower()
            # pprint.pprint(tlm['data'][DATA_START_ROW:])
            last_var_type = ""
            tlm_struct_tree = {}  # assumes Python 3.7+, so dict insertion order is preserved
            # tlm_struct_tree = collections.OrderedDict()  # could switch to this instead
            for j in range(DATA_START_ROW, len(tlm["data"])):
                comment = tlm["data"][j][0]
                name = EscapeTlmElemName_(tlm["data"][j][1])
                var_type = tlm["data"][j][2]
                if comment == "" and name == "":  # stop once both Comment and Name are blank
                    break
                if comment != "":
                    continue
                if name == "":
                    continue
                if var_type == "":
                    # Blank type means "same as previous row" (bit-packed member).
                    var_type = last_var_type
                last_var_type = var_type
                if last_var_type == "":
                    continue
                # name_tree = name.lower().split(".")[2:]  # assumes OBC_NAME.TLM_NAME.HOGE.FUGA
                name_tree = name.lower().split(".")
                name_path = "/".join(name_tree)
                if SetStructTree_(tlm_struct_tree, name_path, var_type):
                    print("Error: Tlm DB Struct Parse Err at " + name, file=sys.stderr)
                    sys.exit(1)
            # pprint.pprint(tlm_struct_tree)
            # for k, v in tlm_struct_tree.items():
            #     print(k)
            #     print(v)
            #     print("")
            tlmdef_body_h += GenerateStructDef_(tlm_struct_tree, tlm_name_lower)
        tlmdef_body_h += "}} {_obc_name_upper}_TlmData;\n"
        # Public prototypes exposed from the generated header.
        body_h += (
            "void {_obc_name_upper}_init_tlm_buffer(" + driver_type + "* " + driver_name + ");\n"
        )
        body_h += "\n"
        body_h += (
            "DS_ERR_CODE {_obc_name_upper}_buffer_tlm_packet(DS_StreamConfig* p_stream_config, "
            + driver_type
            + "* "
            + driver_name
            + ");\n"
        )
        body_h += "\n"
        body_h += (
            "TF_TLM_FUNC_ACK {_obc_name_upper}_pick_up_tlm_buffer(const "
            + driver_type
            + "* "
            + driver_name
            + ", {_obc_name_upper}_TLM_CODE tlm_id, uint8_t* packet, uint16_t* len, uint16_t max_len);\n"
        )
        body_c += (
            "void {_obc_name_upper}_init_tlm_buffer(" + driver_type + "* " + driver_name + ")\n"
        )
        body_c += "{{\n"
        body_c += " // packet などは,上位の driver の初期化で driver もろとも memset 0x00 されていると期待して,ここではしない\n"
        body_c += " int i = 0;\n"
        body_c += " for (i = 0; i < {_obc_name_upper}_MAX_TLM_NUM; ++i)\n"
        body_c += " {{\n"
        body_c += " " + driver_name + "->tlm_buffer.tlm[i].is_null_packet = 1;\n"
        body_c += " }}\n"
        body_c += "}}\n"
        body_c += "\n"
        body_c += (
            "DS_ERR_CODE {_obc_name_upper}_buffer_tlm_packet(DS_StreamConfig* p_stream_config, "
            + driver_type
            + "* "
            + driver_name
            + ")\n"
        )
        body_c += "{{\n"
        body_c += " {_obc_name_upper}_TLM_CODE tlm_id;\n"
        body_c += " DS_ERR_CODE ret;\n"
        body_c += "\n"
        body_c += " ret = DS_C2AFMT_get_ctp(p_stream_config, &{_obc_name_upper}_ctp_);\n"
        body_c += " if (ret != DS_ERR_CODE_OK) return ret;\n"
        body_c += "\n"
        body_c += " tlm_id = ({_obc_name_upper}_TLM_CODE)CTP_get_id(&{_obc_name_upper}_ctp_);\n"
        body_c += "\n"
        body_c += " switch (tlm_id)\n"
        body_c += " {{\n"
        # One switch case per telemetry packet, dispatching to its analyzer.
        for tlm in tlm_db:
            tlm_name = tlm["tlm_name"]
            tlm_name_upper = tlm_name.upper()
            tlm_name_lower = tlm_name.lower()
            body_c += " case {_obc_name_upper}_Tlm_CODE_" + tlm_name_upper + ":\n"
            body_c += (
                " return {_obc_name_upper}_analyze_tlm_"
                + tlm_name_lower
                + "_(&{_obc_name_upper}_ctp_, tlm_id, "
                + driver_name
                + ");\n"
            )
        body_c += " default:\n"
        body_c += " " + settings["other_obc_data"][i]["code_when_tlm_not_found"] + "\n"
        body_c += " return DS_ERR_CODE_ERR;\n"
        body_c += " }}\n"
        body_c += "}}\n"
        body_c += "\n"
        # Emit the body of each per-telemetry analyze function.
        for tlm in tlm_db:
            # NOTE: "tpye" is a historical typo kept for token fidelity.
            conv_tpye_to_temp = {
                "int8_t": "temp_i8",
                "int16_t": "temp_i16",
                "int32_t": "temp_i32",
                "uint8_t": "temp_u8",
                "uint16_t": "temp_u16",
                "uint32_t": "temp_u32",
                "float": "temp_f",
                "double": "temp_d",
            }
            conv_tpye_to_size = {
                "int8_t": 1,
                "int16_t": 2,
                "int32_t": 4,
                "uint8_t": 1,
                "uint16_t": 2,
                "uint32_t": 4,
                "float": 4,
                "double": 8,
            }
            tlm_name = tlm["tlm_name"]
            tlm_name_upper = tlm_name.upper()
            tlm_name_lower = tlm_name.lower()
            body_c += (
                "static DS_ERR_CODE {_obc_name_upper}_analyze_tlm_"
                + tlm_name_lower
                + "_(const CommonTlmPacket* packet, {_obc_name_upper}_TLM_CODE tlm_id, "
                + driver_type
                + "* "
                + driver_name
                + ")\n"
            )
            body_c += "{{\n"
            body_c += " const uint8_t* f = packet->packet;\n"
            # Scratch variables, one per supported C type.
            for k, v in conv_tpye_to_temp.items():
                if k == "float":
                    body_c += " " + k + " " + v + " = 0.0f;\n"
                elif k == "double":
                    body_c += " " + k + " " + v + " = 0.0;\n"
                else:
                    body_c += " " + k + " " + v + " = 0;\n"
            body_c += "\n"
            body_c += " // GS へのテレメ中継のためのバッファーへのコピー\n"
            body_c += (
                " CTP_copy_packet(&("
                + driver_name
                + "->tlm_buffer.tlm[tlm_id].packet), packet);\n"
            )
            body_c += " " + driver_name + "->tlm_buffer.tlm[tlm_id].is_null_packet = 0;\n"
            body_c += " // TODO: CRC チェック\n"
            body_c += "\n"
            body_c += " // MOBC 内部でテレメデータへアクセスしやすいようにするための構造体へのパース\n"
            last_var_type = ""
            for j in range(DATA_START_ROW, len(tlm["data"])):
                comment = tlm["data"][j][0]
                name = EscapeTlmElemName_(tlm["data"][j][1])
                var_type = tlm["data"][j][2]
                if comment == "" and name == "":  # stop once both Comment and Name are blank
                    break
                if comment != "":
                    continue
                if name == "":
                    continue
                if var_type == "":
                    var_type = last_var_type
                last_var_type = var_type
                if last_var_type == "":
                    continue
                oct_pos = int(tlm["data"][j][5])
                bit_pos = int(tlm["data"][j][6])
                bit_len = int(tlm["data"][j][7])
                is_compression = 0  # compression flag (for entries packed into bit fields)
                # NOTE(review): reads row j+1 — presumably the DB sheet always
                # has a trailing blank row; otherwise this would IndexError.
                if tlm["data"][j][2] == "" or tlm["data"][j + 1][2] == "":
                    is_compression = 1
                if (
                    tlm["data"][j + 1][0] == ""
                    and tlm["data"][j + 1][1] == ""
                    and tlm["data"][j][2] != ""
                ):  # exclude the final row
                    is_compression = 0
                # name_tree = name.lower().split(".")[2:]  # assumes OBC_NAME.TLM_NAME.HOGE.FUGA
                name_tree = name.lower().split(".")
                name_path = ".".join(name_tree)
                var_name = driver_name + "->tlm_data." + tlm_name_lower + "." + name_path
                if is_compression:
                    # Bit-packed member: copy the containing word, then shift and mask.
                    body_c += (
                        " endian_memcpy(&"
                        + conv_tpye_to_temp[var_type]
                        + ", &(f["
                        + str(oct_pos)
                        + "]), "
                        + str(conv_tpye_to_size[var_type])
                        + ");\n"
                    )
                    body_c += (
                        " "
                        + conv_tpye_to_temp[var_type]
                        + " >>= "
                        + str(conv_tpye_to_size[var_type] * 8 - bit_pos - bit_len)
                        + ";\n"
                    )
                    body_c += (
                        " "
                        + conv_tpye_to_temp[var_type]
                        + " &= "
                        + hex(int("0b" + "1" * bit_len, 2))
                        + ";\n"
                    )
                    body_c += " " + var_name + " = " + conv_tpye_to_temp[var_type] + ";\n"
                else:
                    # Plain member: endian-aware copy straight into the struct.
                    body_c += (
                        " endian_memcpy(&("
                        + var_name
                        + "), &(f["
                        + str(oct_pos)
                        + "]), "
                        + str(conv_tpye_to_size[var_type])
                        + ");\n"
                    )
            body_c += " // TODO: ビットフィールドをつかっている系は,様々なパターンがあり得るので,今後,バグが出ないか注視する\n"
            body_c += "\n"
            body_c += " // ワーニング回避\n"
            for k, v in conv_tpye_to_temp.items():
                body_c += " (void)" + v + ";\n"
            body_c += "\n"
            body_c += " return DS_ERR_CODE_OK;\n"
            body_c += "}}\n"
            body_c += "\n"
        body_c += (
            "TF_TLM_FUNC_ACK {_obc_name_upper}_pick_up_tlm_buffer(const "
            + driver_type
            + "* "
            + driver_name
            + ", {_obc_name_upper}_TLM_CODE tlm_id, uint8_t* packet, uint16_t* len, uint16_t max_len)\n"
        )
        body_c += "{{\n"
        body_c += " const CommonTlmPacket* buffered_packet;\n"
        body_c += "\n"
        body_c += (
            " if (tlm_id >= {_obc_name_upper}_MAX_TLM_NUM) return TF_TLM_FUNC_ACK_NOT_DEFINED;\n"
        )
        body_c += (
            " if ("
            + driver_name
            + "->tlm_buffer.tlm[tlm_id].is_null_packet) return TF_TLM_FUNC_ACK_NULL_PACKET;\n"
        )
        body_c += "\n"
        body_c += " buffered_packet = &(" + driver_name + "->tlm_buffer.tlm[tlm_id].packet);\n"
        body_c += " *len = CTP_get_packet_len(buffered_packet);\n"
        body_c += "\n"
        body_c += " if (*len > max_len) return TF_TLM_FUNC_ACK_TOO_SHORT_LEN;\n"
        body_c += "\n"
        body_c += " memcpy(packet, &buffered_packet->packet, (size_t)(*len));\n"
        body_c += " return TF_TLM_FUNC_ACK_SUCCESS;\n"
        body_c += "}}\n"
        body_c += "\n"
        output_file_path = (
            settings["c2a_root_dir"]
            + r"src_user/Drivers/"
            + settings["other_obc_data"][i]["driver_path"]
        )
        OutputTlmBufferC_(
            output_file_path + obc_name.lower() + "_telemetry_buffer.c", obc_name, body_c, settings
        )
        OutputTlmBufferH_(
            output_file_path + obc_name.lower() + "_telemetry_buffer.h", obc_name, body_h, settings
        )
        OutputTlmDataDefH_(
            output_file_path + obc_name.lower() + "_telemetry_data_definitions.h",
            obc_name,
            tlmdef_body_h,
            settings,
        )
def OutputTlmBufferC_(file_path, name, body, settings):
    """Render and write the telemetry-buffer .c source for OBC *name*.

    ``{_obc_name_upper}`` / ``{_obc_name_lower}`` / ``{_obc_name_capit}``
    placeholders in *body* (and in the fixed header) are expanded before
    the file is written to *file_path*.
    """
    header = """
#pragma section REPRO
/**
 * @file
 * @brief テレメトリバッファー(テレメ中継)
 * @note このコードは自動生成されています!
 */
#include "./{_obc_name_lower}_telemetry_definitions.h"
#include "./{_obc_name_lower}_telemetry_buffer.h"
#include "./{_obc_name_lower}.h"
#include <string.h>
"""[1:]  # drop the leading newline
    footer = """
#pragma section
"""[1:]  # drop the leading newline
    rendered = (header + body + footer).format(
        _obc_name_upper=name.upper(),
        _obc_name_lower=name.lower(),
        _obc_name_capit=name.capitalize(),
    )
    with open(file_path, mode="w", encoding=settings["output_file_encoding"]) as fh:
        fh.write(rendered)
def OutputTlmBufferH_(file_path, name, body, settings):
name_upper = name.upper()
name_lower = name.lower()
name_capit = name.capitalize()
output = ""
output += """
/**
* @file
* @brief テレメトリバッファー(テレメ中継)
* | |
'''
<NAME>
set up: 2020-1-9
integrate image and label into one file
-- fiducial1024_v1
'''
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2
# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils
def getDatasets(dir):
    """Return the names of all entries directly under *dir*."""
    entries = os.listdir(dir)
    return entries
class perturbed(utils.BasePerturbed):
def __init__(self, path, bg_path, save_path, save_suffix):
self.path = path
self.bg_path = bg_path
self.save_path = save_path
self.save_suffix = save_suffix
def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'):
origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR)
save_img_shape = [512*2, 480*2] # 320
# reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1])
reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])
# reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18])
# reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09])
base_img_shrink = save_img_shape[0] - reduce_value
# enlarge_img_shrink = [1024, 768]
# enlarge_img_shrink = [896, 672] # 420
enlarge_img_shrink = [512*4, 480*4] # 420
# enlarge_img_shrink = [896*2, 768*2] # 420
# enlarge_img_shrink = [896, 768] # 420
# enlarge_img_shrink = [768, 576] # 420
# enlarge_img_shrink = [640, 480] # 420
''''''
im_lr = origin_img.shape[0]
im_ud = origin_img.shape[1]
reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
# reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14])
if im_lr > im_ud:
im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2)
im_lr = save_img_shape[0] - reduce_value
else:
base_img_shrink = save_img_shape[1] - reduce_value
im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2)
im_ud = base_img_shrink
if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5:
repeat_time = min(repeat_time, 8)
edge_padding = 3
im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1
im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1
im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64)
im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64)
# im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1
# im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1
# im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64)
# im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64)
im_x, im_y = np.meshgrid(im_hight, im_wide)
segment_x = (im_lr) // (fiducial_points-1)
segment_y = (im_ud) // (fiducial_points-1)
# plt.plot(im_x, im_y,
# color='limegreen',
# marker='.',
# linestyle='')
# plt.grid(True)
# plt.show()
self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)
perturbed_bg_ = getDatasets(self.bg_path)
perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_)
perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)
mesh_shape = self.origin_img.shape[:2]
self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img)
# self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img)
self.new_shape = self.synthesis_perturbed_img.shape[:2]
perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA)
origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2)
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2))
# self.perturbed_xy_ = pixel_position.copy().astype(np.float32)
# fiducial_points_grid = origin_pixel_position[im_x, im_y]
self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2))
x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape)
origin_pixel_position += [x_min, y_min]
x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1])
x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16)
y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16)
x_min += x_shift
x_max += x_shift
y_min += y_shift
y_max += y_shift
'''im_x,y'''
im_x += x_min
im_y += y_min
self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img
self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position
synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy()
synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy()
foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16)
foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16)
foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label
# synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max)
# synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max)
'''*****************************************************************'''
is_normalizationFun_mixture = self.is_perform(0.2, 0.8)
# if not is_normalizationFun_mixture:
normalizationFun_0_1 = False
# normalizationFun_0_1 = self.is_perform(0.5, 0.5)
if fold_curve == 'fold':
fold_curve_random = True
# is_normalizationFun_mixture = False
normalizationFun_0_1 = self.is_perform(0.2, 0.8)
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99)
alpha_perturbed = random.randint(80, 160) / 100
# is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99)
synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
# synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16)
synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
alpha_perturbed_change = self.is_perform(0.5, 0.5)
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9)
for repeat_i in range(repeat_time):
if alpha_perturbed_change:
if fold_curve == 'fold':
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
alpha_perturbed = random.randint(80, 160) / 100
''''''
linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1,
self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1]
linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1,
self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1]
linspace_x_seq = [1, 2, 3]
linspace_y_seq = [1, 2, 3]
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_p = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice:
linspace_x_seq.remove(r_x)
linspace_y_seq.remove(r_y)
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_pp = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
# perturbed_p, perturbed_pp = np.array(
# [random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10]) \
# , np.array([random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10])
# perturbed_p, perturbed_pp = np.array(
# [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \
# , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10])
''''''
perturbed_vp = perturbed_pp - perturbed_p
perturbed_vp_norm = np.linalg.norm(perturbed_vp)
perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm
''''''
# perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100])
# perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100])
if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7):
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100])
# perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100])
else:
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
# perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100])
perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100])
# perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100])
# perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10])
''''''
if fold_curve == 'fold':
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
''''''
if fold_curve_random:
# omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed)
# omega_perturbed = alpha_perturbed**perturbed_d
omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed)
else:
omega_perturbed = 1 - perturbed_d ** alpha_perturbed
'''shadow'''
if self.is_perform(0.6, 0.4):
synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255)
''''''
if relativeShift_position in ['position', 'relativeShift_v2']:
self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0)
else:
print('relativeShift_position error')
exit()
'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
# synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8)
synthesis_perturbed_label[:, :, 0] *= foreORbackground_label
synthesis_perturbed_label[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 0] *= foreORbackground_label
synthesis_perturbed_img[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 2] *= foreORbackground_label
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
'''
'''perspective'''
perspective_shreshold = random.randint(26, 36)*10 # 280
x_min_per, y_min_per, x_max_per, y_max_per = self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold)
pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]])
e_1_ = x_max_per - x_min_per
e_2_ = y_max_per - y_min_per
e_3_ = e_2_
e_4_ = e_1_
perspective_shreshold_h = e_1_*0.02
perspective_shreshold_w = e_2_*0.02
a_min_, a_max_ = 70, 110
# if self.is_perform(1, 0):
if fold_curve == 'curve' and self.is_perform(0.5, 0.5):
if self.is_perform(0.5, 0.5):
while True:
pts2 = np.around(
np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(
np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) | |
# Copyright 2012-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing pymongo
"""
import collections
import collections.abc
import contextlib
import functools
import os
import re
import sys
import threading
import time
import warnings
from collections import defaultdict
from functools import partial

from bson import json_util, py3compat
from bson.objectid import ObjectId

from pymongo import MongoClient, monitoring
from pymongo.errors import OperationFailure
from pymongo.monitoring import _SENSITIVE_COMMANDS
from pymongo.read_concern import ReadConcern
from pymongo.server_selectors import (any_server_selector,
                                      writable_server_selector)
from pymongo.write_concern import WriteConcern

from test import client_context, db_user, db_pwd
IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000)
class WhiteListEventListener(monitoring.CommandListener):
    """Command listener that records only events for whitelisted commands.

    Events are collected in ``self.results`` under the keys ``'started'``,
    ``'succeeded'`` and ``'failed'``.
    """

    def __init__(self, *commands):
        self.commands = set(commands)
        self.results = defaultdict(list)

    def _record(self, phase, event):
        # Keep only events whose command name was whitelisted.
        if event.command_name in self.commands:
            self.results[phase].append(event)

    def started(self, event):
        self._record('started', event)

    def succeeded(self, event):
        self._record('succeeded', event)

    def failed(self, event):
        self._record('failed', event)
class EventListener(monitoring.CommandListener):
    """Command listener that records every event, keyed by lifecycle phase."""

    def __init__(self):
        self.results = defaultdict(list)

    def _record(self, phase, event):
        # Append *event* under its phase key ('started'/'succeeded'/'failed').
        self.results[phase].append(event)

    def started(self, event):
        self._record('started', event)

    def succeeded(self, event):
        self._record('succeeded', event)

    def failed(self, event):
        self._record('failed', event)

    def started_command_names(self):
        """Return list of command names started."""
        return [evt.command_name for evt in self.results['started']]
class OvertCommandListener(EventListener):
    """A CommandListener that ignores sensitive commands."""

    @staticmethod
    def _is_sensitive(event):
        # _SENSITIVE_COMMANDS holds lower-cased command names.
        return event.command_name.lower() in _SENSITIVE_COMMANDS

    def started(self, event):
        if not self._is_sensitive(event):
            super(OvertCommandListener, self).started(event)

    def succeeded(self, event):
        if not self._is_sensitive(event):
            super(OvertCommandListener, self).succeeded(event)

    def failed(self, event):
        if not self._is_sensitive(event):
            super(OvertCommandListener, self).failed(event)
class ServerAndTopologyEventListener(monitoring.ServerListener,
                                     monitoring.TopologyListener):
    """Appends every server/topology event to ``self.results`` in order."""

    def __init__(self):
        self.results = []

    def _log(self, event):
        self.results.append(event)

    def opened(self, event):
        self._log(event)

    def description_changed(self, event):
        self._log(event)

    def closed(self, event):
        self._log(event)
class HeartbeatEventListener(monitoring.ServerHeartbeatListener):
    """Collects server heartbeat events (started/succeeded/failed) in order."""

    def __init__(self):
        self.results = []

    def _log(self, event):
        self.results.append(event)

    def started(self, event):
        self._log(event)

    def succeeded(self, event):
        self._log(event)

    def failed(self, event):
        self._log(event)
class ScenarioDict(dict):
    """Dict that returns {} for any unknown key, recursively.

    Nested mappings are converted to ScenarioDict and nested sequences to
    lists of converted items, so chained lookups such as
    ``scenario['a']['b']['c']`` never raise KeyError.
    """

    def __init__(self, data):
        def convert(v):
            # collections.Mapping/Sequence aliases were removed in
            # Python 3.10; the ABCs live in collections.abc.
            if isinstance(v, collections.abc.Mapping):
                return ScenarioDict(v)
            if isinstance(v, str):
                # Strings are sequences too; return them unchanged instead
                # of exploding them into a list of characters.
                return v
            if isinstance(v, collections.abc.Sequence):
                return [convert(item) for item in v]
            return v

        dict.__init__(self, [(k, convert(v)) for k, v in data.items()])

    def __getitem__(self, item):
        try:
            return dict.__getitem__(self, item)
        except KeyError:
            # Unlike a defaultdict, don't set the key, just return a dict.
            return ScenarioDict({})
class CompareType(object):
    """Placeholder that compares equal to any instance of a given type."""

    def __init__(self, type):
        self.type = type

    def __eq__(self, other):
        return isinstance(other, self.type)

    def __ne__(self, other):
        """Needed for Python 2."""
        return not self == other
class TestCreator(object):
    """Class to create test cases from specifications."""

    def __init__(self, create_test, test_class, test_path):
        """Create a TestCreator object.

        :Parameters:
          - `create_test`: callback that returns a test case. The callback
            must accept the following arguments - a dictionary containing the
            entire test specification (the `scenario_def`), a dictionary
            containing the specification for which the test case will be
            generated (the `test_def`).
          - `test_class`: the unittest.TestCase class in which to create the
            test case.
          - `test_path`: path to the directory containing the JSON files with
            the test specifications.
        """
        self._create_test = create_test
        self._test_class = test_class
        self.test_path = test_path

    def _ensure_min_max_server_version(self, scenario_def, method):
        """Test modifier that enforces a version range for the server on a
        test case."""
        if 'minServerVersion' in scenario_def:
            min_ver = tuple(
                int(elt) for
                elt in scenario_def['minServerVersion'].split('.'))
            if min_ver is not None:
                method = client_context.require_version_min(*min_ver)(method)
        if 'maxServerVersion' in scenario_def:
            max_ver = tuple(
                int(elt) for
                elt in scenario_def['maxServerVersion'].split('.'))
            if max_ver is not None:
                method = client_context.require_version_max(*max_ver)(method)
        return method

    @staticmethod
    def valid_topology(run_on_req):
        """True if the current topology matches the 'topology' requirement."""
        # Default to all topology types when 'topology' is absent.
        return client_context.is_topology_type(
            run_on_req.get('topology', ['single', 'replicaset', 'sharded']))

    @staticmethod
    def min_server_version(run_on_req):
        """True if the server is at least the 'minServerVersion' requirement."""
        version = run_on_req.get('minServerVersion')
        if version:
            min_ver = tuple(int(elt) for elt in version.split('.'))
            return client_context.version >= min_ver
        return True

    @staticmethod
    def max_server_version(run_on_req):
        """True if the server is at most the 'maxServerVersion' requirement."""
        version = run_on_req.get('maxServerVersion')
        if version:
            max_ver = tuple(int(elt) for elt in version.split('.'))
            return client_context.version <= max_ver
        return True

    def should_run_on(self, scenario_def):
        """True if the current cluster satisfies any 'runOn' requirement."""
        run_on = scenario_def.get('runOn', [])
        if not run_on:
            # Always run these tests.
            return True
        for req in run_on:
            if (self.valid_topology(req) and
                    self.min_server_version(req) and
                    self.max_server_version(req)):
                return True
        return False

    def ensure_run_on(self, scenario_def, method):
        """Test modifier that enforces a 'runOn' on a test case."""
        return client_context._require(
            lambda: self.should_run_on(scenario_def),
            "runOn not satisfied",
            method)

    def create_tests(self):
        """Walk test_path, build a test method per scenario test, and attach
        each one to the configured test class."""
        for dirpath, _, filenames in os.walk(self.test_path):
            dirname = os.path.split(dirpath)[-1]
            for filename in filenames:
                with open(os.path.join(dirpath, filename)) as scenario_stream:
                    scenario_def = ScenarioDict(
                        json_util.loads(scenario_stream.read()))
                test_type = os.path.splitext(filename)[0]
                # Construct test from scenario.
                for test_def in scenario_def['tests']:
                    test_name = 'test_%s_%s_%s' % (
                        dirname, test_type,
                        str(test_def['description'].replace(" ", "_")))
                    new_test = self._create_test(
                        scenario_def, test_def, test_name)
                    new_test = self._ensure_min_max_server_version(
                        scenario_def, new_test)
                    new_test = self.ensure_run_on(
                        scenario_def, new_test)
                    new_test.__name__ = test_name
                    setattr(self._test_class, new_test.__name__, new_test)
def _connection_string(h, authenticate):
if h.startswith("mongodb://"):
return h
elif client_context.auth_enabled and authenticate:
return "mongodb://%s:%s@%s" % (db_user, db_pwd, str(h))
else:
return "mongodb://%s" % (str(h),)
def _mongo_client(host, port, authenticate=True, direct=False, **kwargs):
    """Return a MongoClient built from the test context's defaults.

    Caller-supplied **kwargs override the context's default options; the
    replica set name is added unless *direct* requests a direct connection.
    """
    target_host = host or client_context.host
    target_port = port or client_context.port
    # Start from the context defaults, then layer caller overrides on top.
    options = client_context.default_client_options.copy()
    if client_context.replica_set_name and not direct:
        options['replicaSet'] = client_context.replica_set_name
    options.update(kwargs)
    uri = _connection_string(target_host, authenticate)
    return MongoClient(uri, target_port, **options)
def single_client_noauth(h=None, p=None, **kwargs):
    """Make a direct connection. Don't authenticate.

    `h`/`p` default to the test context's host and port.
    """
    return _mongo_client(h, p, authenticate=False, direct=True, **kwargs)
def single_client(h=None, p=None, **kwargs):
    """Make a direct connection, and authenticate if necessary.

    `h`/`p` default to the test context's host and port.
    """
    return _mongo_client(h, p, direct=True, **kwargs)
def rs_client_noauth(h=None, p=None, **kwargs):
    """Connect to the replica set. Don't authenticate.

    `h`/`p` default to the test context's host and port.
    """
    return _mongo_client(h, p, authenticate=False, **kwargs)
def rs_client(h=None, p=None, **kwargs):
    """Connect to the replica set and authenticate if necessary.

    `h`/`p` default to the test context's host and port.
    """
    return _mongo_client(h, p, **kwargs)
def rs_or_single_client_noauth(h=None, p=None, **kwargs):
    """Connect to the replica set if there is one, otherwise the standalone.

    Like rs_or_single_client, but does not authenticate.
    """
    return _mongo_client(h, p, authenticate=False, **kwargs)
def rs_or_single_client(h=None, p=None, **kwargs):
    """Connect to the replica set if there is one, otherwise the standalone.

    Authenticates if necessary.
    """
    return _mongo_client(h, p, **kwargs)
def one(s):
    """Return a single element from the iterable *s*.

    Raises StopIteration when *s* is empty.
    """
    iterator = iter(s)
    return next(iterator)
def oid_generated_on_process(oid):
    """Makes a determination as to whether the given ObjectId was generated
    by the current process, based on the 5-byte random number in the ObjectId.

    NOTE(review): relies on the private ``ObjectId._random()`` helper and on
    the 5-byte slice ``binary[4:9]`` holding the per-process random value --
    verify against the installed bson version.
    """
    return ObjectId._random() == oid.binary[4:9]
def delay(sec):
    """Return JavaScript source for a server-side function that sleeps
    *sec* seconds and then returns true."""
    js_template = 'function() { sleep(%f * 1000); return true; }'
    return js_template % sec
def get_command_line(client):
    """Return the server's startup options via the getCmdLineOpts admin
    command, asserting the command succeeded."""
    result = client.admin.command('getCmdLineOpts')
    assert result['ok'] == 1, "getCmdLineOpts() failed"
    return result
def camel_to_snake(camel):
    """Convert a CamelCase (or mixedCase) name to snake_case."""
    # Pass 1: split an uppercase-then-lowercase run from whatever precedes
    # it. Pass 2: split remaining lower/digit-to-upper boundaries.
    first_pass = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
    second_pass = re.sub('([a-z0-9])([A-Z])', r'\1_\2', first_pass)
    return second_pass.lower()
def camel_to_upper_camel(camel):
    """Upper-case the first character of *camel* (lowerCamel -> UpperCamel).

    Unlike str.capitalize(), the rest of the string is left untouched.
    Returns '' unchanged instead of raising IndexError.
    """
    # Slicing (camel[:1]) is safe on the empty string, where camel[0] raised.
    return camel[:1].upper() + camel[1:]
def camel_to_snake_args(arguments):
    """Rename every key of *arguments* in place from camelCase to
    snake_case and return the same mapping."""
    # Snapshot the keys first: the mapping is mutated while renaming.
    original_names = list(arguments)
    for original_name in original_names:
        snake_name = camel_to_snake(original_name)
        arguments[snake_name] = arguments.pop(original_name)
    return arguments
def parse_collection_options(opts):
    """Convert camelCase collection options from a JSON test into their
    snake_case equivalents (read_preference, write_concern, read_concern),
    mutating and returning *opts*."""
    if 'readPreference' in opts:
        pref_spec = opts.pop('readPreference')
        opts['read_preference'] = parse_read_preference(pref_spec)
    if 'writeConcern' in opts:
        concern_spec = dict(opts.pop('writeConcern'))
        opts['write_concern'] = WriteConcern(**concern_spec)
    if 'readConcern' in opts:
        concern_spec = dict(opts.pop('readConcern'))
        opts['read_concern'] = ReadConcern(**concern_spec)
    return opts
def server_started_with_option(client, cmdline_opt, config_opt):
    """Check if the server was started with a particular option.

    :Parameters:
      - `cmdline_opt`: The command line option (i.e. --nojournal)
      - `config_opt`: The config file option (i.e. nojournal)
    """
    command_line = get_command_line(client)
    # Prefer the structured 'parsed' document when the server provides one.
    if 'parsed' in command_line:
        parsed_options = command_line['parsed']
        if config_opt in parsed_options:
            return parsed_options[config_opt]
    # Fall back to scanning the raw argv for the command line flag.
    return cmdline_opt in command_line['argv']
def server_started_with_auth(client):
    """Return True if the server was started with authentication enabled.

    Checks, in order: an 'unauthorized' failure of getCmdLineOpts itself,
    the parsed security section (2.6+), the legacy parsed auth/keyFile
    options (2.0+), and finally the raw argv.
    """
    try:
        command_line = get_command_line(client)
    except OperationFailure as e:
        msg = e.details.get('errmsg', '')
        if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
            # Unauthorized. Being refused getCmdLineOpts itself implies
            # auth is on.
            return True
        raise
    # MongoDB >= 2.0
    if 'parsed' in command_line:
        parsed = command_line['parsed']
        # MongoDB >= 2.6
        if 'security' in parsed:
            security = parsed['security']
            # >= rc3
            if 'authorization' in security:
                return security['authorization'] == 'enabled'
            # < rc3: either explicit auth or a keyFile enables auth.
            return security.get('auth', False) or bool(security.get('keyFile'))
        return parsed.get('auth', False) or bool(parsed.get('keyFile'))
    # Legacy
    argv = command_line['argv']
    return '--auth' in argv or '--keyFile' in argv
def server_started_with_nojournal(client):
    """Return True if journaling is disabled on the connected server."""
    command_line = get_command_line(client)
    # MongoDB 2.6+ reports journaling under parsed.storage.journal.enabled.
    parsed_options = command_line.get('parsed', {})
    storage_options = parsed_options.get('storage', {})
    if 'journal' in storage_options:
        return not storage_options['journal']['enabled']
    # Older servers: look for the --nojournal/nojournal startup option.
    return server_started_with_option(client, '--nojournal', 'nojournal')
def server_is_master_with_slave(client):
    """Return True if the server was started in legacy master mode."""
    startup_options = get_command_line(client)
    if 'parsed' in startup_options:
        return startup_options['parsed'].get('master', False)
    return '--master' in startup_options['argv']
def drop_collections(db):
    """Drop every non-system collection in *db*."""
    # The negative lookahead excludes names beginning with "system.".
    non_system = {"name": {"$regex": r"^(?!system\.)"}}
    names = db.list_collection_names(filter=non_system)
    for name in names:
        db.drop_collection(name)
def remove_all_users(db):
    """Remove every user from *db*, using the test suite's write concern."""
    db.command("dropAllUsersFromDatabase", 1,
               writeConcern={"w": client_context.w})
def joinall(threads):
    """Join threads with a 5-minute timeout, assert joins succeeded"""
    for t in threads:
        t.join(300)
        # Thread.isAlive() was removed in Python 3.9; is_alive() is
        # available on every supported Python (2.6+ and all of 3.x).
        assert not t.is_alive(), "Thread %s hung" % t
def connected(client):
"""Convenience to wait for a newly-constructed client to connect."""
with warnings.catch_warnings():
# Ignore warning that "ismaster" is always routed to primary even
# if client's read preference isn't PRIMARY.
warnings.simplefilter("ignore", UserWarning)
| |
# <gh_stars>10-100
import os
import time
import json
import glob
import datetime
import csv
import logging
import shutil
import sys
import copy
import pandas as pd
import jsonschema
from mlapp.config import settings
from mlapp.managers.pipeline_manager import PipelineManager
from mlapp.utils import general as general_utils
from mlapp.utils.logger import Logger
from mlapp.managers.io_manager import IOManager
from mlapp.handlers.wrappers.database_wrapper import database_instance
from mlapp.handlers.wrappers.file_storage_wrapper import file_storage_instance
from mlapp.handlers.wrappers.spark_wrapper import spark_instance
from mlapp.utils.exceptions.framework_exceptions import SkipToLocalException, UnsupportedFileType, DataFrameNotFound
from mlapp.utils.exceptions.base_exceptions import ConfigKeyError, FrameworkException, ConfigError, JobManagerException
from matplotlib.figure import Figure
try:
import plotly.graph_objects as go
except ModuleNotFoundError:
go = None
class JobManager(object):
def __init__(self, job_id, config, **kwargs):
    """
    Constructor for the job manager.
    Its main role is to populate the input from the external environment (e.g. trained model's objects) and
    store the outputs in the relevant location (e.g. store figures and objects in a file storage service, or in a local
    directory in case a file storage is not defined; store data in a database; and more).
    The job manager uses the singleton handlers to connect to the different services like DB and FileStore.
    :param job_id: the id of this current job.
    :param config: the config string of user configurations for the asset
    :param kwargs: run_id and any initial input to the asset.
        run_id - all output files to be created will be given this identifier
    """
    # Handlers: singleton wrappers around the configured external services.
    self.output_logger_filename = "output_logs.csv"
    self.file_store_handler = file_storage_instance
    self.db_handler = database_instance
    self.spark_handler = spark_instance
    # Maps an object "mlapp type" to its storage extension and the
    # load/store callbacks used to move objects between memory and files.
    self.file_objects_types = {
        "pyspark": {
            # Spark objects are zipped when a remote file store is configured.
            "type": ("pyspark" if self.file_store_handler.empty() else "pyspark.zip"),
            "load_method": self._load_files_into_spark_object,
            "store_method": self._store_spark_object_into_files
        },
        "pkl": {
            "type": "pkl.pkl",
            "load_method": self._load_pickle_as_object,
            "store_method": self._store_object_as_pickle
        }
        # "tensorflow",
        # "pytorch",
        # "keras",
    }
    # config parts
    self.job_id = job_id
    self.config = config
    self.data_settings = config.get('data_settings', {})
    self.model_settings = config.get('model_settings', {})
    self.flow_settings = config.get('flow_settings', {})
    self.job_settings = config.get('job_settings', {})
    # Storage locations: buckets for the file store, paths for local disk.
    self.filestore_buckets = settings.get("file_store_buckets", {})
    self.local_storage_path = settings.get("local_storage_path", "output")
    self.temporary_storage_path = settings.get("temporary_storage_path", "temporary_output")
    self.temporary_storage_files = set()
    self.set_output_folders()
    # Timing / environment bookkeeping.
    self.start_time = time.strftime('%Y-%m-%d %H:%M:%S')
    self.last_time = self.start_time
    self.deploy_environment = settings.get("deploy_environment", "default")
    self.deploy_version = settings.get("deploy_version", "-")
    # Identity of this run; `run_id` is a required keyword argument.
    self.identity = {
        'run_id': kwargs['run_id'],
        'pipeline': self.job_settings.get('pipeline', None),
        'asset_name': self.get_asset_name(kwargs.get('has_flow_summary')),
        'asset_label': self.job_settings.get('asset_label', None)
    }
    # consts
    self.DOT = '.'
    self.LOAD = 'LOAD'
    self.SAVE = 'SAVE'
    self.DATA = 'data'
    self.MODELS = 'models'
    self.FEATURES = 'features'
    self.INPUT_FROM_PREDECESSOR = 'input_from_predecessor'
    # Inputs loaded for the pipeline and the outputs it produces.
    self.input_manager = IOManager()
    self.output_manager = IOManager()
def __del__(self):
    # Best-effort cleanup of the temporary output folders when this
    # manager is garbage-collected.
    self.clean_output_folders()
##########################################################################
# #
# Run Pipeline #
# #
##########################################################################
def run_pipeline(self, *args, **kwargs):
    """
    Loads the inputs defined in the config (e.g. pre-trained objects, metadata and more),
    initiates a pipeline manager, runs it, and stores the outputs in the relevant locations.
    :param args: arguments to be passed to the first stage in the pipeline
    :param kwargs: keyword arguments to be loaded into the Input (an IOManager instance)
    :return: IOManager instance with all the outputs to be stored
    :raises Exception: re-raises any error from the pipeline after logging it.
    """
    try:
        self._store_job_config()
        self.log_to_file(run_id=None)  # no `run_id` output yet
        self.validate_config()
        self.load_input(**kwargs)
        pipeline_manager = PipelineManager(
            self.identity['run_id'],
            self.job_settings['pipeline'],
            _input=self.input_manager, _output=self.output_manager,
            config=copy.deepcopy(self.config),
            **kwargs
        )
        self.pipeline_start_print(self.identity['run_id'])
        pipeline_manager.run(*args)
        self.pipeline_end_print(self.identity['run_id'])
        self.store_output()
        self.temporary_files_message()
    except Exception as error:
        self.identity['run_id'] = None  # pipeline failed
        log = logging.getLogger(self.job_id)
        log.error(str(error), exc_info=True)
        # Bare `raise` preserves the original traceback.
        raise
    finally:
        # Always persist the logger file, whether the run succeeded or not.
        self._store_logger_file(run_id=self.identity['run_id'])
    # BUG FIX: this `return` used to live inside the `finally` block, which
    # silently swallowed the exception re-raised above (flake8 B012) so
    # pipeline failures never propagated to the caller.
    return self.output_manager
##########################################################################
# #
# Get Functions #
# #
##########################################################################
def get_custom_filename(self, run_id, name):
    """
    :param run_id: unique id of the asset
    :param name: file name
    :return: '<run_id>_<asset_name>_<name>'
    """
    return '_'.join([str(run_id), self.identity['asset_name'], name])
def get_dataframe_filename(self, run_id, df_name):
    """
    :param run_id: unique id of the asset
    :param df_name: dataframe name
    :return: '<run_id>_<asset_name>.<df_name>.csv'
    """
    prefix = str(run_id) + '_' + self.identity['asset_name']
    return prefix + self.DOT + df_name + '.csv'
def get_config_filename(self, run_id):
    """
    :param run_id: unique id of the asset
    :return: '<run_id>_<asset_name>.config.json'
    """
    return '%s_%s.config.json' % (run_id, self.identity['asset_name'])
def get_job_config_filename(self):
    """
    :return: '<job_id>.config.json'
    """
    return '%s.config.json' % (self.job_id,)
def get_metadata_filename(self, run_id):
    """
    :param run_id: unique id of the asset
    :return: '<run_id>_<asset_name>.metadata.json'
    """
    return '%s_%s.metadata.json' % (run_id, self.identity['asset_name'])
@staticmethod
def get_flow_metadata_filename(run_id):
"""
:param run_id: unique id of the asset
:return: proper concatenation of the flow-summary-asset metadata file name
"""
return str(run_id) + '_flow_summary.metadata.json'
def get_features_filename(self, run_id):
    """
    :param run_id: unique id of the asset
    :return: '<run_id>_<asset_name>.features.csv'
    """
    return '%s_%s.features.csv' % (run_id, self.identity['asset_name'])
def get_objects_filename(self, run_id, manager, mlapp_type, file_type, model_name=None, class_name=None):
    """
    Build the storage file name for a serialized object.
    :param run_id: string, unique id of the asset
    :param manager: string, manager name
    :param mlapp_type: string, one of the keys in self.file_objects_types
    :param file_type: string, file extension, e.g. csv, txt
    :param model_name: string, name of the asset (optional)
    :param class_name: string, spark object class name (optional)
    :return: '<run_id>_<asset_name>[.<class_name>][.<model_name>].<manager>.<mlapp_type>.<file_type>'
    """
    parts = [str(run_id) + '_' + self.identity['asset_name']]
    # Optional qualifiers keep a fixed order: class name, then model name.
    if class_name is not None:
        parts.append(class_name)
    if model_name is not None:
        parts.append(model_name)
    parts.extend([manager, mlapp_type, file_type])
    return self.DOT.join(parts)
def get_objects_modules_filename(self, run_id):
    """
    :param run_id: string, unique id of the asset
    :return: '<run_id>_<asset_name>.objects.modules.json'
    """
    return '%s_%s.objects.modules.json' % (run_id, self.identity['asset_name'])
def get_logger_filename(self, run_id=None):
    """
    :param run_id: string, unique id of the asset (optional)
    :return: '[<run_id>_]<job_id>.logger.txt'
    """
    base_name = str(self.job_id) + '.logger.txt'
    if run_id is None:
        return base_name
    return str(run_id) + '_' + base_name
def get_asset_name(self, has_flow_summary):
    """
    Resolve the asset name from job_settings.
    :param has_flow_summary: if the config contains a flow summary section
    :return: the asset name, or None for a flow-summary-only job
    :raises ConfigKeyError: when neither 'asset_name' nor the deprecated
        'model_name' key is present in job_settings.
    """
    # A job explicitly flagged as not a flow summary carries no asset name.
    if has_flow_summary is not None and not has_flow_summary:
        return None
    asset_name = self.job_settings.get('asset_name', None)
    if asset_name is not None:
        return asset_name
    # Fall back to the deprecated 'model_name' key.
    asset_name = self.job_settings.get('model_name', None)
    if asset_name is None:
        raise ConfigKeyError('asset_name is required in job_settings config.')
    try:
        raise DeprecationWarning('model_name is deprecated please use asset_name instead.')
    except Warning as w:
        print("DeprecationWarning: " + str(w))
    return asset_name
###############################################################
# #
# Load Input #
# #
###############################################################
def load_input(self, **kwargs):
    """
    Main load-input step, called from run_pipeline.
    Loads any input specified in the config (e.g. trained model object, saved features DataFrame etc.)
    into the InputManager (an IOManager instance).
    :param kwargs: input from a predecessor pipeline, passed by the flow manager.
    :return: None
    """
    # Todo: 1. merge in kwargs into the input_manager by calling the update_recursive_dict(input,kwargs)
    if kwargs:
        self.input_manager.add_analysis_metadata(self.INPUT_FROM_PREDECESSOR, kwargs)
    # get ids from config
    config_model_id = self._load_model_id_from_config()
    config_data_id = self._load_data_id_from_config()
    config_reuse_features_id = self._load_reuse_features_id_from_config()
    # config_reuse_features_flag = self._load_reuse_features_flag_from_config()
    # if there are no ids in the config, skip the whole load-input step.
    if config_model_id is None and config_data_id is None and config_reuse_features_id is None:
        return
    # loads required files from storage.
    model_config = self.load_config(config_model_id)
    # looking for the right config_data_id (it may come from the reused
    # model's own config rather than the main config)
    config_data_id = self._decide_on_config_data_id(config_data_id, model_config, config_model_id)
    data_config = self.load_config(config_data_id)
    train_metadata = self.load_metadata(config_model_id)
    train_features = self.load_features(config_reuse_features_id)
    train_objects = self.load_objects(config_model_id, config_data_id)
    # updates config if necessary
    self.update_config(model_config, data_config)
    # gets train data and models metadata
    if train_metadata is not None:
        train_data_metadata = train_metadata.get(self.DATA, {})
        train_models_metadata = train_metadata.get(self.MODELS, {})
        # add analysis metadata to input manager
        self.input_manager.add_analysis_metadata(self.DATA, train_data_metadata)
        self.input_manager.add_analysis_metadata(self.MODELS, train_models_metadata)
    # gets train data and models objects
    if train_objects is not None:
        train_data_objects = train_objects.get(self.DATA, {})
        train_models_objects = train_objects.get(self.MODELS, {})
        # add objects to input manager
        self.input_manager.add_objects(self.DATA, train_data_objects)
        self.input_manager.add_objects(self.MODELS, train_models_objects)
    # check if features reused
    if train_features is not None:
        self.input_manager.add_dataframe(self.FEATURES, train_features)
@staticmethod
def _decide_on_config_data_id(config_data_id, model_config, config_model_id):
"""
Decide on the right data_id for model forecast to reuse
scenario 1: data_id is supplied in the main config
scenario 2: data_id missing in main config, but supplied in the reused model config (the train model | |
# <reponame>linlabbcm/rasmc<filename>python_scripts/1_chip_pipeline.py
#!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2017 YOUR NAME HERE and <NAME> lab
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
#Main method run script for processing of YOUR PROJECT HERE
#==========================================================================
#=============================DEPENDENCIES=================================
#==========================================================================
import sys, os
# Get the script's full local path
whereAmI = os.path.dirname(os.path.realpath(__file__))
pipeline_dir = '/storage/cylin/bin/pipeline/'
sys.path.append(whereAmI)
sys.path.append(pipeline_dir)
import pipeline_dfci
import utils
import string
import numpy
import os
import re
from collections import defaultdict
import subprocess
import itertools
from scipy import stats
#==========================================================================
#============================PARAMETERS====================================
#==========================================================================
# Core project parameters.
projectName = 'rasmc_all'
genome = 'rn6'
annotFile = '%s/annotation/%s_refseq.ucsc' % (pipeline_dir, genome)

# project folders
projectFolder = '/storage/cylin/grail/projects/%s' % (projectName)  # PATH TO YOUR PROJECT FOLDER
projectFolder = utils.formatFolder(projectFolder, True)

# standard folder names
gffFolder = '%sgff/' % (projectFolder)
macsFolder = '%smacsFolder/' % (projectFolder)
macsEnrichedFolder = '%smacsEnriched/' % (projectFolder)
mappedEnrichedFolder = '%smappedEnriched/' % (projectFolder)
mappedFolder = '%smappedFolder/' % (projectFolder)
wiggleFolder = '%swiggles/' % (projectFolder)
metaFolder = '%smeta/' % (projectFolder)
metaRoseFolder = '%smeta_rose/' % (projectFolder)
roseFolder = '%srose/' % (projectFolder)
fastaFolder = '%sfasta/' % (projectFolder)
# BUG FIX: bedFolder was previously assigned twice ('%sbed/' then
# '%sbeds/'); only the final '%sbeds/' value was ever used, so the dead
# first assignment has been removed.
bedFolder = '%sbeds/' % (projectFolder)
figuresFolder = '%sfigures/' % (projectFolder)
geneListFolder = '%sgeneListFolder/' % (projectFolder)
signalFolder = '%ssignalTables/' % (projectFolder)
tableFolder = '%stables/' % (projectFolder)

# mask Files
# genomeDirectory #select your genome
# genomeDirectory = '/grail/genomes/Mus_musculus/UCSC/mm9/Sequence/Chromosomes/'
# genomeDirectory = '/grail/genomes/Mus_musculus/UCSC/hg19/Sequence/Chromosomes/'

# making folders (creates any that do not yet exist)
folderList = [gffFolder, macsFolder, macsEnrichedFolder, mappedEnrichedFolder,
              mappedFolder, wiggleFolder, metaFolder, metaRoseFolder,
              roseFolder, fastaFolder, figuresFolder, geneListFolder,
              bedFolder, signalFolder, tableFolder]
for folder in folderList:
    pipeline_dfci.formatFolder(folder, True)

#==========================================================================
#============================LIST OF DATAFILES=============================
#==========================================================================

# this project will utilize multiple datatables
# data tables are organized largely by type/system
# some data tables overlap for ease of analysis

# ChIP-Rx
chip_data_file = '%sdata_tables/RASMC_CHIP_DATA_TABLE.txt' % (projectFolder)
#==========================================================================
#===========================MAIN METHOD====================================
#==========================================================================
def main():
print('main analysis for project %s' % (projectName))
print('changing directory to project folder')
os.chdir(projectFolder)
print('\n\n')
print('#======================================================================')
print('#======================I, LOADING DATA ANNOTATION======================')
print('#======================================================================')
print('\n\n')
#This section sanity checks each data table and makes sure both bam and .bai files are accessible
#for data file
pipeline_dfci.summary(chip_data_file)
#assumes macs has already been run and formatted
# run_macs(chip_data_file)
# sys.exit()
print('\n\n')
print('#======================================================================')
print('#======================II. PLOTTING BRD4 TRACKS========================')
print('#======================================================================')
print('\n\n')
# for BRD4
dataFile = chip_data_file
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/figure_1.gff'
outputFolder = utils.formatFolder('%sgenePlot' % (projectFolder),True)
plotName = 'rasmc_all_figure_1_brd4_tracks'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
groupList = []
for name in namesList:
if name.count('UNSTIM') > 0:
groupList.append('UNSTIM')
if name.count('PDGF_2H') > 0:
groupList.append('PDGF_2H')
if name.count('PDGF_24H') >0:
groupList.append('PDGF_24H')
print(namesList)
groupString = ','.join(groupList)
print(groupString)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString,rpm=True,rxGenome = '')
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/figure_2.gff'
plotName = 'rasmc_all_figure_2_brd4_tracks'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
groupList = []
for name in namesList:
if name.count('UNSTIM') > 0:
groupList.append('UNSTIM')
if name.count('PDGF_2H') > 0:
groupList.append('PDGF_2H')
if name.count('PDGF_24H') >0:
groupList.append('PDGF_24H')
print(namesList)
groupString = ','.join(groupList)
print(groupString)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')
print('AAA')
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/figure_3.gff'
plotName = 'rasmc_all_figure_3_brd4_tracks'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
groupList = []
for name in namesList:
if name.count('UNSTIM') > 0:
groupList.append('UNSTIM')
if name.count('PDGF_2H') > 0:
groupList.append('PDGF_2H')
if name.count('PDGF_24H') >0:
groupList.append('PDGF_24H')
print(namesList)
groupString = ','.join(groupList)
print(groupString)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')
figureGFF=[['chr16','Btd','',7671879,7881204,'','.','Btd']]
figureGFFPath='%sBtd.gff' % (gffFolder)
utils.unParseTable(figureGFF,figureGFFPath,'\t')
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/Btd.gff'
plotName = 'rasmc_all_btd_brd4_tracks'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
groupList = []
for name in namesList:
if name.count('UNSTIM') > 0:
groupList.append('UNSTIM')
if name.count('PDGF_2H') > 0:
groupList.append('PDGF_2H')
if name.count('PDGF_24H') >0:
groupList.append('PDGF_24H')
print(namesList)
groupString = ','.join(groupList)
print(groupString)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/enrich_tfs.gff'
plotName = 'rasmc_all_enrich_tfs_brd4_tracks'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
groupList = []
for name in namesList:
if name.count('UNSTIM') > 0:
groupList.append('UNSTIM')
if name.count('PDGF_2H') > 0:
groupList.append('PDGF_2H')
if name.count('PDGF_24H') >0:
groupList.append('PDGF_24H')
print(namesList)
groupString = ','.join(groupList)
print(groupString)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')
print('BBB')
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/Edn1.gff'
plotName = 'rasmc_all_edn1_brd4_tracks'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
groupList = []
for name in namesList:
if name.count('UNSTIM') > 0:
groupList.append('UNSTIM')
if name.count('PDGF_2H') > 0:
groupList.append('PDGF_2H')
if name.count('PDGF_24H') >0:
groupList.append('PDGF_24H')
print(namesList)
groupString = ','.join(groupList)
print(groupString)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/Edn1.gff'
plotName = 'rasmc_all_edn1_pol2_tracks'
namesList = ['RASMC_POL2_UNSTIM_NEW','RASMC_POL2_PDGF_2H_NEW','RASMC_POL2_PDGF_2H_JQ1_NEW','RASMC_POL2_PDGF_24H_NEW','RASMC_POL2_PDGF_24H_JQ1_NEW']
print(namesList)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MULTIPLE',extension=200,multiPage = False,debug=False,nameString ='' ,rpm=True,rxGenome = '')
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/regions_fosl1_atf4_spry2_thbs1.gff'
plotName = 'spry2_ext_brd4_tracks_with_up_down_beds'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
groupList = []
for name in namesList:
if name.count('UNSTIM') > 0:
groupList.append('UNSTIM')
if name.count('PDGF_2H') > 0:
groupList.append('PDGF_2H')
if name.count('PDGF_24H') >0:
groupList.append('PDGF_24H')
# ---- BRD4 / POL2 gene-track plotting ---------------------------------------
# The callBatchPlot invocations are commented out; the live code only sets up
# GFF paths, dataset name lists, and timepoint group strings.
# `namesList` / `groupList` here carry over from the preceding section.
print(namesList)
groupString = ','.join(groupList)
print(groupString)
bedString = '/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v2_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v24_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_2v24_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v2_down.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v24_down.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_2v24_down.bed'
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed = bedString,plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')

def _group_string(names):
    """Return the comma-joined timepoint group (UNSTIM / PDGF_2H / PDGF_24H)
    for each dataset name, preserving input order.

    Replaces the identical copy-pasted loop that previously appeared before
    every MERGE plot below.
    """
    groups = []
    for name in names:
        if name.count('UNSTIM') > 0:
            groups.append('UNSTIM')
        if name.count('PDGF_2H') > 0:
            groups.append('PDGF_2H')
        if name.count('PDGF_24H') > 0:
            groups.append('PDGF_24H')
    return ','.join(groups)

# spry2 extended region, POL2 tracks
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/regions_fosl1_atf4_spry2_thbs1.gff'
plotName = 'spry2_ext_pol2_tracks'
namesList = ['RASMC_POL2_UNSTIM_NEW','RASMC_POL2_PDGF_2H_NEW','RASMC_POL2_PDGF_2H_JQ1_NEW','RASMC_POL2_PDGF_24H_NEW','RASMC_POL2_PDGF_24H_JQ1_NEW']
print(namesList)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed =bedString,plotType= 'MULTIPLE',extension=200,multiPage = False,debug=False,nameString ='' ,rpm=True,rxGenome = '')
print('CCC')

# Acta2, BRD4 tracks
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/Acta2.gff'
plotName = 'Acta2_brd4_tracks'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
print(namesList)
groupString = _group_string(namesList)
print(groupString)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')

# Acta2, POL2 tracks
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/Acta2.gff'
plotName = 'Acta2_pol2_tracks'
namesList = ['RASMC_POL2_UNSTIM_NEW','RASMC_POL2_PDGF_2H_NEW','RASMC_POL2_PDGF_2H_JQ1_NEW','RASMC_POL2_PDGF_24H_NEW','RASMC_POL2_PDGF_24H_JQ1_NEW']
print(namesList)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed ='',plotType= 'MULTIPLE',extension=200,multiPage = False,debug=False,nameString ='' ,rpm=True,rxGenome = '')

# cluster B genes of interest, BRD4 tracks (with up/down BED overlays)
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/cluster_b_genes_of_interest.gff'
plotName = 'cluster_b_goi_brd4_tracks_with_up_down_beds'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
print(namesList)
groupString = _group_string(namesList)
print(groupString)
bedString = '/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v2_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v24_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_2v24_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v2_down.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v24_down.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_2v24_down.bed'
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed = bedString,plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')

# cluster B genes of interest, POL2 tracks
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/cluster_b_genes_of_interest.gff'
plotName = 'cluster_b_goi_pol2_tracks'
namesList = ['RASMC_POL2_UNSTIM_NEW','RASMC_POL2_PDGF_2H_NEW','RASMC_POL2_PDGF_2H_JQ1_NEW','RASMC_POL2_PDGF_24H_NEW','RASMC_POL2_PDGF_24H_JQ1_NEW']
print(namesList)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed = bedString,plotType= 'MULTIPLE',extension=200,multiPage = False,debug=False,nameString ='' ,rpm=True,rxGenome = '')
print('DDD')

# figure 3 supplement, BRD4 tracks (with up/down BED overlays)
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/figure_3_supp_tracks.gff'
plotName = 'figure_3_supp_brd4_tracks_with_up_down_beds'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
print(namesList)
groupString = _group_string(namesList)
print(groupString)
bedString = '/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v2_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v24_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_2v24_up.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v2_down.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_0v24_down.bed,/storage/cylin/grail/projects/rasmc_all/beds/enhPro_h3k_gff_regions_BRD4_2v24_down.bed'
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed = bedString,plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')

# figure 3 supplement, POL2 tracks
figureGFFPath = '/storage/cylin/grail/projects/rasmc_all/gff/figure_3_supp_tracks.gff'
plotName = 'figure_3_supp_pol2_tracks'
namesList = ['RASMC_POL2_UNSTIM_NEW','RASMC_POL2_PDGF_2H_NEW','RASMC_POL2_PDGF_2H_JQ1_NEW','RASMC_POL2_PDGF_24H_NEW','RASMC_POL2_PDGF_24H_JQ1_NEW']
print(namesList)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed = bedString,plotType= 'MULTIPLE',extension=200,multiPage = False,debug=False,nameString ='' ,rpm=True,rxGenome = '')

# thrb enhancer regions, BRD4 tracks
figureGFFPath= '/storage/cylin/grail/projects/rasmc_all/gff/thrb_enhancer_regions.gff'
plotName = 'thrb_enhancer_regions_brd4'
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
print(namesList)
groupString = _group_string(namesList)
print(groupString)
# pipeline_dfci.callBatchPlot(dataFile,figureGFFPath,plotName,outputFolder,namesList,uniform=True,bed = '',plotType= 'MERGE',extension=200,multiPage = False,debug=False,nameString = groupString ,rpm=True,rxGenome = '')
print('EEE')
# ---- Section III: enhancer-promoter analysis for BRD4 -----------------------
print('\n\n')
print('#======================================================================')
print('#======================III. ENHANCER PROMOTER==========================')
print('#======================================================================')
print('\n\n')
# Load the ChIP-seq data table used throughout this section.
dataDict = pipeline_dfci.loadDataTable(chip_data_file)
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
allLoci = []
# One-off (kept for reference): stitch all BRD4 peak regions into a GFF.
# for name in namesList:
#    collection = utils.importBoundRegion('/storage/cylin/grail/projects/rasmc_all/macsEnriched/%s_peaks.bed' %(name),name)
#    allLoci += collection.getLoci()
#do this for each one in the namesList
#then make a giant collection
# giant_collection = utils.LocusCollection(allLoci,50)
# stitched_collection = giant_collection.stitchCollection()
# gff = utils.locusCollectionToGFF(stitched_collection)
# utils.unParseTable(gff,'/storage/cylin/grail/projects/rasmc_all/gff/RN6_RASMC_BRD4_STITCHED_-0_+0.gff','\t')
# Inputs for the enhancer-promoter batch job (makeEnhProBash call commented below).
dataFile = chip_data_file
namesList = ['RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
gff = '%sRN6_RASMC_BRD4_STITCHED_-0_+0.gff' % (gffFolder)
activityTable = '%sactiveListTable.txt' % (tableFolder)
outputFolder = '%senhancerPromoter/BRD4/' % (projectFolder)
desc_string = '0_STITCH_-_JQ1'
# makeEnhProBash(dataFile,gff,activityTable,namesList,outputFolder,desc_string)
# Build a (NMID, name) TF list by intersecting the gene annotation with the
# CRC motif dictionary (generation loop kept for reference).
annotTable = utils.parseTable(annotFile,'\t')
dictFile = '/storage/cylin/bin/pipeline/crc/annotation/MotifDictionary.txt'
motifs = utils.parseTable(dictFile,'\t')
TFlist = []
# for line in annotTable[1:]:
#     for motif in motifs:
#         if line[12].upper()==motif[1]:
#             NMID = line[1]
#             name = line[12].upper()
#             new_line=[NMID,name]
#             TFlist.append(new_line)
# utils.unParseTable(TFlist,'/storage/cylin/grail/projects/rasmc_all/tables/TFlist.txt','\t')
# ---- Section IV: TF network analysis ---------------------------------------
print('\n\n')
print('#======================================================================')
print('#======================IV. TF NETWORK ANALYSIS=========================')
print('#======================================================================')
print('\n\n')
#==========================================================================
#===================ENH TF GFF=============================================
#==========================================================================
print('Creating Brd4 Enhancer TF in degree table from crc rasmc_h3k27ac_0_tss_ENHANCER_TF_TABLE.txt')
# Build a GFF of padded (+/-500bp) enhancer regions from the CRC enhancer TF
# table (kept for reference; regenerates the ENH_TF_IN_DEGREE gff below).
# EnhTableFile = '%scrc/rasmc_h3k27ac_0_tss/rasmc_h3k27ac_0_tss_ENHANCER_TF_TABLE.txt' % (projectFolder)
# EnhTable = utils.parseTable(EnhTableFile,'\t')
# genesOfInterest=[]
# enhGFF = []
#for each region in cluster 1 (as determined by last column) add a gff line for the +/-50kb region
#e.g. gffLine = ['chr19','1_merged_rasmc_h3k27ac_1_lociStitched','',25834879-50000,25916534+50000,'','.','','1_merged_rasmc_h3k27ac_1_lociStitched']
# print(EnhTable[1])
# for line in EnhTable:
#     if line != EnhTable[0]:
#         gene = line[4]
#         ID = line[0]
#         chrom = line[1]
#         TSS = ''
#         start = line[2]
#         stop = line[3]
#         line_gff_format=[chrom, gene,ID,(int(start)-500),(int(stop)+500),'','.',TSS,gene]
#         enhGFF.append(line_gff_format)
# enhGFF_Path = '%srasmc_h3k27ac_0_tss_BRD4_ENH_TF_IN_DEGREE.gff' % (gffFolder,)
# enhGFFTable = utils.unParseTable(enhGFF, enhGFF_Path, '\t')
#==========================================================================
#====================MAP BAMS BATCH========================================
#==========================================================================
print('Mapping Brd4 bams to Enhancer TF in degree gff')
# Map BRD4 and WCE-control bams onto the enhancer GFF (kept for reference).
# dataFile = chip_data_file
# gffList = ['/storage/cylin/grail/projects/rasmc_all/gff/rasmc_h3k27ac_0_tss_BRD4_ENH_TF_IN_DEGREE.gff']
# brd4NamesList = ['RASMC_WCE_UNSTIM_POOLED','RASMC_WCE_BRD4_UNSTIM_NEW','RASMC_WCE_PDGF_2H_POOLED','RASMC_WCE_BRD4_PDGF_2H_NEW','RASMC_WCE_PDGF_24H_POOLED','RASMC_WCE_PDGF_24H_NEW','RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW']
# pipeline_dfci.mapBamsBatch(dataFile,gffList,mappedFolder,overWrite =False,namesList = brd4NamesList,extension=200,rpm=False)
#==========================================================================
#====================MAKE SIGNAL TABLE=====================================
#==========================================================================
print('Making signal table for Brd4 Enhancer TF in degree')
# Signal-table inputs; the makeSignalTable call itself is commented out.
dataFile = chip_data_file
gffFile = '/storage/cylin/grail/projects/rasmc_all/gff/rasmc_h3k27ac_0_tss_BRD4_ENH_TF_IN_DEGREE.gff'
# pipeline_dfci.makeSignalTable(dataFile,gffFile,mappedFolder,namesList = ['RASMC_WCE_UNSTIM_POOLED','RASMC_WCE_BRD4_UNSTIM_NEW','RASMC_WCE_PDGF_2H_POOLED','RASMC_WCE_BRD4_PDGF_2H_NEW','RASMC_WCE_PDGF_24H_POOLED','RASMC_WCE_PDGF_24H_NEW','RASMC_BRD4_UNSTIM_REP1','RASMC_BRD4_UNSTIM_NEW','RASMC_BRD4_PDGF_2H_REP2','RASMC_BRD4_PDGF_2H_NEW','RASMC_BRD4_PDGF_24H_REP2','RASMC_BRD4_PDGF_24H_NEW'],medianNorm=False,output ='/storage/cylin/grail/projects/rasmc_all/signalTables/rasmc_h3k27ac_0_tss_BRD4_ENH_TF_IN_DEGREE_signal_table.txt')
#==========================================================================
#===================EDGE TABLE GFF=========================================
#==========================================================================
print('Creating Edge Table GFF | |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural net layers for TensorFlow Fold.
Layers are a convenience rather than an integral part of Fold.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import inspect
import itertools
# import google3
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_fold.blocks.blocks
import tensorflow_fold.blocks.result_types as tdt
class Layer(tdt.IOBase):
  """A callable that accepts and returns nests of batched of tensors."""

  def __init__(self, input_type=None, output_type=None, name_or_scope=None):
    """Sets up the layer's variable scope and IO types.

    Args:
      input_type: A type.
      output_type: A type.
      name_or_scope: An existing `tf.VariableScope`, or a string used to build
        a fresh scope via [`create_variable_scope`](#create_variable_scope)
        with defaults inherited from the current variable scope. Defaults to
        the class name. If the resulting scope has no caching device, one is
        installed (`lambda op: op.device`), because `tf.while` can be very
        inefficient when its variables are not cached locally.

    Raises:
      TypeError: If `name_or_scope` is neither a string nor a variable scope.
    """
    if name_or_scope is None:
      name_or_scope = type(self).__name__
    if isinstance(name_or_scope, six.string_types):
      name = name_or_scope
      self._vscope = create_variable_scope(name)
    elif isinstance(name_or_scope, tf.VariableScope):
      self._vscope = name_or_scope
      name = str(self._vscope.name)
    else:
      raise TypeError('name_or_scope must be a tf.VariableScope or a string: '
                      '%s' % (name_or_scope,))
    if self._vscope.caching_device is None:
      self._vscope.set_caching_device(lambda op: op.device)
    super(Layer, self).__init__(input_type, output_type, name)
    # Fill in pretty-printing metadata unless a subclass already set it.
    for attr_name, default_value in (
        ('_constructor_name', '__.%s' % self.__class__.__name__),
        ('_constructor_args', None),
        ('_constructor_kwargs', None)):
      if not hasattr(self, attr_name):
        setattr(self, attr_name, default_value)

  def set_constructor_args(self, name, args, kwargs):
    """Records the constructor info used to pretty-print this layer.

    Should be called by derived classes in __init__.

    Args:
      name: the fully qualified name of the constructor
      args: a list of constructor arguments
      kwargs: a list of (key,value,default) triples for keyword arguments
    """
    self._constructor_name = name
    self._constructor_args = args
    self._constructor_kwargs = [] if kwargs is None else kwargs

  @property
  def constructor_name(self):
    return self._constructor_name

  @property
  def constructor_args(self):
    return self._constructor_args

  @property
  def constructor_kwargs(self):
    return self._constructor_kwargs

  def __rshift__(self, rhs):
    # layer >> rhs pipes this layer's output into rhs.
    return tensorflow_fold.blocks.blocks.Pipe(self, rhs)

  def __rrshift__(self, lhs):
    # lhs >> layer pipes lhs into this layer.
    return tensorflow_fold.blocks.blocks.Pipe(lhs, self)
def create_variable_scope(name):
  """Creates a new variable scope based on `name`, nested in the current scope.

  If `name` ends with a `/` the new scope is created exactly as if you called
  `tf.variable_scope(name)`. Otherwise `name` is made globally unique within
  the current graph (e.g. `foo` becomes `foo_1` if a `foo` variable scope
  already exists).

  Args:
    name: A non-empty string.

  Returns:
    A variable scope.

  Raises:
    TypeError: if `name` is not a string.
    ValueError: if `name` is empty.
  """
  if not isinstance(name, six.string_types):
    raise TypeError('name must be a string: %s' % (name,))
  if not name:
    raise ValueError('name must be non-empty')
  if name.endswith('/'):
    with tf.variable_scope(name) as scope:
      return scope
  parent_name = tf.get_variable_scope().name
  full_name = '%s/%s' % (parent_name, name) if parent_name else name
  # Every variable scope has a name scope with the exact same name, so a
  # unique name scope is by implication also a unique variable scope name.
  with tf.name_scope(None):  # enter the root name scope
    with tf.name_scope(full_name) as unique_name:
      pass
  if parent_name:
    unique_name = unique_name[len(parent_name) + 1:]
  with tf.variable_scope(unique_name[:-1]) as scope:  # strip the trailing '/'
    return scope
@six.add_metaclass(abc.ABCMeta)
class TensorToTensorLayer(Layer):
  """A set of TF variables and an associated Tensor -> Tensor function."""

  def __init__(self, *args, **kwargs):
    self._created_variables = False
    super(TensorToTensorLayer, self).__init__(*args, **kwargs)

  @abc.abstractmethod
  def _create_variables(self):
    """Creates the variables associated with this layer.

    Called at most once: either when the layer's call operator is invoked for
    the first time (the input type will have been set by then), or on the
    first call to the public `create_variables`. Runs with the scope set to
    this layer's vscope.

    Raises:
      TypeError: If `input_type` is invalid for this layer or isn't set.
    """
    pass

  @abc.abstractmethod
  def _process_batch(self, batch):
    """Processes a batch of inputs using this layer; called in its vscope.

    Args:
      batch: A batch tensor for this layer's input type.

    Returns:
      A tensor of this layer's output type.
    """
    pass

  def __call__(self, batch):
    """Applies the layer to a batch, creating its variables if needed.

    Args:
      batch: A batch tensor.

    Returns:
      A tensor of this layer's output type.

    Raises:
      ValueError: If the layer was previously called with a batch of a
        different dtype or shape (not considering the leading dimension).
    """
    # The element type is the batch shape minus the leading (batch) dimension.
    element_type = tdt.TensorType(batch.get_shape().as_list()[1:], batch.dtype)
    self.set_input_type(element_type)
    self.create_variables()
    with tf.variable_scope(self._vscope):
      return self._process_batch(batch)

  def create_variables(self):
    """Creates the variables for this layer if they don't already exist.

    If the variables are created by this method rather than by calling the
    layer, the input type may need to be set manually.

    Raises:
      TypeError: If the input type is invalid or unset.
    """
    self._check_input_type()
    with tf.variable_scope(self._vscope):
      if not self._created_variables:
        self._create_variables()
        self._created_variables = True
class FC(TensorToTensorLayer):
"""A fully connected network layer.
Fully connected layers require a `float32` vector (i.e. 1D tensor) as input,
and build `float32` vector outputs. Layers can be applied to multiple inputs,
provided they all have the same shape.
For example, to apply the same hidden layer to two different input fields:
```python
layer = FC(100)
in = {'a': Vector(10), 'b': Vector(10)}
hidden = [in['a'] >> Call(layer), in['b'] >> Call(layer)] >> Concat()
out = hidden >> Call(FC(10, activation=None))
```
Attributes:
weights: The tensor for the weights of the FC layer.
bias: The tensor for the bias of the FC layer.
scales: The tensor for the scales of the FC layer if weight norm is enabled.
output_size: The size of the output as an integer.
"""
def __init__(self, num_units_out, activation=tf.nn.relu, initializer=None,
input_keep_prob=None, output_keep_prob=None,
normalization_fn=None, weight_norm=False, name=None):
"""Initializes the layer.
Args:
num_units_out: The number of output units in the layer.
activation: The activation function. Default is ReLU. Use `None` to get a
linear layer.
initializer: The initializer for the weights. Defaults to uniform unit
scaling with factor derived in <http://arxiv.org/pdf/1412.6558v3.pdf>
if activation is ReLU, ReLU6, tanh, or linear. Otherwise defaults to
truncated normal initialization with a standard deviation of 0.01.
input_keep_prob: Optional scalar float32 tensor for dropout on input.
Feed 1.0 at serving to disable dropout.
output_keep_prob: Optional scalar float32 tensor for dropout on output.
Feed 1.0 at serving to disable dropout.
normalization_fn: Optional normalization function that will be inserted
before nonlinearity.
weight_norm: A bool to control whether weight normalization is used. See
https://arxiv.org/abs/1602.07868 for how it works.
name: An optional string name. Defaults to `FC_%d % num_units_out`. Used
to name the variable scope where the variables for the layer live.
"""
self.set_constructor_args('td.FC', *get_local_arguments(FC.__init__, True))
if not initializer:
# TODO(SamEisenstat): This constant is calibrated for ReLU, something else
# might be better for ReLU6.
if activation in [tf.nn.relu, tf.nn.relu6]:
initializer = tf.uniform_unit_scaling_initializer(1.43)
elif activation == tf.tanh:
initializer = tf.uniform_unit_scaling_initializer(1.15)
elif not activation:
initializer = tf.uniform_unit_scaling_initializer(1.0)
else:
initializer = tf.truncated_normal_initializer(stddev=0.01)
self._activation = activation
self._initializer = initializer
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._normalization_fn = normalization_fn
self._weight_norm = weight_norm
if name is None: name = 'FC_%d' % num_units_out
super(FC, self).__init__(
output_type=tdt.TensorType([num_units_out]), name_or_scope=name)
@property
def bias(self):
if not self._created_variables:
raise RuntimeError('bias have not been created; call the layer first')
return self._bias
@property
def weights(self):
if not self._created_variables:
raise RuntimeError('weights have not been created; call the layer first')
return self._weights
@property
def scales(self):
if not self._created_variables:
raise RuntimeError('scales have not been created; call the layer first')
return self._scales
@property
def output_size(self):
return self.output_type.shape[0]
@property
def weight_norm(self):
return self._weight_norm
def _create_variables(self):
if self.input_type.dtype != 'float32':
raise TypeError('FC input dtype must be float32: %s' %
self.input_type.dtype)
if self.input_type.ndim != 1:
raise TypeError('FC input shape must be 1D: %s' %
str(self.input_type.shape))
self._bias | |
e:
apkData["callgraph"] = None
if saveInfo:
prettyPrint("Saving extracted info to \"%s\"" % destination, "debug")
if not os.path.exists(destination):
prettyPrint("Could not find the temporary directory \"%s\". Saving aborted" % destination, "warning")
return apk, dex, vm, apkData
else:
open("%s/data.txt" % destination, "w").write(str(apkData))
if infoLevel >= 4:
if apkData["callgraph"] != None:
nx.write_gpickle(callgraph, "%s/call_graph.gpickle" % destination)
except exceptions.RuntimeError as re:
prettyPrintError(re)
except Exception as e:
prettyPrintError(e)
return None, None, None, {}
return apk, dex, vm, apkData
def hex_to_rgb(value):
    """
    Converts a hex color string (e.g. "#ff00aa" or "ff00aa") to an (r, g, b)
    tuple of ints.
    :param value: The hex color string, with or without a leading '#'
    :type value: str
    :return: A tuple of three ints (red, green, blue)
    """
    value = value.lstrip('#')
    lv = len(value)
    # Use floor division: under Python 3, `lv / 3` is a float and breaks
    # both the slice bounds and range() below.
    step = lv // 3
    return tuple(int(value[i:i + step], 16) for i in range(0, lv, step))
def getPackageNameFromAPK(apkPath):
    """
    Retrieves the package name from an APK using AAPT
    :param apkPath: The path to the APK archive to process
    :type apkPath: str
    :return: A string depicting the retrieved packaged name ("" on error)
    """
    try:
        # Dump the APK's badging info and parse the package name out of it.
        badging = subprocess.Popen(["aapt", "dump", "badging", apkPath],
                                   stderr=subprocess.STDOUT,
                                   stdout=subprocess.PIPE).communicate()[0]
        marker = "package: name='"
        start = badging.find(marker) + len(marker)
        end = badging.find("'", start)
        app_pkg = badging[start:end].replace(" ", "")
    except Exception as e:
        prettyPrintError(e)
        return ""
    return app_pkg
def getVTLabel(VTReportKey, labeling="vt1-vt1"):
    """
    Figures out the label of an app according to its VirusTotal report and the passed labeling scheme
    :param VTReportKey: The key used to look for the report (i.e., the SHA256 hash of the app)
    :type VTReportKey: str
    :param labeling: The labeling scheme to apply ("old", "vt1-vt1", "vt50p-vt50p", or "vt50p-vt1")
    :type labeling: str
    :return: an int depicting the class of the app according to the adopted labeling scheme (1 for malicious, 0 for benign, -1 for unknown)
    """
    try:
        targetLabel = -1
        reportPath = "%s/%s.report" % (VT_REPORTS_DIR, VTReportKey)
        if os.path.exists(reportPath):
            # NOTE(review): eval() on the report file assumes locally-trusted
            # data; consider json/ast.literal_eval if reports may be tampered.
            report = eval(open(reportPath).read())
            if "positives" in report.keys():
                positives = report["positives"]
                if labeling == "old":
                    # Label from the delta of detections at first submission.
                    if "additional_info" in report.keys():
                        if "positives_delta" in report["additional_info"].keys():
                            delta = report["additional_info"]["positives_delta"]
                            targetLabel = 1 if positives - delta >= 1 else 0
                elif labeling == "vt1-vt1":
                    # A single detection suffices for a malicious label.
                    targetLabel = 1 if positives >= 1 else 0
                elif labeling == "vt50p-vt50p":
                    # Majority vote of the scanners.
                    targetLabel = 1 if positives / float(report["total"]) >= 0.5 else 0
                elif labeling == "vt50p-vt1":
                    # Malicious on majority, benign only on zero detections.
                    if positives / float(report["total"]) >= 0.5:
                        targetLabel = 1
                    elif positives == 0:
                        targetLabel = 0
                    else:
                        targetLabel = -1
    except Exception as e:
        prettyPrintError(e)
        return -1
    return targetLabel
def getVTReport(VTAPIKey, VTHash, allinfo="true"):
    """
    Download the report corresponding to a hash from VirusTotal
    :param VTAPIKey: The VirusTotal API key needed to download the report
    :type VTAPIKey: str
    :param VTHash: The SHA1 or SHA256 hash of the resource
    :type VTHash: str
    :param allinfo: Whether to download the full or short report from VirusTotal (true [Default]/false)
    :type allinfo: str
    :return: A dict containing the report downloaded from VirusTotal ({} on error or empty response)
    """
    try:
        URL = "https://www.virustotal.com/vtapi/v2/file/report?apikey=%s&resource=%s&allinfo=%s" % (VTAPIKey, VTHash, allinfo)
        response = requests.get(URL).text
        if len(response) > 0:
            return json.loads(response)
    except Exception as e:
        # BUGFIX: was a Python-2 `print` statement (a syntax error under
        # Python 3); use prettyPrintError for consistency with the rest of
        # this module's error handling.
        prettyPrintError(e)
    return {}
def injectBehaviorInTrace(targetTrace, insertionProbability, multipleBehaviors=False):
    """
    Injects malicious blocks of pre-defined malicious behaviors into a target trace with a the likelihood of [insertionProbability]
    :param targetTrace: The trace to inject the behaviors in
    :type targetTrace: list
    :param insertionProbability: The probability with which behaviors are injected into the target trace
    :type insertionProbability: float
    :param multipleBehaviors: Whether to inject different behaviors in the same target trace
    :type multipleBehaviors: bool
    :return: A list depicting the new trace with the inserted behavior(s)
    """
    try:
        newTrace = []
        # Retrieve stored behaviors
        behaviors = loadMaliciousBehaviors()
        # Pick one fixed behavior up front unless multiple behaviors are allowed
        constantBehavior = behaviors[random.randint(0, len(behaviors)-1)] if not multipleBehaviors else ""
        currentIndex = 0
        # Find insertion points and behaviors
        positions = []
        while currentIndex < len(targetTrace):
            if flip(insertionProbability) == "YES":
                b = constantBehavior if constantBehavior != "" else behaviors[random.randint(0, len(behaviors)-1)]
                # Insert behavior after the current call
                positions.append((currentIndex+1, b))
                # Skip past the just-inserted block
                currentIndex = currentIndex + len(b) + 1
            else:
                # BUGFIX: advance even when no behavior is inserted; previously
                # the loop kept re-flipping at the same index until a "YES"
                # came up, which effectively ignored insertionProbability.
                currentIndex += 1
        # BUGFIX: removed stray Python-2 debug statement `print positions`.
        # Insert behaviors in positions
        newTrace = [] + targetTrace
        if len(positions) > 0:
            for p in positions:
                before = newTrace[:p[0]]
                after = newTrace[p[0]:]
                middle = ["%s()" % i for i in p[1]]
                before.extend(middle)
                newTrace = before+after
    except Exception as e:
        prettyPrintError(e)
        return []
    return newTrace
def loadNumericalFeatures(featuresFile, delimiter=","):
    """
    Loads numerical features from a file and returns a list
    :param featuresFile: The file containing the feature vector
    :type featuresFile: str
    :param delimiter: The character separating numerical features
    :type delimiter: str
    :return: A list of numerical features ([] on error)
    """
    try:
        if not os.path.exists(featuresFile):
            prettyPrint("Unable to find the features file \"%s\"" % featuresFile, "warning")
            return []
        raw = open(featuresFile).read()
        # A bracketed payload is treated as a Python list literal; anything
        # else is parsed as delimiter-separated floats.
        looksLikeList = raw.lower().find("[") != -1 and raw.lower().find("]") != -1
        if looksLikeList:
            features = eval(raw)
        else:
            features = [float(f) for f in raw.replace(' ', '').split(delimiter)]
    except Exception as e:
        prettyPrintError(e)
        return []
    return features
def loadMaliciousBehaviors():
    """
    Loads malicious behaviors from the database, seeding it from the
    MALICIOUS_BEHAVIORS file if the database table is empty
    :return: A list of malicious behaviors stored in the database ([] on error)
    """
    try:
        MaatDB = DB()
        cursor = MaatDB.select([], "behaviors")
        behaviors = cursor.fetchall()
        if len(behaviors) < 1:
            prettyPrint("Could not retrieve malicious behaviors from the database. Inserting behaviors in \"%s\"" % MALICIOUS_BEHAVIORS, "warning")
            content = open(MALICIOUS_BEHAVIORS).read().split('\n')
            if len(content) < 1:
                # BUGFIX: referenced the misspelled name MALCIOUS_BEHAVIORS,
                # which raised a NameError whenever this path was taken.
                prettyPrint("Could not retrieve any behaviors from \"%s\"" % MALICIOUS_BEHAVIORS, "error")
                return []
            # Each line is "<description>:<call sequence>".
            for line in content:
                if len(line) > 1:
                    desc = line.split(':')[0]
                    sequence = line.split(':')[1].replace(' ','')
                    timestamp = getTimeStamp(includeDate=True)
                    MaatDB.insert("behaviors", ["bDesc", "bSequence", "bTimestamp"], [desc, sequence, timestamp])
            # Lazy guarantee of same data format
            cursor = MaatDB.select([], "behaviors")
            behaviors = cursor.fetchall()
    except Exception as e:
        prettyPrintError(e)
        return []
    return behaviors
def logEvent(msg):
    """
    Appends a message to the global LOG_FILE
    :param msg: The message to log
    :type msg: str
    :return: True on success, False on error
    """
    try:
        # BUGFIX: mode was "w+", which truncated the log on every call so only
        # the most recent event survived; append instead, and use a context
        # manager so the handle is actually closed/flushed.
        with open(LOG_FILE, "a") as logFile:
            logFile.write(msg)
    except Exception as e:
        prettyPrintError(e)
        return False
    return True
def matchAPKs(sourceAPK, targetAPKs, matchingDepth=1, matchingThreshold=0.67, matchWith=10, useSimiDroid=False, fastSearch=True, matchingTimeout=500, labeling="vt1-vt1", useLookup=False):
"""
Compares and attempts to match two APK's and returns a similarity measure
:param sourceAPK: The path to the source APK (the original app you wish to match)
:type sourceAPK: str
:param targetAPK: The path to the directory containing target APKs (against which you wish to match)
:type targetAPK: str
:param matchingDepth: The depth and rigorosity of the matching (between 1 and 4)
:type matchingDepth: int
:param matchingThreshold: A similarity percentage above which apps are considered similar
:type matchingThreshold: float
:param matchWith: The number of matchings to return (default: 1)
:type matchWith: int
:param useSimiDroid: Whether to use SimiDroid to perform the comparison
:type useSimiDroid: boolean
:param fastSearch: Whether to return matchings one maximum number of matches [matchWith] is reached
:type fastSearch: boolean
:param matchingTimeout: The time (in seconds) to allow the matching process to continue
:type matchingTimeoue: int
:param labeling: The labeling scheme adopted to label APK's as malicious and benign
:type labeling: str
:param useLookup: Whether to skip analyzing every app and depend on lookup structs to hasten the experiments
:type useLookup: boolean
:return: A list of tuples (str, (float, float) depicting the matched app, the similarity measure and the matched app's label
"""
try:
similarity = 0.0
# Get the target apps
targetApps = glob.glob("%s/*" % targetAPKs) if useSimiDroid == False else glob.glob("%s/*.apk" % targetAPKs)
# Randomize?
random.shuffle(targetApps)
if len(targetApps) < 1:
prettyPrint("Could not retrieve any APK's or directories from \"%s\"" % targetApps, "error")
return []
prettyPrint("Successfully retrieved %s apps from \"%s\"" % (len(targetApps), targetAPKs))
# Retrieve information from the source APK
if not useSimiDroid:
sourceKey = sourceAPK[sourceAPK.rfind("/")+1:].replace(".apk", "")
if useLookup:
infoDir = targetApps[0][:targetApps[0].rfind("/")]
if os.path.exists("%s/%s_data" % (infoDir, sourceKey)):
sourceInfo = eval(open("%s/%s_data/data.txt" % (infoDir, sourceKey)).read())
else:
prettyPrint("No lookup info found. Extracting app info", "warning")
sourceInfo = extractAPKInfo(sourceAPK, matchingDepth)[-1]
else:
sourceInfo = extractAPKInfo(sourceAPK, matchingDepth)[-1]
if len(sourceInfo) < 1:
prettyPrint("Could not extract any info from \"%s\"" % sourceAPK, "error")
return []
matchings = {}
counter = 0
startTime = time.time()
for targetAPK in targetApps:
counter += 1
# Timeout?
if counter >= matchingTimeout:
prettyPrint("Matching timeout", "error")
return sortDictByValue(matchings, True)
prettyPrint("Matching with \"%s\", #%s out of %s" % (targetAPK, counter, matchingTimeout), "debug")
if useSimiDroid == False:
# Use homemade recipe to perform the comparison
if not os.path.exists("%s/data.txt" % targetAPK):
prettyPrint("Could not find a \"data.txt\" file for app \"%s\". Skipping" % targetAPK, "warning")
continue
# Load pre-extracted target app information
try:
targetInfo = eval(open("%s/data.txt" % targetAPK).read())
except Exception as e:
prettyPrint("Could not load target info. Skipping", "warning")
continue
# Retrieve the | |
<filename>datamart_isi/utilities/utils.py
import typing
import pandas as pd
import os
import requests
import json
import sys
import argparse
import logging
import copy
from functools import wraps
from d3m.metadata.base import ALL_ELEMENTS
from datamart_isi.config import cache_file_storage_base_loc
from datamart_isi.utilities import connection
from datamart_isi.cache.wikidata_cache import QueryCache
from dsbox.datapreprocessing.cleaner.data_profile import Profiler, Hyperparams as ProfilerHyperparams
from dsbox.datapreprocessing.cleaner.cleaning_featurizer import CleaningFeaturizer, CleaningFeaturizerHyperparameter
_logger = logging.getLogger(__name__)
seed_dataset_store_location = os.path.join(cache_file_storage_base_loc, "datasets_cache")
WIKIDATA_CACHE_MANAGER = QueryCache()
WIKIDATA_SERVER = connection.get_wikidata_server_url()
class Utils:
    """Static helper utilities for the ISI datamart.

    Bundles Wikidata label lookup, dataset profiling helpers,
    time-granularity conversions, and a file-based dataset join.
    Every member is a ``@staticmethod`` or ``@classmethod``; the class
    is never instantiated.
    """

    # Fallback dataset description used by generate_metadata_from_dataframe
    # when the caller supplies no schema of its own.
    DEFAULT_DESCRIPTION = {
        "materialization": {
            "python_path": "default_materializer"
        },
        "variables": []
    }

    @staticmethod
    def get_node_name(node_code) -> str:
        """Return the English label of a Wikidata property (P node).

        :param node_code: a str indicating the P node (e.g. "P123")
        :return: the node's English label (e.g. "inception"), or the raw
            ``node_code`` if the lookup fails for any reason
        """
        sparql_query = "SELECT DISTINCT ?x WHERE \n { \n" + \
                       "wd:" + node_code + " rdfs:label ?x .\n FILTER(LANG(?x) = 'en') \n} "
        try:
            results = WIKIDATA_CACHE_MANAGER.get_result(sparql_query)
            return results[0]['x']['value']
        except Exception:
            # Narrowed from a bare ``except:``; any lookup/parse failure
            # falls back to returning the code itself.
            return node_code

    @staticmethod
    def calculate_dsbox_features(data: pd.DataFrame, metadata: typing.Union[dict, None],
                                 selected_columns: typing.Set[int] = None) -> dict:
        """Calculate dsbox features and add them to the metadata dictionary.

        Args:
            data: dataset as a pandas dataframe
            metadata: metadata dict; if falsy it is returned unchanged
            selected_columns: optional set of column indices to profile

        Returns:
            updated metadata dict
        """
        from datamart_isi.profilers.dsbox_profiler import DSboxProfiler
        if not metadata:
            return metadata
        return DSboxProfiler().profile(inputs=data, metadata=metadata, selected_columns=selected_columns)

    @classmethod
    def generate_metadata_from_dataframe(cls, data: pd.DataFrame, original_meta: dict = None) -> dict:
        """Generate default metadata from the data alone, without a dataset schema.

        Args:
            data: pandas DataFrame to profile
            original_meta: optional dict whose entries override generated ones

        Returns:
            metadata dict with one entry per column under ``"variables"``
        """
        from datamart_isi.profilers.basic_profiler import BasicProfiler, GlobalMetadata
        global_metadata = GlobalMetadata.construct_global(description=cls.DEFAULT_DESCRIPTION)
        global_metadata = BasicProfiler.basic_profiling_entire(global_metadata=global_metadata, data=data)
        metadata_dict = global_metadata.value
        # Run the dsbox profiler followed by the cleaning featurizer so the
        # produced d3m metadata carries semantic types per column.
        hyper1 = ProfilerHyperparams.defaults()
        hyper2 = CleaningFeaturizerHyperparameter.defaults()
        clean_f = CleaningFeaturizer(hyperparams=hyper2)
        profiler = Profiler(hyperparams=hyper1)
        profiler.set_training_data(inputs=data)
        profiler.fit()
        profiled_df = profiler.produce(inputs=data).value
        clean_f.set_training_data(inputs=profiled_df)
        clean_f.fit()
        cleaned_df = clean_f.produce(inputs=profiled_df).value
        cleaned_df_metadata = cleaned_df.metadata
        for i in range(data.shape[1]):
            each_column_metadata = cleaned_df_metadata.query((ALL_ELEMENTS, i))
            column_name = data.columns[i]
            if "datetime" in data.iloc[:, i].dtype.name:
                # Tag datetime columns directly instead of trusting the profiler.
                semantic_type = ("http://schema.org/DateTime", 'https://metadata.datadrivendiscovery.org/types/Attribute')
            else:
                semantic_type = each_column_metadata['semantic_types']
            variable_metadata = {'datamart_id': None,
                                 'semantic_type': semantic_type,
                                 'name': column_name,
                                 'description': 'column name: {}, dtype: {}'.format(column_name, cleaned_df.iloc[:, i].dtype.name)
                                 }
            metadata_dict['variables'].append(variable_metadata)
        if original_meta:
            metadata_dict.update(original_meta)
        return metadata_dict

    @staticmethod
    def get_time_granularity(time_column: pd.DataFrame) -> str:
        """Guess the time granularity of a datetime column.

        :param time_column: pandas Series of datetimes (or values parseable
            by ``pd.to_datetime``)
        :return: one of "second", "minute", "hour", "day", "week",
            "month", "year"
        :raises ValueError: if the column cannot be parsed as datetimes
        """
        if "datetime" not in time_column.dtype.name:
            try:
                time_column = pd.to_datetime(time_column)
            except Exception:
                raise ValueError("Can't parse given time column!")
        # With only one unique timestamp no component can repeat, so the
        # uniqueness checks below tolerate no duplicates in that case.
        if len(time_column.unique()) == 1:
            allow_duplicate_amount = 0
        else:
            allow_duplicate_amount = 1
        time_granularity = 'second'
        if any(time_column.dt.minute != 0) and len(time_column.dt.minute.unique()) > allow_duplicate_amount:
            time_granularity = 'minute'
        elif any(time_column.dt.hour != 0) and len(time_column.dt.hour.unique()) > allow_duplicate_amount:
            time_granularity = 'hour'
        elif any(time_column.dt.day != 0) and len(time_column.dt.day.unique()) > allow_duplicate_amount:
            # Could also be weekly data: check that every pair of
            # consecutive sorted timestamps is exactly 7 days apart.
            is_weekly_data = True
            time_column_sorted = time_column.sort_values()
            temp1 = time_column_sorted.iloc[0]
            for i in range(1, len(time_column_sorted)):
                temp2 = time_column_sorted.iloc[i]
                if (temp2 - temp1).days != 7:
                    is_weekly_data = False
                    break
                # Advance the window (the original compared every element
                # against the first one, misclassifying weekly series).
                temp1 = temp2
            if is_weekly_data:
                time_granularity = 'week'
            else:
                time_granularity = 'day'
        elif any(time_column.dt.month != 0) and len(time_column.dt.month.unique()) > allow_duplicate_amount:
            time_granularity = 'month'
        elif any(time_column.dt.year != 0) and len(time_column.dt.year.unique()) > allow_duplicate_amount:
            time_granularity = 'year'
        else:
            _logger.error("Can't guess the time granularity for this dataset! Will use as second")
        return time_granularity

    @staticmethod
    def map_granularity_to_d3m_format(granularity: str):
        """Map a granularity string to the d3m ``timeGranularity`` format.

        d3m allows the following granularities:
        "timeGranularity":{"type":"dict", "required":false, "schema":{
            "value":{"type":"number", "required":true},
            "units":{"type":"string", "required":true, "allowed":[
                "seconds", "minutes", "days", "weeks", "years", "unspecified"
            ]}
        }

        Note: "hours"/"months" are also emitted here even though they are
        absent from the allowed list above.

        :param granularity: granularity name (substring match, e.g. "week")
        :return: a list of (key, value) tuples following the d3m format
        :raises ValueError: for an unrecognized granularity
        """
        if "second" in granularity:
            return [('value', 1), ('unit', 'seconds')]
        elif "minute" in granularity:
            return [('value', 1), ('unit', 'minutes')]
        elif "hour" in granularity:
            return [('value', 1), ('unit', 'hours')]
        elif "day" in granularity:
            return [('value', 1), ('unit', 'days')]
        elif "week" in granularity:
            return [('value', 1), ('unit', 'weeks')]
        elif "month" in granularity:
            return [('value', 1), ('unit', 'months')]
        elif "year" in granularity:
            return [('value', 1), ('unit', 'years')]
        else:
            raise ValueError("Unrecognized granularity")

    @staticmethod
    def map_granularity_to_value(granularity_str: str) -> int:
        """Map a singular granularity name (e.g. "day") to the wikidata int code.

        :raises ValueError: when the name is unknown
        """
        TemporalGranularity = {
            'second': 14,
            'minute': 13,
            'hour': 12,
            'day': 11,
            'month': 10,
            'year': 9
        }
        if granularity_str.lower() in TemporalGranularity:
            return TemporalGranularity[granularity_str.lower()]
        else:
            raise ValueError("Can't find corresponding granularity value.")

    @staticmethod
    def map_d3m_granularity_to_value(granularity_str: str) -> int:
        """Map a d3m (plural) time-granularity string to the wikidata int code.

        :param granularity_str: e.g. "seconds", "weeks", "unspecified"
        :return: the integer granularity code
        :raises ValueError: when the name is unknown
        """
        TemporalGranularity = {
            'seconds': 14,
            'minutes': 13,
            'hours': 12,
            'days': 11,
            'weeks': 11,  # weeks are currently treated the same as days
            'months': 10,
            'years': 9,
            'unspecified': 8,
        }
        if granularity_str.lower() in TemporalGranularity:
            return TemporalGranularity[granularity_str.lower()]
        else:
            raise ValueError("Can't find corresponding granularity value.")

    @staticmethod
    def time_granularity_value_to_stringfy_time_format(granularity_int: int) -> str:
        """Map an integer granularity code to a strftime format string.

        :param granularity_int: granularity code (9-14), or anything ``int()``
            can coerce; unknown codes fall back to second-level precision
        :raises ValueError: when the value cannot be coerced to int
        """
        try:
            granularity_int = int(granularity_int)
        except (TypeError, ValueError):
            # TypeError added so e.g. None gets the same clear error.
            raise ValueError("The given granularity is not int format!")
        granularity_dict = {
            14: "%Y-%m-%d %H:%M:%S",
            13: "%Y-%m-%d %H:%M",
            12: "%Y-%m-%d %H",
            11: "%Y-%m-%d",
            10: "%Y-%m",
            9: "%Y"
        }
        if granularity_int in granularity_dict:
            return granularity_dict[granularity_int]
        else:
            _logger.warning("Unknown time granularity value as {}! Will use second level.".format(str(granularity_int)))
            return granularity_dict[14]

    @staticmethod
    def overlap(first_inter, second_inter) -> bool:
        """Check whether two closed time intervals overlap.

        :param first_inter: [start_time, end_time]
        :param second_inter: [start_time, end_time]
        :return: True iff the intervals intersect

        Two intervals intersect iff one contains an endpoint of the other,
        so both directions must be checked.  (The original returned False
        after checking only the first direction because of a misplaced
        for/else.)
        """
        for f, s in ((first_inter, second_inter), (second_inter, first_inter)):
            for time in (f[0], f[1]):
                if s[0] <= time <= s[1]:
                    return True
        return False

    @staticmethod
    def keywords_augmentation(keywords: typing.List[str], server_address: str = None) -> typing.List[str]:
        """Use a fuzzy-search service to obtain more related keywords.

        :param keywords: a list of keywords
        :param server_address: the request server address; resolved from the
            project config when omitted
        :return: the augmented keyword list, or the input list unchanged if
            the service call fails
        """
        if not server_address:
            server_address = connection.get_keywords_augmentation_server_url()
        url = server_address + "/" + ",".join(keywords)
        resp = requests.get(url)
        if resp.status_code // 100 == 2:
            new_keywords = json.loads(resp.text)['message'].split(",")
            _logger.info("Get augmented keywords as {}".format(str(new_keywords)))
        else:
            new_keywords = keywords
            _logger.warning("Failed on augmenting keywords! Please check the service condition!")
        return new_keywords

    @staticmethod
    def qgram_tokenizer(x, _q):
        """Split string ``x`` into overlapping q-grams, each suffixed with '*'.

        Strings shorter than ``_q`` are returned whole (without the suffix).
        """
        if len(x) < _q:
            return [x]
        return [x[i:i + _q] + "*" for i in range(len(x) - _q + 1)]

    @staticmethod
    def trigram_tokenizer(x):
        """Convenience wrapper: 3-gram tokenization of ``x``."""
        return Utils.qgram_tokenizer(x, 3)

    @staticmethod
    def join_datasets_by_files(files: typing.List[typing.Union[str, pd.DataFrame]], how: str = "left") -> pd.DataFrame:
        """Join two or more region/time datasets into one DataFrame.

        :param files: list of csv paths and/or DataFrames; each must contain
            the columns {"region_wikidata", "precision", "time"}
        :param how: the merge method, one of {'left', 'right', 'outer',
            'inner'}, default 'left':
            left: use calling frame's index (or column if on is specified)
            right: use other's index
            outer: form the union of both indexes, sorted lexicographically
            inner: form the intersection, preserving the calling order
        :return: a joined DataFrame object
        :raises ValueError: on malformed input or missing required columns
        """
        if not isinstance(files, list):
            raise ValueError("Input must be a list of files")
        if len(files) < 2:
            # Message fixed: the check requires at least 2 inputs.
            raise ValueError("Input must contain at least 2 files")
        _logger.info("Totally {} files.".format(str(len(files))))
        necessary_column_names = {"region_wikidata", "precision", "time"}
        # Columns that can never be the "value" column of a dataset.
        ignore_column_names = {"region_wikidata", "precision", "time", "variable_name", "variable", "region_Label", "calendar",
                               "productLabel", "qualityLabel"}
        loaded_dataframes = []
        for i, each in enumerate(files):
            if isinstance(each, str):
                try:
                    temp_loaded_df = pd.read_csv(each)
                except Exception as e:
                    _logger.warning("Failed on loading dataframe No.{}".format(str(i)))
                    _logger.error(str(e))
                    continue
            elif isinstance(each, pd.DataFrame):
                temp_loaded_df = each
            else:
                _logger.warning("Unsupported format '{}' on No.{} input, will ignore.".format(str(type(each)), str(i)))
                continue
            temp_loaded_df = temp_loaded_df.dropna(subset=['region_wikidata'], inplace=False)
            if len(set(temp_loaded_df.columns.tolist()).intersection(necessary_column_names)) != len(necessary_column_names):
                _logger.error("Following columns {} are necessary to be exists".format(str(necessary_column_names)))
                # Placeholder was previously left unfilled ("No.{}").
                raise ValueError("Not all columns found on given No.{} datasets.".format(str(i)))
            loaded_dataframes.append(temp_loaded_df)
        # Use the first input df as the base df.
        output_df = copy.deepcopy(loaded_dataframes[0])
        source_precision = output_df['precision'].iloc[0]
        # Stringify the datetimes so the join keys compare equal.
        time_stringfy_format = Utils.time_granularity_value_to_stringfy_time_format(source_precision)
        output_df['time'] = pd.to_datetime(output_df['time']).dt.strftime(
            time_stringfy_format)
        for i, each_loaded_df in enumerate(loaded_dataframes[1:]):
            current_precision = each_loaded_df['precision'].iloc[0]
            if source_precision != current_precision:
                # Mismatched precisions: join on region only.
                left_join_columns = ["region_wikidata"]
                right_join_columns = ["region_wikidata"]
            else:
                left_join_columns = ["region_wikidata", "time"]
                right_join_columns = ["region_wikidata", "time"]
                each_loaded_df['time'] = pd.to_datetime(each_loaded_df['time']).dt.strftime(time_stringfy_format)
            # The single non-ignored, non-label column is assumed to be the
            # value column of this dataset.
            possible_name = []
            for each_col_name in each_loaded_df.columns:
                if each_col_name not in ignore_column_names and "label" not in each_col_name.lower():
                    possible_name.append(each_col_name)
            if len(possible_name) != 1:
                _logger.error("get multiple possible name???")
                _logger.error(str(each_loaded_df.columns))
                _logger.error("???")
            right_needed_columns = right_join_columns + [possible_name[0]]
            # Debug print replaced with logger output.
            _logger.debug(str(right_needed_columns))
            right_join_df = each_loaded_df[right_needed_columns]
            output_df = pd.merge(left=output_df, right=right_join_df,
                                 left_on=left_join_columns, right_on=right_join_columns,
                                 how=how)
            if len(output_df) == 0:
                _logger.error("Get 0 rows after join with No.{} DataFrame".format(str(i + 1)))
        return output_df
def main(argv: typing.Sequence) -> None:
parser = argparse.ArgumentParser(prog='datamart_isi', description="Run ISI datamart utils command.")
subparsers = parser.add_subparsers(dest='commands', title='commands')
# define join parser
join_parser = subparsers.add_parser(
'join', help="join ethiopia related datasets directly by commands",
description="Join datasets",
)
| |
4.4893e-01,
1235.0: 4.4409e-01,
1236.0: 4.4795e-01,
1237.0: 4.4259e-01,
1238.0: 4.4694e-01,
1239.0: 4.4194e-01,
1240.0: 4.4011e-01,
1241.0: 4.4130e-01,
1242.0: 4.4179e-01,
1243.0: 4.3712e-01,
1244.0: 4.3499e-01,
1245.0: 4.3622e-01,
1246.0: 4.3902e-01,
1247.0: 4.3715e-01,
1248.0: 4.3828e-01,
1249.0: 4.3930e-01,
1250.0: 4.3684e-01,
1251.0: 4.3260e-01,
1252.0: 4.3106e-01,
1253.0: 4.2803e-01,
1254.0: 4.2416e-01,
1255.0: 4.3088e-01,
1256.0: 4.2096e-01,
1257.0: 4.1630e-01,
1258.0: 4.2549e-01,
1259.0: 4.0868e-01,
1260.0: 4.1235e-01,
1261.0: 3.9371e-01,
1262.0: 3.7867e-01,
1263.0: 3.8300e-01,
1264.0: 3.5568e-01,
1265.0: 3.7871e-01,
1266.0: 3.6881e-01,
1267.0: 3.7159e-01,
1268.0: 3.5475e-01,
1269.0: 2.3656e-01,
1270.0: 3.7087e-01,
1271.0: 3.9062e-01,
1272.0: 3.9114e-01,
1273.0: 3.8874e-01,
1274.0: 3.8864e-01,
1275.0: 3.9455e-01,
1276.0: 3.9895e-01,
1277.0: 4.0191e-01,
1278.0: 4.0916e-01,
1279.0: 4.0626e-01,
1280.0: 4.0387e-01,
1281.0: 3.9554e-01,
1282.0: 3.5695e-01,
1283.0: 3.8978e-01,
1284.0: 4.0268e-01,
1285.0: 4.0577e-01,
1286.0: 4.0878e-01,
1287.0: 4.0405e-01,
1288.0: 4.0192e-01,
1289.0: 3.9194e-01,
1290.0: 3.9522e-01,
1291.0: 4.0004e-01,
1292.0: 3.7946e-01,
1293.0: 3.9506e-01,
1294.0: 3.8709e-01,
1295.0: 3.8801e-01,
1296.0: 3.7322e-01,
1297.0: 3.5583e-01,
1298.0: 3.7536e-01,
1299.0: 3.9127e-01,
1300.0: 3.3855e-01,
1301.0: 3.4728e-01,
1302.0: 3.7539e-01,
1303.0: 3.3197e-01,
1304.0: 2.8849e-01,
1305.0: 3.6783e-01,
1306.0: 3.6853e-01,
1307.0: 2.9362e-01,
1308.0: 3.3277e-01,
1309.0: 3.6822e-01,
1310.0: 2.8908e-01,
1311.0: 3.2012e-01,
1312.0: 3.1986e-01,
1313.0: 3.0089e-01,
1314.0: 2.7690e-01,
1315.0: 2.7447e-01,
1316.0: 3.1113e-01,
1317.0: 2.9969e-01,
1318.0: 3.1984e-01,
1319.0: 2.5803e-01,
1320.0: 2.4864e-01,
1321.0: 2.8684e-01,
1322.0: 2.9023e-01,
1323.0: 2.2386e-01,
1324.0: 2.5231e-01,
1325.0: 3.0943e-01,
1326.0: 2.6956e-01,
1327.0: 2.5593e-01,
1328.0: 2.2555e-01,
1329.0: 1.7097e-01,
1330.0: 2.2052e-01,
1331.0: 1.3951e-01,
1332.0: 1.4046e-01,
1333.0: 1.9545e-01,
1334.0: 1.6302e-01,
1335.0: 2.2244e-01,
1336.0: 1.7670e-01,
1337.0: 1.5852e-01,
1338.0: 1.7151e-01,
1339.0: 1.7033e-01,
1340.0: 1.6216e-01,
1341.0: 1.6419e-01,
1342.0: 1.7149e-01,
1343.0: 1.2259e-01,
1344.0: 7.3018e-02,
1345.0: 1.0521e-01,
1346.0: 5.6189e-02,
1347.0: 5.8058e-02,
1348.0: 4.5862e-03,
1349.0: 1.5617e-02,
1350.0: 1.5488e-02,
1351.0: 4.4759e-03,
1352.0: 1.4661e-03,
1353.0: 9.2918e-05,
1354.0: 2.8051e-04,
1355.0: 3.4847e-06,
1356.0: 4.6489e-05,
1357.0: 6.9429e-05,
1358.0: 4.0575e-06,
1359.0: 7.1040e-07,
1360.0: 2.0706e-06,
1361.0: 4.6566e-09,
1362.0: 1.7489e-11,
1363.0: 3.0540e-06,
1364.0: 1.3150e-06,
1365.0: 8.7833e-12,
1366.0: 1.2379e-05,
1367.0: 4.8161e-06,
1368.0: 1.4311e-13,
1369.0: 5.0008e-07,
1370.0: 2.8266e-07,
1371.0: 1.9101e-08,
1372.0: 2.6623e-06,
1373.0: 4.2991e-05,
1374.0: 1.7350e-04,
1375.0: 3.1309e-04,
1376.0: 2.4935e-04,
1377.0: 1.1883e-04,
1378.0: 1.0741e-03,
1379.0: 5.0533e-05,
1380.0: 7.9042e-05,
1381.0: 2.2978e-06,
1382.0: 2.4874e-06,
1383.0: 4.2653e-08,
1384.0: 5.9782e-07,
1385.0: 2.0255e-06,
1386.0: 2.4441e-06,
1387.0: 1.9288e-04,
1388.0: 3.9037e-06,
1389.0: 5.6338e-04,
1390.0: 4.7836e-04,
1391.0: 3.3345e-04,
1392.0: 2.3065e-05,
1393.0: 1.1238e-04,
1394.0: 7.3268e-05,
1395.0: 6.5137e-07,
1396.0: 6.1338e-09,
1397.0: 4.7605e-05,
1398.0: 1.2329e-03,
1399.0: 7.8835e-04,
1400.0: 3.1513e-09,
1401.0: 1.0219e-08,
1402.0: 1.7817e-03,
1403.0: 2.3108e-03,
1404.0: 7.1755e-04,
1405.0: 3.5395e-07,
1406.0: 1.9861e-03,
1407.0: 1.6957e-04,
1408.0: 1.6023e-03,
1409.0: 6.0159e-04,
1410.0: 4.5332e-04,
1411.0: 2.0544e-03,
1412.0: 2.5650e-03,
1413.0: 2.2690e-02,
1414.0: 3.5359e-04,
1415.0: 1.7854e-04,
1416.0: 3.4561e-02,
1417.0: 1.1431e-02,
1418.0: 1.3182e-02,
1419.0: 2.0851e-03,
1420.0: 8.0437e-03,
1421.0: 8.9117e-03,
1422.0: 4.5023e-02,
1423.0: 8.9676e-03,
1424.0: 1.6511e-02,
1425.0: 2.5142e-02,
1426.0: 2.7032e-02,
1427.0: 4.8180e-02,
1428.0: 4.4360e-03,
1429.0: 3.6985e-02,
1430.0: 5.9912e-02,
1431.0: 4.8792e-02,
1432.0: 2.4524e-03,
1433.0: 3.4870e-02,
1434.0: 2.0403e-02,
1435.0: 2.0847e-02,
1436.0: 3.7326e-02,
1437.0: 2.9085e-02,
1438.0: 1.2913e-02,
1439.0: 4.9672e-02,
1440.0: 3.8547e-02,
1441.0: 3.0959e-02,
1442.0: 3.5356e-02,
1443.0: 4.3868e-02,
1444.0: 6.0143e-02,
1445.0: 4.8434e-02,
1446.0: 2.2492e-02,
1447.0: 3.5265e-02,
1448.0: 1.1254e-01,
1449.0: 9.9374e-02,
1450.0: 2.6699e-02,
1451.0: 1.0980e-02,
1452.0: 6.0718e-02,
1453.0: 7.9803e-02,
1454.0: 1.3384e-01,
1455.0: 6.4409e-02,
1456.0: 8.6158e-02,
1457.0: 1.1386e-01,
1458.0: 1.3273e-01,
1459.0: 1.5860e-01,
1460.0: 8.3161e-02,
1461.0: 8.7886e-02,
1462.0: 1.2708e-01,
1463.0: 4.2106e-02,
1464.0: 1.4770e-01,
1465.0: 9.0911e-02,
1466.0: 6.3498e-02,
1467.0: 3.5128e-02,
1468.0: 7.4928e-02,
1469.0: 9.2344e-02,
1470.0: 4.8397e-02,
1471.0: 1.7394e-02,
1472.0: 4.5566e-02,
1473.0: 6.8368e-02,
1474.0: 9.4765e-02,
1475.0: 1.7954e-01,
1476.0: 6.6987e-02,
1477.0: 6.7916e-02,
1478.0: 6.1825e-02,
1479.0: 1.1680e-01,
1480.0: 5.9063e-02,
1481.0: 1.1221e-01,
1482.0: 5.6967e-02,
1483.0: 1.4454e-01,
1484.0: 1.3375e-01,
1485.0: 1.2168e-01,
1486.0: 1.2008e-01,
1487.0: 5.9042e-02,
1488.0: 9.1654e-02,
1489.0: 1.8443e-01,
1490.0: 1.6993e-01,
1491.0: 1.9222e-01,
1492.0: 1.5986e-01,
1493.0: 1.7651e-01,
1494.0: 1.9794e-01,
1495.0: 1.7745e-01,
1496.0: 1.6385e-01,
1497.0: 2.2198e-01,
1498.0: 1.8437e-01,
1499.0: 2.1141e-01,
1500.0: 2.4339e-01,
1501.0: 2.5782e-01,
1502.0: 2.2688e-01,
1503.0: 1.7972e-01,
1504.0: 1.5586e-01,
1505.0: 1.7885e-01,
1506.0: 2.5026e-01,
1507.0: 2.4779e-01,
1508.0: 2.3606e-01,
1509.0: 1.8169e-01,
1510.0: 2.6269e-01,
1511.0: 2.5710e-01,
1512.0: 2.5315e-01,
1513.0: 2.3544e-01,
1514.0: 2.1930e-01,
1515.0: 2.5804e-01,
1516.0: 2.4943e-01,
1517.0: 2.4214e-01,
1518.0: 2.4489e-01,
1519.0: 2.3739e-01,
1520.0: 2.5688e-01,
1521.0: 2.6707e-01,
1522.0: 2.5617e-01,
1523.0: 2.7192e-01,
1524.0: 2.6743e-01,
1525.0: 2.5140e-01,
1526.0: 2.5977e-01,
1527.0: 2.5468e-01,
1528.0: 2.7122e-01,
1529.0: 2.6460e-01,
1530.0: 2.4789e-01,
1531.0: 2.6200e-01,
1532.0: 2.7039e-01,
1533.0: 2.6918e-01,
1534.0: 2.6122e-01,
1535.0: 2.5924e-01,
1536.0: 2.6679e-01,
1537.0: 2.6558e-01,
1538.0: 2.6427e-01,
1539.0: 2.6519e-01,
1540.0: 2.5737e-01,
1541.0: 2.6141e-01,
1542.0: 2.6165e-01,
1543.0: 2.6439e-01,
1544.0: 2.6443e-01,
1545.0: 2.6922e-01,
1546.0: 2.6705e-01,
1547.0: 2.6537e-01,
1548.0: 2.5920e-01,
1549.0: 2.6568e-01,
1550.0: 2.6226e-01,
1551.0: 2.6293e-01,
1552.0: 2.6415e-01,
1553.0: 2.6366e-01,
1554.0: 2.5727e-01,
1555.0: 2.6005e-01,
1556.0: 2.5569e-01,
1557.0: 2.6301e-01,
1558.0: 2.6093e-01,
1559.0: 2.6054e-01,
1560.0: 2.5821e-01,
1561.0: 2.6242e-01,
1562.0: 2.6003e-01,
1563.0: 2.5917e-01,
1564.0: 2.5525e-01,
1565.0: 2.5975e-01,
1566.0: 2.5506e-01,
1567.0: 2.5566e-01,
1568.0: 2.4997e-01,
1569.0: 2.4736e-01,
1570.0: 2.3497e-01,
1571.0: 2.2850e-01,
1572.0: 2.3108e-01,
1573.0: 2.2750e-01,
1574.0: 2.3464e-01,
1575.0: 2.3294e-01,
1576.0: 2.3980e-01,
1577.0: 2.0994e-01,
1578.0: 2.2854e-01,
1579.0: 2.3005e-01,
1580.0: 2.3772e-01,
1581.0: 2.4165e-01,
1582.0: 2.3508e-01,
1583.0: 2.4051e-01,
1584.0: 2.4194e-01,
1585.0: 2.5135e-01,
1586.0: 2.4838e-01,
1587.0: 2.4579e-01,
1588.0: 2.4388e-01,
1589.0: 2.2567e-01,
1590.0: 2.3486e-01,
1591.0: 2.3503e-01,
1592.0: 2.4502e-01,
1593.0: 2.5092e-01,
1594.0: 2.4890e-01,
1595.0: 2.5083e-01,
1596.0: 2.3751e-01,
1597.0: 2.3985e-01,
1598.0: 2.4693e-01,
1599.0: 2.3511e-01,
1600.0: 2.3133e-01,
1601.0: 2.1691e-01,
1602.0: 2.1780e-01,
1603.0: 2.1765e-01,
1604.0: 2.2197e-01,
1605.0: 2.3010e-01,
1606.0: 2.3453e-01,
1607.0: 2.2637e-01,
1608.0: 2.2343e-01,
1609.0: 2.2088e-01,
1610.0: 2.1146e-01,
1611.0: 2.2039e-01,
1612.0: 2.2422e-01,
1613.0: 2.3047e-01,
1614.0: 2.3162e-01,
1615.0: 2.3420e-01,
1616.0: 2.2395e-01,
1617.0: 2.2798e-01,
1618.0: 2.3660e-01,
1619.0: 2.3415e-01,
1620.0: 2.2783e-01,
1621.0: 2.2765e-01,
1622.0: 2.3081e-01,
1623.0: 2.3559e-01,
1624.0: 2.3582e-01,
1625.0: 2.3109e-01,
1626.0: 2.3294e-01,
1627.0: 2.3398e-01,
1628.0: 2.3446e-01,
1629.0: 2.3456e-01,
1630.0: 2.2984e-01,
1631.0: 2.3136e-01,
1632.0: 2.3151e-01,
1633.0: 2.2614e-01,
1634.0: 2.2628e-01,
1635.0: 2.2712e-01,
1636.0: 2.2879e-01,
1637.0: 2.2064e-01,
1638.0: 2.1393e-01,
1639.0: 2.1410e-01,
1640.0: 2.0913e-01,
1641.0: 2.1348e-01,
1642.0: 2.1467e-01,
1643.0: 2.0939e-01,
1644.0: 2.1733e-01,
1645.0: 2.1216e-01,
1646.0: 2.1147e-01,
1647.0: 2.2135e-01,
1648.0: 2.1057e-01,
1649.0: 2.1261e-01,
1650.0: 2.1902e-01,
1651.0: 2.0281e-01,
1652.0: 2.1754e-01,
1653.0: 2.1661e-01,
1654.0: 2.0991e-01,
1655.0: 2.1619e-01,
1656.0: 2.1494e-01,
1657.0: 2.1613e-01,
1658.0: 2.1870e-01,
1659.0: 2.1514e-01,
1660.0: 2.1721e-01,
1661.0: 2.1774e-01,
1662.0: 2.1313e-01,
1663.0: 2.1630e-01,
1664.0: 2.1498e-01,
1665.0: 2.0607e-01,
1666.0: 1.7411e-01,
1667.0: 2.0502e-01,
1668.0: 2.0881e-01,
1669.0: 2.0939e-01,
1670.0: 2.1573e-01,
1671.0: 2.1294e-01,
1672.0: 2.0582e-01,
1673.0: 2.1052e-01,
1674.0: 2.1002e-01,
1675.0: 2.0793e-01,
1676.0: 2.0584e-01,
1677.0: 2.0668e-01,
1678.0: 2.0365e-01,
1679.0: 2.0741e-01,
1680.0: 2.0017e-01,
1681.0: 1.8936e-01,
1682.0: 1.9830e-01,
1683.0: 2.0357e-01,
1684.0: 1.9283e-01,
1685.0: 2.0763e-01,
1686.0: 2.0476e-01,
1687.0: 1.9951e-01,
1688.0: 2.0465e-01,
1689.0: 2.0178e-01,
1690.0: 1.9991e-01,
1691.0: 1.8808e-01,
1692.0: 2.0174e-01,
1693.0: 2.0587e-01,
1694.0: 1.9950e-01,
1695.0: 2.0427e-01,
1696.0: 2.0383e-01,
1697.0: 1.7649e-01,
1698.0: 2.0207e-01,
1699.0: 2.0024e-01,
1700.0: 1.9464e-01,
1702.0: 1.9874e-01,
1705.0: 1.9275e-01,
1710.0: 1.8316e-01,
1715.0: 1.8490e-01,
1720.0: 1.8231e-01,
1725.0: 1.7367e-01,
1730.0: 1.6979e-01,
1735.0: 1.5758e-01,
1740.0: 1.6405e-01,
1745.0: 1.5105e-01,
1750.0: 1.6162e-01,
1755.0: 1.4931e-01,
1760.0: 1.5608e-01,
1765.0: 1.2967e-01,
1770.0: 1.3831e-01,
1775.0: 1.1213e-01,
1780.0: 9.8143e-02,
1785.0: 7.5201e-02,
1790.0: 8.6831e-02,
1795.0: 4.5864e-02,
1800.0: 3.1112e-02,
1805.0: 1.4485e-02,
1810.0: 9.4762e-03,
1815.0: 3.2093e-03,
1820.0: 9.6578e-04,
1825.0: 1.2463e-03,
1830.0: 5.0896e-06,
1835.0: 6.2784e-06,
1840.0: 6.1337e-08,
1845.0: 6.1298e-06,
1850.0: 2.9348e-06,
1855.0: 2.7795e-07,
1860.0: 1.0920e-05,
1865.0: 1.6644e-05,
1870.0: 2.6148e-10,
1875.0: 4.4296e-10,
1880.0: 7.6123e-05,
1885.0: 4.3129e-05,
1890.0: 2.1956e-04,
1895.0: 1.2743e-04,
1900.0: 8.4916e-07,
1905.0: 5.5798e-07,
1910.0: 2.2726e-05,
1915.0: 1.9673e-05,
1920.0: 4.4451e-04,
1925.0: 9.2326e-04,
1930.0: 5.4474e-04,
1935.0: 3.5428e-03,
1940.0: 3.2357e-03,
1945.0: 1.0707e-02,
1950.0: 1.6482e-02,
1955.0: 9.8860e-03,
1960.0: 2.1569e-02,
1965.0: 2.8114e-02,
1970.0: 4.8055e-02,
1975.0: 6.6730e-02,
1980.0: 7.4234e-02,
1985.0: 8.1625e-02,
1990.0: 8.4124e-02,
1995.0: 7.9787e-02,
2000.0: 3.7491e-02,
2005.0: 1.4747e-02,
2010.0: 3.9071e-02,
2015.0: 2.6208e-02,
2020.0: 4.4239e-02,
2025.0: 7.2779e-02,
2030.0: 8.3460e-02,
2035.0: 9.4808e-02,
2040.0: 8.8344e-02,
2045.0: 8.9636e-02,
2050.0: 6.6892e-02,
2055.0: 5.4090e-02,
2060.0: 6.8157e-02,
2065.0: 6.0962e-02,
2070.0: 6.4715e-02,
2075.0: 7.6305e-02,
2080.0: 8.5528e-02,
2085.0: 8.3847e-02,
2090.0: 8.7779e-02,
2095.0: 8.8421e-02,
2100.0: 8.4869e-02,
2105.0: 9.1771e-02,
2110.0: 8.8320e-02,
2115.0: 9.0308e-02,
2120.0: 8.6281e-02,
2125.0: 8.7303e-02,
2130.0: 8.8422e-02,
2135.0: 8.8679e-02,
2140.0: 8.9390e-02,
2145.0: 8.8132e-02,
2150.0: 8.3369e-02,
2155.0: 8.3566e-02,
2160.0: 8.2912e-02,
2165.0: 7.5175e-02,
2170.0: 8.0776e-02,
2175.0: 7.9257e-02,
2180.0: 8.0597e-02,
2185.0: 7.3458e-02,
2190.0: 7.7905e-02,
2195.0: 7.7833e-02,
2200.0: 7.0175e-02,
2205.0: 7.2947e-02,
2210.0: 7.8174e-02,
2215.0: 7.5189e-02,
2220.0: 7.6631e-02,
2225.0: 7.4400e-02,
2230.0: 7.4727e-02,
2235.0: 7.3290e-02,
2240.0: 7.2140e-02,
2245.0: 6.9911e-02,
2250.0: 7.1034e-02,
2255.0: 6.6865e-02,
2260.0: 6.6143e-02,
2265.0: 6.7355e-02,
2270.0: 6.4138e-02,
2275.0: 6.3309e-02,
2280.0: 6.5551e-02,
2285.0: 6.2389e-02,
2290.0: 6.2534e-02,
2295.0: 6.0603e-02,
2300.0: 5.8193e-02,
2305.0: 5.8544e-02,
2310.0: 6.3189e-02,
2315.0: 5.7528e-02,
2320.0: 5.1489e-02,
2325.0: 5.5626e-02,
2330.0: 5.6231e-02,
2335.0: 5.7362e-02,
2340.0: 4.5366e-02,
2345.0: 5.0869e-02,
2350.0: 4.1115e-02,
2355.0: 4.6988e-02,
2360.0: 4.9724e-02,
2365.0: 4.8909e-02,
2370.0: 3.0514e-02,
2375.0: 4.3704e-02,
2380.0: 4.2128e-02,
2385.0: 3.0525e-02,
2390.0: 3.6748e-02,
2395.0: 4.0199e-02,
2400.0: 4.3726e-02,
2405.0: 3.3286e-02,
2410.0: 3.3504e-02,
2415.0: 2.7058e-02,
2420.0: 2.6358e-02,
2425.0: 3.2802e-02,
2430.0: 4.4725e-02,
2435.0: 1.4765e-02,
2440.0: 4.2926e-02,
2445.0: 2.0657e-02,
2450.0: 1.3523e-02,
2455.0: 2.4695e-02,
2460.0: 3.3157e-02,
2465.0: 2.4009e-02,
2470.0: 1.6635e-02,
2475.0: 1.6368e-02,
2480.0: 7.9996e-03,
2485.0: 5.5840e-03,
2490.0: 3.4957e-03,
2495.0: 2.8647e-03,
2500.0: 7.0328e-03,
2505.0: 1.5124e-03,
2510.0: 2.2063e-03,
2515.0: 5.1644e-04,
2520.0: 3.6879e-04,
2525.0: 4.1194e-05,
2530.0: 6.3279e-07,
2535.0: 1.7415e-07,
2540.0: 3.7521e-07,
2545.0: 5.3469e-11,
2550.0: 2.8066e-13,
2555.0: 1.0377e-09,
2560.0: 3.0842e-11,
2565.0: | |
#!/usr/bin/env python
# coding: utf-8
# # 2.4 Support vector machine
# There is a lot of work about SVM in literature , see
# [@drucker1997support; @ben2001support; @cortes1995support; @cristianini2000introduction]
# for example. Given a binary linearly separable classification dataset
# ${(x_i,y_i)}_{i = 1}^N$, where
# $x_i\in \mathbb{R}^d, y_i\in \left \{\begin{pmatrix}1\\0\end{pmatrix}, \begin{pmatrix}0\\1\end{pmatrix}\right \}$.
# We use $A_1,A_2$ to denote the data with label
# $\begin{pmatrix}1\\0\end{pmatrix}, \begin{pmatrix}0\\1\end{pmatrix}$,
# respectively. Our goal is to find a $\theta = (w,b)$ where
# $w\in \mathbb{R}^{1\times d}, b\in \mathbb{R}$ such that the hyperplane
# $H_{\theta} = \{x:wx + b = 0\}$ can separate $A_1,A_2$.
# ## 2.4.1 Binary SVM
# Binary Support Vector Machine (SVM for short hereinafter) wants to find
# the classifiable hyperplane which has the biggest distance with $A_1$
# and $A_2$. Assume that we have the hyperplanes $wx+b=\pm 1$ with
# $$wx_i+b\ge 1 \quad \mbox{for}\quad x_i\in A_1,\quad wx_i+b\le -1 \quad \mbox{for}\quad x_i\in A_2,$$
# which is similar to the definition
# [\[2classH\]](#2classH){reference-type="eqref" reference="2classH"}. Let
# $y_1=\begin{pmatrix}1\\0\end{pmatrix}$ for $x_i\in A_1$ and
# $y_2=\begin{pmatrix}0\\1\end{pmatrix}$ for $x_i\in A_2$. Note that $w$
# is normal to the hyperplane and the distance between the points
# satisfying $$wx_i+b=\pm 1$$ and the hyperplane $wx+b=0$ is
# $\displaystyle {1\over \|w\|_2}$. Thus, the width of the margin is
# $\displaystyle {2\over \|w\|_2}$ as shown in Figure
# [1](#fig:margin){reference-type="ref" reference="fig:margin"}.
#
# {#fig:margin width="2in"}
#
# For any $w$ and $b$, the smallest distance between points and the
# hyperplane is $$\frac{\min_{i} \ell_i(wx_i+b)}{\|w\|_2},$$ where
# $\ell_i=1-2e_2^Ty_i$. Note that $$\ell_i=\begin{cases}
# 1 & \mbox{ if } y_i=\begin{pmatrix}1\\0\end{pmatrix},
# \\
# -1 &\mbox{ if } y_i=\begin{pmatrix}0\\1\end{pmatrix}.
# \end{cases}$$ Consider the problem
# $$\max_{w,b} \frac{\min_{i} \ell_i(wx_i+b)}{\|w\|_2}.$$ Intuitively, the
# best separating hyperplane $H$ is only determined by those data points
# that are closest to $H$. Those data points are called support vectors, and
# this method is called the support vector machine.
#
# Without loss of generality, we may restrict the norm of $\|w\|$ to be 1,
# which leads to an equivalent optimization problem
# $$\max_{\|w\|_2 = 1} \min_{i} \ell_i(wx_i+b)$$ Actually, we can prove
# $\displaystyle \mathop{\rm argmax}_{\|w\|_2 = 1} \min_{i} \ell_i(wx_i+b)$
# is nonempty, but here we just admit this fact and only prove the
# uniqueness of the solution.
#
# ::: lemma
# If $A_1,A_2$ are linearly separable, then $$\label{binarySVM}
# \mathop{\rm argmax}_{\|w\|_2 = 1} \min_{i} \ell_i(wx_i+b)$$ is
# nonempty.
# :::
#
# ::: proof
# *Proof.* Take $x_{i_1} \in A_1$ and $x_{i_2} \in A_2$, given
# $(w,b)\in \{(w,b): l_i(wx_i +b)>0, \forall i\}$, we have $$\begin{cases}
# wx_{i_1} + b > 0,\\
# wx_{i_2} + b < 0
# \end{cases}$$ which implies $|b| < \max_{i} \|x_i\|_2$. So we
# have
# $$\mathop{\rm argmax}_{\|w\|_2 = 1} \min_{i} \ell_i(wx_i+b) = \mathop{\rm argmax}_{\|w\|_2 = 1, ~|b|\leq \max_{i} \|x_i\|_2} \min_{i} \ell_i(wx_i+b) \neq \emptyset.$$ ◻
# :::
#
# ::: lemma
# If $A_1,A_2$ are linearly separable, then
# $$\mathop{\rm argmax}_{\|w\|_2 = 1} \min_{i} \ell_i(wx_i+b)$$ is a
# singleton set.
# :::
#
# ::: proof
# *Proof.* Denote $\displaystyle m(w,b) = \min_{i} \ell_i(wx_i+b)$. Notice
# that $m(w,b)$ is a concave homogeneous function w.r.t $w,b$ and
# $\|\cdot\|_2$ is a strictly convex norm. Suppose there are two solution
# $(w_1,b_1)$ and $(w_2,b_2)$ such that $w_1 \neq w_2$, take
# $\overline{w} = \frac{w_1 + w_2}{2}, \overline{b} = \frac{b_1 + b_2}{2}$,
# we must have
# $$m(\overline{w},\overline{b}) \geq \frac{m(w_1,b_1)+ m(w_2,b_2)}{2} = \max_{\|w\|_2 = 1} m(w,b),$$
# and $$\|\overline{w}\|_2 < 1.$$ So
# $$m(\frac{\overline{w}}{\|\overline{w}\|_2},\frac{\overline{b}}{\|\overline{w}\|_2}) = \frac{m(\overline{w},\overline{b}) }{\|\overline{w}\|_2} > \max_{\|w\|_2 = 1} m(w,b),$$
# which leads to a contradiction. So all the solutions must have the same
# $w$, we denote it as $w^*$. Then if $(w^*,b^*)$ is a solution of problem
# ([\[binarySVM\]](#binarySVM){reference-type="ref"
# reference="binarySVM"}), we must have
# $$b^* \in \mathop{\rm argmax}_{b} m(w^*,b)$$ Actually,
# $$m(w^*,b) = \min\{b+\min_{x\in A_1} w^*x, -b +\min_{x\in A_2} (-w^*x)\}.$$
# It is easy to observe that
# $\displaystyle \mathop{\rm argmax}_{b} m(w^*,b)$ is a singleton set and
# $$b^* = \frac{\min_{x\in A_2} (-w^*x) - \min_{x\in A_1} w^*x}{2}.$$ ◻
# :::
#
# Denote $$\label{maxSVM}
# \theta^*_{SVM} = (w_{SVM}^*,b_{SVM}^*) = \mathop{\rm argmax}_{\|w\|_2 = 1} \min_{i} \ell_i(wx_i+b).$$
#
# ::: theorem
# Let $\theta^*_{SVM} = (w_{SVM}^*,b_{SVM}^*)$ be the solution of
# [\[maxSVM\]](#maxSVM){reference-type="eqref" reference="maxSVM"}. Then,
# $w_{SVM}^*$ must be a linear combination of $x_i^T, i = 1,2,\cdots,N$.
# :::
#
# ::: proof
# *Proof.* Denote $$S = {\rm span} \{x_i^T\}_{i=1}^N.$$ Then we have
# $$\mathbb{R}^{1\times d} = S \oplus^{\perp} S^{\perp}.$$ So $w_{SVM}^*$
# can be uniquely decomposed as $w_{SVM}^* = w^*_S + w^*_{S^{\perp}}$
# where $w_S\in S$ and $w^*_{S^{\perp}}\in S^{\perp}$. We will prove that
# $w^*_{S^{\perp}} = 0$. Suppose not, we have
# $$\|w^*_S\|_2 < \|w_{SVM}^*\|_2 = 1.$$ Notice that
# $$w_{SVM}^* x_i = w_S^* x_i,\ \forall i = 1,2,\cdots,N.$$ Thus we have
# $$\min_{i} \ell_i(w_{SVM}^*x_i+b_{SVM}^*) = \min_{i} \ell_i(w_S^*x_i+b_{SVM}^*).$$
# So
# $$\min_{i} \ell_i(w_{SVM}^*x_i+b_{SVM}^*) < \frac{\min_{i} \ell_i(w_S^*x_i+b_{SVM}^*)}{\|w_S^*\|_2} = \min_{i} \ell_i(\frac{w^*_S}{\|w_S^*\|_2}x_i+\frac{b_{SVM}^*}{\|w_S^*\|_2}),$$
# which leads to a contradiction to the definition of $\theta_{SVM}^*$. ◻
# :::
# ## 2.4.2 Soft margin maximization and kernel methods
# We may rewrite the SVM problem as $$\begin{aligned}
# \max_{w,b}&\ {2\over \|w\|},\\
# s.t.&\ \ell_i(wx_i+b) \geq 1,\ \forall i. \end{aligned}$$ or
# equivalently, $$\begin{aligned}
# {\label{SVM_Quad}}
# \min_{w,b}&\ \|w\|^2,\\
# s.t.&\ \ell_i(wx_i+b) \geq 1,\ \forall i. \end{aligned}$$ Notice
# that the feasible domain of margin maximization is nonempty if and only
# if dataset is linearly separable. So when the data is linearly
# nonseparable, this method cannot produce a classifier at all, not even a
# poor one. One way to handle this problem is to relax the constraint
# by adding relaxation variables.
#
# Define soft margin maximization problem $$\begin{aligned}
# {\label{SVM_Quad_soft}}
# \min_{w,b,\xi}&\ \|w\|^2 + \lambda^{-1} \sum_{i = 1}^N\xi_i,\\
# s.t.&\ \ell_i(wx_i+b) + \xi_i \geq 1,\ \forall i. \\
# &\ \xi_i \geq 0.\end{aligned}$$ where $\lambda>0$. The above
# problem is equivalent to $$\begin{aligned}
# \min_{w,b}&\ \|w\|^2 + \lambda^{-1} \sum_{i = 1}^N {\rm ReLU}(1-\ell_i(wx_i+b)).
# \end{aligned}$$ Thus, soft margin maximization problem
# [\[SVM_Quad_soft\]](#SVM_Quad_soft){reference-type="eqref"
# reference="SVM_Quad_soft"} can be reformulated as $$\begin{aligned}
# {\label{SVM_soft}}
# \min_{w,b}&\ \sum_{i = 1}^N {\rm ReLU}(1-\ell_i(wx_i+b)) + \lambda \|w\|^2.\end{aligned}$$
# We can still prove that the solution of
# ([\[SVM_soft\]](#SVM_soft){reference-type="ref" reference="SVM_soft"})
# satisfies the representation theorem. Thus we can restrict $w$ to be in
# the set $S$. Assume that $$w = \sum_{i = 1}^N \alpha_i x_i^T.$$ Denote
# $\alpha = (\alpha_1,\cdots,\alpha_N)^T$. We can rewrite the problem
# ([\[SVM_soft\]](#SVM_soft){reference-type="ref" reference="SVM_soft"})
# as
# $$\min_{\alpha}\ \sum_{i = 1}^N {\rm ReLU}(1-\ell_i(\sum_{j = 1}^N \langle x_i,x_j\rangle \alpha_j+b)) + \lambda\alpha^T \big(\langle x_i,x_j\rangle\big)_{N\times N} \alpha\\$$
# We can see that the whole problem is only determined by the inner
# product of data points but not the data itself directly.\
# Using the above formulation, we can introduce nonlinearity in SVM. Denote the
# input space as $X$ where $\{x_i\}_{i=1}^N \subset X$. We use two steps
# to obtain a nonlinear classification model. First, we use a nonlinear
# feature mapping $\phi: X\rightarrow \mathcal{H}$ to map input space $X$
# to a feature space $\mathcal{H}$. Second, we use linear SVM to do
# classification on $\{\phi(x_i)\}_{i=1}^N\subset \mathcal{H}$.\
# We may just assume that the dataset after feature mapping $\phi$ is linearly
# separable. Then, the SVM problem after doing feature mapping can be
# formulated as problem ([\[SVM_soft\]](#SVM_soft){reference-type="ref"
# reference="SVM_soft"}) as
# $$\min_{\alpha}\ \sum_{i = 1}^N {\rm ReLU}(1-\ell_i(\sum_{j = 1}^N \langle \phi(x_i),\phi(x_j)\rangle \alpha_j+b)) + \lambda\alpha^T \big(\langle \phi(x_i),\phi(x_j)\rangle\big)_{N\times N} \alpha\\$$
#
# Notice that to obtain the above problem we don't really need to know
# what exactly is the nonlinear mapping $\phi$, but only need to compute
# the value of $\langle\phi(x_i),\phi(x_j)\rangle$. So we define a kernel function
# $k: X\times X\rightarrow \mathbb{R}$ such that
# $$k(x,y) = \langle\phi(x),\phi(y)\rangle,\ x,y\in X.$$ Then the kernel
# SVM can be formulated as
# $$\min_{\alpha}\ \sum_{i = 1}^N {\rm ReLU}(1-\ell_i(\sum_{j = 1}^N k(x_i,x_j) \alpha_j+b)) + \lambda\alpha^T \big(k(x_i,x_j)\big)_{N\times N} \alpha\\$$
# In practice, we just need to find a proper kernel function instead of a
# good nonlinear feature mapping. Here we list some commonly used kernel
# functions:
#
# - Polynomial kernel:
# $k(x,y) = (a\langle x,y\rangle+ b)^n, a > 0, b\geq 0, n\in \mathbb{N}^+$.
#
# - Gaussian kernel: $k(x,y) = e^{-\gamma\|x-y\|^2}, \gamma > 0$.
#
# - Laplacian kernel: $k(x,y) = e^{-\gamma\|x-y\|}, \gamma > 0$
#
# - Tanh kernel: $k(x,y) = \tanh(a\langle x,y\rangle+b), a>0, b\geq 0.$
# ## 2.4.3 Binary logistic regression
#
# In multi-class Logistic regression, if we use $\|W\|$ to replace
# $\|\bm\theta\|$ in regularization term, we can get another version of
# logistic regression:
# $$\mathcal L_\lambda(\bm \theta) = - \sum_{i=1}^k \sum_{x\in A_i} \log p_{i}(x;\bm \theta) + \lambda R(\|W\|),$$
# where $p_i(x;\bm \theta)$ and $R(\cdot)$ share the same definitions as
# in previous sections of logistic regression. Let
# $$\bm\Theta_{\lambda} = \mathop{{\arg\min}}_{\bm\theta} \mathcal L_\lambda(\bm\theta).$$
# The following lemma follows directly from the definition of
# $p_{i}(x;\bm \theta)$.
#
# ::: lemma
# | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import mock
import pytest
from kafka.common import NotLeaderForPartitionError
from kafka.common import OffsetCommitResponse
from kafka.common import OffsetFetchResponse
from kafka.common import OffsetResponse
from kafka.common import RequestTimedOutError
from kafka.common import UnknownTopicOrPartitionError
from yelp_kafka.error import InvalidOffsetStorageError
from yelp_kafka.offsets import _verify_commit_offsets_requests
from yelp_kafka.offsets import advance_consumer_offsets
from yelp_kafka.offsets import get_current_consumer_offsets
from yelp_kafka.offsets import get_topics_watermarks
from yelp_kafka.offsets import OffsetCommitError
from yelp_kafka.offsets import PartitionOffsets
from yelp_kafka.offsets import rewind_consumer_offsets
from yelp_kafka.offsets import set_consumer_offsets
from yelp_kafka.offsets import UnknownPartitions
from yelp_kafka.offsets import UnknownTopic
@pytest.fixture(params=[['topic1'], set(['topic1']), ('topic1',)])
def topics(request):
    """Parametrized fixture: the same single-topic collection as list, set and tuple."""
    return request.param
class MyKafkaClient(object):
    """In-memory stand-in for a Kafka client, backed by plain dicts.

    All offset dicts are keyed by topic name, then partition id:

    Attributes:
        topics: dict mapping topic name -> list of partition ids.
        group_offsets: committed consumer offsets per topic/partition.
        high_offsets: highmark (latest) offsets per topic/partition.
        low_offsets: lowmark (earliest) offsets per topic/partition.
        commit_error: when True, every commit response carries an error code.
        offset_request_error: when True, every offset response carries an error code.
    """
    def __init__(
        self,
        topics,
        group_offsets,
        high_offsets,
        low_offsets
    ):
        self.topics = topics
        self.group_offsets = group_offsets
        self.high_offsets = high_offsets
        self.low_offsets = low_offsets
        # Error toggles; flipped on via set_commit_error / set_offset_request_error.
        self.commit_error = False
        self.offset_request_error = False
    def load_metadata_for_topics(self):
        """No-op: all metadata is supplied directly to __init__."""
        pass
    def send_offset_request(
        self,
        payloads=None,
        fail_on_error=True,
        callback=None
    ):
        """Answer watermark requests: req.time == -1 means highmark, else lowmark.

        NOTE(review): assumes req.topic is bytes (it is .decode()d) and that the
        topic exists in high/low offsets — an unknown topic would raise KeyError
        before the error-code branch; callers guard with has_metadata_for_topic.
        """
        if payloads is None:
            payloads = []
        resps = []
        for req in payloads:
            if req.time == -1:
                offset = self.high_offsets[req.topic.decode()].get(req.partition, -1)
            else:
                offset = self.low_offsets[req.topic.decode()].get(req.partition, -1)
            if self.offset_request_error:
                error_code = NotLeaderForPartitionError.errno
            elif req.partition not in self.topics[req.topic.decode()]:
                error_code = UnknownTopicOrPartitionError.errno
            else:
                error_code = 0
            resps.append(OffsetResponse(
                req.topic.decode(),
                req.partition,
                error_code,
                (offset,)
            ))
        return [resp if not callback else callback(resp) for resp in resps]
    def set_commit_error(self):
        """Make all subsequent commit requests fail with RequestTimedOutError."""
        self.commit_error = True
    def set_offset_request_error(self):
        """Make all subsequent offset requests fail with NotLeaderForPartitionError."""
        self.offset_request_error = True
    def send_offset_commit_request(
        self,
        group,
        payloads=None,
        fail_on_error=True,
        callback=None
    ):
        """Commit offsets into group_offsets, or report a timeout when commit_error is set."""
        if payloads is None:
            payloads = []
        resps = []
        for req in payloads:
            if not self.commit_error:
                # Successful commit mutates the stored group offsets.
                self.group_offsets[req.topic.decode()][req.partition] = req.offset
                resps.append(
                    OffsetCommitResponse(
                        req.topic.decode(),
                        req.partition,
                        0
                    )
                )
            else:
                resps.append(
                    OffsetCommitResponse(
                        req.topic.decode(),
                        req.partition,
                        RequestTimedOutError.errno
                    )
                )
        return [resp if not callback else callback(resp) for resp in resps]
    def send_offset_commit_request_kafka(
        self,
        group,
        payloads=None,
        fail_on_error=True,
        callback=None
    ):
        """Kafka-storage variant: identical behaviour, delegates to the zk version."""
        return self.send_offset_commit_request(
            group,
            payloads,
            fail_on_error,
            callback,
        )
    def has_metadata_for_topic(self, t):
        """Return True when the topic was declared at construction time."""
        return t in self.topics
    def get_partition_ids_for_topic(self, topic):
        """Return the partition ids configured for the topic (KeyError if unknown)."""
        return self.topics[topic]
    def send_offset_fetch_request(
        self,
        group,
        payloads,
        fail_on_error,
        callback,
    ):
        """Zookeeper-storage fetch: delegates to the shared implementation."""
        return self._send_offset_fetch_request_either(
            group,
            payloads,
            fail_on_error,
            callback,
        )
    def send_offset_fetch_request_kafka(
        self,
        group,
        payloads,
        fail_on_error,
        callback
    ):
        """Kafka-storage fetch: delegates to the shared implementation."""
        return self._send_offset_fetch_request_either(
            group,
            payloads,
            fail_on_error,
            callback,
        )
    def _send_offset_fetch_request_either(
        self,
        group,
        payloads,
        fail_on_error,
        callback
    ):
        # Error code 3 (unknown topic/partition) when no offset is committed.
        # NOTE(review): unlike the other senders, this assumes callback is
        # always provided — it is called unconditionally.
        return [
            callback(
                OffsetFetchResponse(
                    req.topic.decode(),
                    req.partition,
                    self.group_offsets[req.topic.decode()].get(req.partition, -1),
                    None,
                    0 if req.partition in self.group_offsets[req.topic.decode()] else 3
                ),
            )
            for req in payloads
        ]
class TestOffsetsBase(object):
    """Shared fixture data for offset tests.

    Invariants: low <= committed group offset <= high for every committed
    partition; topic2 partition 1 has no committed group offset at all.
    """
    # Topic layout: partition ids per topic.
    topics = {
        'topic1': [0, 1, 2],
        'topic2': [0, 1]
    }
    group = 'group_name'
    # Highmark (latest) offsets per topic/partition.
    high_offsets = {
        'topic1': {
            0: 30,
            1: 30,
            2: 30,
        },
        'topic2': {
            0: 50,
            1: 50
        }
    }
    # Lowmark (earliest) offsets per topic/partition.
    low_offsets = {
        'topic1': {
            0: 10,
            1: 5,
            2: 3,
        },
        'topic2': {
            0: 5,
            1: 5,
        }
    }
    # Committed consumer offsets; note topic2 partition 1 is deliberately absent.
    group_offsets = {
        'topic1': {
            0: 30,
            1: 20,
            2: 10,
        },
        'topic2': {
            0: 15,
        }
    }
    @pytest.fixture
    def kafka_client_mock(self):
        """Fresh mock client per test; group_offsets is deep-copied so commits
        performed by one test don't leak into another."""
        return MyKafkaClient(
            self.topics,
            copy.deepcopy(self.group_offsets),
            self.high_offsets,
            self.low_offsets
        )
class TestOffsets(TestOffsetsBase):
    def test_get_current_consumer_offsets_invalid_arguments(self, kafka_client_mock):
        """A plain string for the topics argument raises TypeError."""
        with pytest.raises(TypeError):
            get_current_consumer_offsets(
                kafka_client_mock,
                "this won't even be consulted",
                "this should be a list or dict",
            )
    def test_get_current_consumer_offsets_unknown_topic(self, kafka_client_mock):
        """An unknown topic raises UnknownTopic when raise_on_error is default."""
        with pytest.raises(UnknownTopic):
            get_current_consumer_offsets(
                kafka_client_mock,
                "this won't even be consulted",
                ["something that doesn't exist"],
            )
    def test_get_current_consumer_offsets_unknown_topic_no_fail(self, kafka_client_mock):
        """With raise_on_error=False an unknown topic yields an empty result."""
        actual = get_current_consumer_offsets(
            kafka_client_mock,
            "this won't even be consulted",
            ["something that doesn't exist"],
            raise_on_error=False
        )
        assert not actual
    def test_get_current_consumer_offsets_unknown_partitions(self, kafka_client_mock):
        """A nonexistent partition raises UnknownPartitions."""
        with pytest.raises(UnknownPartitions):
            get_current_consumer_offsets(
                kafka_client_mock,
                self.group,
                {'topic1': [99]},
            )
    def test_get_current_consumer_offsets_unknown_partitions_no_fail(self, kafka_client_mock):
        """With raise_on_error=False a nonexistent partition yields an empty result."""
        actual = get_current_consumer_offsets(
            kafka_client_mock,
            self.group,
            {'topic1': [99]},
            raise_on_error=False
        )
        assert not actual
    def test_get_current_consumer_offsets_invalid_partition_subset(self, kafka_client_mock):
        """Mixing a valid and an invalid partition still raises UnknownPartitions."""
        with pytest.raises(UnknownPartitions):
            get_current_consumer_offsets(
                kafka_client_mock,
                self.group,
                {'topic1': [1, 99]},
            )
    def test_get_current_consumer_offsets_invalid_partition_subset_no_fail(self, kafka_client_mock):
        """With raise_on_error=False the valid partition is returned, the bad one dropped."""
        actual = get_current_consumer_offsets(
            kafka_client_mock,
            self.group,
            {'topic1': [1, 99]},
            raise_on_error=False
        )
        assert actual['topic1'][1] == 20
        # Partition 99 does not exist so it shouldn't be in the result
        assert 99 not in actual['topic1']
    def test_get_current_consumer_offsets(self, topics, kafka_client_mock):
        """Happy path: committed offsets for topic1 come back regardless of the
        topics collection type (list/set/tuple via the parametrized fixture)."""
        actual = get_current_consumer_offsets(
            kafka_client_mock,
            self.group,
            topics
        )
        assert actual == {'topic1': {0: 30, 1: 20, 2: 10}}
    def test_get_current_consumer_offsets_from_zookeeper(
        self,
        topics,
        kafka_client_mock
    ):
        """offset_storage='zookeeper' routes to send_offset_fetch_request only."""
        kafka_client_mock = mock.Mock(wraps=kafka_client_mock)
        get_current_consumer_offsets(
            kafka_client_mock,
            self.group,
            topics,
            offset_storage='zookeeper',
        )
        assert kafka_client_mock.send_offset_fetch_request.call_count == 1
        assert kafka_client_mock.send_offset_fetch_request_kafka.call_count == 0
    def test_get_current_consumer_offsets_from_kafka(
        self,
        topics,
        kafka_client_mock
    ):
        """offset_storage='kafka' routes to send_offset_fetch_request_kafka only."""
        kafka_client_mock = mock.Mock(wraps=kafka_client_mock)
        get_current_consumer_offsets(
            kafka_client_mock,
            self.group,
            topics,
            offset_storage='kafka',
        )
        assert kafka_client_mock.send_offset_fetch_request.call_count == 0
        assert kafka_client_mock.send_offset_fetch_request_kafka.call_count == 1
    def test_get_current_consumer_offsets_invalid_storage(
        self,
        topics,
        kafka_client_mock
    ):
        """An unrecognised offset_storage raises before any fetch request is sent."""
        kafka_client_mock = mock.Mock(wraps=kafka_client_mock)
        with pytest.raises(InvalidOffsetStorageError):
            get_current_consumer_offsets(
                kafka_client_mock,
                self.group,
                topics,
                offset_storage='random_string',
            )
        assert kafka_client_mock.send_offset_fetch_request.call_count == 0
        assert kafka_client_mock.send_offset_fetch_request_kafka.call_count == 0
    def test_get_topics_watermarks_invalid_arguments(self, kafka_client_mock):
        """A plain string for the topics argument raises TypeError."""
        with pytest.raises(TypeError):
            get_topics_watermarks(
                kafka_client_mock,
                "this should be a list or dict",
            )
    def test_get_topics_watermarks_unknown_topic(self, kafka_client_mock):
        """An unknown topic raises UnknownTopic by default."""
        with pytest.raises(UnknownTopic):
            get_topics_watermarks(
                kafka_client_mock,
                ["something that doesn't exist"],
            )
    def test_get_topics_watermarks_unknown_topic_no_fail(self, kafka_client_mock):
        """With raise_on_error=False an unknown topic yields an empty result."""
        actual = get_topics_watermarks(
            kafka_client_mock,
            ["something that doesn't exist"],
            raise_on_error=False,
        )
        assert not actual
    def test_get_topics_watermarks_unknown_partitions(self, kafka_client_mock):
        """A nonexistent partition raises UnknownPartitions."""
        with pytest.raises(UnknownPartitions):
            get_topics_watermarks(
                kafka_client_mock,
                {'topic1': [99]},
            )
    def test_get_topics_watermarks_unknown_partitions_no_fail(self, kafka_client_mock):
        """With raise_on_error=False a nonexistent partition yields an empty result."""
        actual = get_topics_watermarks(
            kafka_client_mock,
            {'topic1': [99]},
            raise_on_error=False,
        )
        assert not actual
    def test_get_topics_watermarks_invalid_partition_subset(self, kafka_client_mock):
        """Mixing a valid and an invalid partition still raises UnknownPartitions."""
        with pytest.raises(UnknownPartitions):
            get_topics_watermarks(
                kafka_client_mock,
                {'topic1': [1, 99]},
            )
    def test_get_topics_watermarks_invalid_partition_subset_no_fail(self, kafka_client_mock):
        """With raise_on_error=False only the valid partition's watermarks are returned."""
        actual = get_topics_watermarks(
            kafka_client_mock,
            {'topic1': [1, 99]},
            raise_on_error=False,
        )
        assert actual['topic1'][1] == PartitionOffsets('topic1', 1, 30, 5)
        assert 99 not in actual['topic1']
    def test_get_topics_watermarks(self, topics, kafka_client_mock):
        """Happy path: high/low watermarks for all partitions of topic1."""
        actual = get_topics_watermarks(
            kafka_client_mock,
            topics,
        )
        assert actual == {'topic1': {
            0: PartitionOffsets('topic1', 0, 30, 10),
            1: PartitionOffsets('topic1', 1, 30, 5),
            2: PartitionOffsets('topic1', 2, 30, 3),
        }}
    def test_get_topics_watermarks_commit_error(self, topics, kafka_client_mock):
        """Failed offset requests surface as -1/-1 watermarks.

        NOTE(review): despite the method name, this triggers the mock's
        offset_request_error, not commit_error.
        """
        kafka_client_mock.set_offset_request_error()
        actual = get_topics_watermarks(
            kafka_client_mock,
            {'topic1': [0]},
        )
        assert actual == {'topic1': {
            0: PartitionOffsets('topic1', 0, -1, -1),
        }}
    def test__verify_commit_offsets_requests(self, kafka_client_mock):
        """Valid offsets pass through verification unchanged."""
        new_offsets = {
            'topic1': {
                0: 123,
                1: 456,
            },
            'topic2': {
                0: 12,
            },
        }
        valid_new_offsets = _verify_commit_offsets_requests(
            kafka_client_mock,
            new_offsets,
            True
        )
        assert new_offsets == valid_new_offsets
    def test__verify_commit_offsets_requests_invalid_types_raise_error(
        self,
        kafka_client_mock
    ):
        """A non-dict offsets argument raises TypeError (raise_on_error=True)."""
        new_offsets = "my_str"
        with pytest.raises(TypeError):
            _verify_commit_offsets_requests(
                kafka_client_mock,
                new_offsets,
                True
            )
    def test__verify_commit_offsets_requests_invalid_types_no_raise_error(
        self,
        kafka_client_mock
    ):
        """Malformed per-topic values raise TypeError even with raise_on_error=False."""
        new_offsets = {'topic1': 2, 'topic2': 1}
        with pytest.raises(TypeError):
            _verify_commit_offsets_requests(
                kafka_client_mock,
                new_offsets,
                False
            )
    def test__verify_commit_offsets_requests_bad_partitions(
        self,
        kafka_client_mock
    ):
        """Unknown partitions raise UnknownPartitions when raise_on_error=True."""
        new_offsets = {
            'topic1': {
                23: 123,
                11: 456,
            },
            'topic2': {
                21: 12,
            },
        }
        with pytest.raises(UnknownPartitions):
            _verify_commit_offsets_requests(
                kafka_client_mock,
                new_offsets,
                True
            )
    def test__verify_commit_offsets_requests_bad_topics(
        self,
        kafka_client_mock
    ):
        """Unknown topics raise UnknownTopic when raise_on_error=True."""
        new_offsets = {
            'topic32': {
                0: 123,
                1: 456,
            },
            'topic33': {
                0: 12,
            },
        }
        with pytest.raises(UnknownTopic):
            _verify_commit_offsets_requests(
                kafka_client_mock,
                new_offsets,
                True
            )
    def test__verify_commit_offsets_requests_bad_partitions_no_fail(
        self,
        kafka_client_mock
    ):
        """With raise_on_error=False only the known partition survives filtering."""
        new_offsets = {
            'topic1': {
                0: 32,
                23: 123,
                11: 456,
            },
            'topic2': {
                21: 12,
            },
        }
        valid_new_offsets = _verify_commit_offsets_requests(
            kafka_client_mock,
            new_offsets,
            False
        )
        expected_valid_offsets = {
            'topic1': {
                0: 32,
            },
        }
        assert valid_new_offsets == expected_valid_offsets
    def test__verify_commit_offsets_requests_bad_topics_no_fail(
        self,
        kafka_client_mock
    ):
        """With raise_on_error=False unknown topics are silently filtered out."""
        new_offsets = {
            'topic32': {
                0: 123,
                1: 456,
            },
            'topic33': {
                0: 12,
            },
        }
        valid_new_offsets = _verify_commit_offsets_requests(
            kafka_client_mock,
            new_offsets,
            False
        )
        assert valid_new_offsets == {}
    def test_advance_consumer_offsets(self, kafka_client_mock):
        """Advancing moves every committed offset up to the highmark."""
        topics = {
            'topic1': [0, 1, 2],
            'topic2': [0, 1],
        }
        status = list(advance_consumer_offsets(
            kafka_client_mock,
            "group",
            topics
        ))
        assert status == []
        assert kafka_client_mock.group_offsets == self.high_offsets
def test_advance_consumer_offsets_fail(self, kafka_client_mock):
kafka_client_mock.set_commit_error()
topics = {
'topic1': [0, 1, 2],
'topic2': [0, 1],
}
expected_status = [
OffsetCommitError("topic1", 0, RequestTimedOutError.message),
OffsetCommitError("topic1", 1, RequestTimedOutError.message),
OffsetCommitError("topic1", 2, RequestTimedOutError.message),
OffsetCommitError("topic2", 0, RequestTimedOutError.message),
OffsetCommitError("topic2", 1, RequestTimedOutError.message),
]
status = list(advance_consumer_offsets(
kafka_client_mock,
"group",
topics
))
assert len(status) == len(expected_status)
for expected in expected_status:
assert any(actual == expected for actual in status)
assert kafka_client_mock.group_offsets == self.group_offsets
    def test_rewind_consumer_offsets_zk(self, kafka_client_mock):
        """Rewinding moves offsets down to the lowmark via zookeeper storage."""
        topics = {
            'topic1': [0, 1, 2],
            'topic2': [0, 1],
        }
        kafka_client_spy = mock.Mock(wraps=kafka_client_mock)
        status = list(rewind_consumer_offsets(
            kafka_client_spy,
            "group",
            topics
        ))
        assert status == []
        assert kafka_client_mock.group_offsets == self.low_offsets
        assert kafka_client_spy.send_offset_commit_request.called
        assert not kafka_client_spy.send_offset_commit_request_kafka.called
    def test_rewind_consumer_offsets_kafka(self, kafka_client_mock):
        """offset_storage='kafka' routes the commit to the kafka-storage API."""
        topics = {
            'topic1': [0, 1, 2],
            'topic2': [0, 1],
        }
        client_spy = mock.Mock(wraps=kafka_client_mock)
        rewind_consumer_offsets(
            client_spy,
            "group",
            topics,
            offset_storage='kafka',
        )
        assert not client_spy.send_offset_commit_request.called
        assert client_spy.send_offset_commit_request_kafka.called
def test_rewind_consumer_offsets_fail(self, kafka_client_mock):
kafka_client_mock.set_commit_error()
topics = {
'topic1': [0, 1, 2],
'topic2': [0, 1],
}
expected_status = [
OffsetCommitError("topic1", 0, RequestTimedOutError.message),
OffsetCommitError("topic1", 1, RequestTimedOutError.message),
OffsetCommitError("topic1", 2, RequestTimedOutError.message),
OffsetCommitError("topic2", 0, RequestTimedOutError.message),
OffsetCommitError("topic2", 1, RequestTimedOutError.message),
]
status = list(rewind_consumer_offsets(
kafka_client_mock,
"group",
topics
))
assert len(status) == len(expected_status)
for expected in expected_status:
assert any(actual == expected for actual in status)
assert kafka_client_mock.group_offsets == self.group_offsets
def test_set_consumer_offsets_zk(self, kafka_client_mock):
new_offsets = | |
#
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Code responsible for initiating and carrying out the deployment of applications, services and
brokers.
"""
import glob
import json
import logging
from os import path, remove
import subprocess
from zipfile import ZipFile
import datadiff
from pkg_resources import parse_version
import yaml
import apployer.app_file as app_file
from apployer import cf_cli, cf_api, dry_run
from .cf_cli import CommandFailedError
_log = logging.getLogger(__name__) #pylint: disable=invalid-name
# Push strategy names accepted by deploy (their semantics are applied in
# AppDeployer.deploy — TODO confirm against that code, not visible here).
UPGRADE_STRATEGY = 'UPGRADE'
PUSH_ALL_STRATEGY = 'PUSH_ALL'
# Layout of the deployer's working directory (DEPLOYER_OUTPUT).
UNPACKED_ARTIFACTS_FOLDER = 'apps'
FINAL_MANIFESTS_FOLDER = 'manifests'
DEPLOYER_OUTPUT = 'apployer_out'
# Name of the security-group rules file inside an artifact.
SG_RULES_FILENAME = 'set-access.json'
def deploy_appstack(cf_login_data, filled_appstack, artifacts_path, is_dry_run, push_strategy):
    """Deploys the appstack to Cloud Foundry.
    Args:
        cf_login_data (`apployer.cf_cli.CfInfo`): Credentials and addresses needed to log into
            Cloud Foundry.
        filled_appstack (`apployer.appstack.AppStack`): Expanded appstack filled with configuration
            extracted from a live TAP environment.
        artifacts_path (str): Path to a directory containing application artifacts (zips).
        push_strategy (str): Strategy for pushing applications.
        is_dry_run (bool): Is this a dry run? If set to True, no changes (except for creating org
            and space) will be introduced to targeted Cloud Foundry.
    """
    # Dry runs are implemented by temporarily swapping the module-level cf_cli
    # and register_in_application_broker globals for no-op versions; the
    # originals are restored in the finally block even if deployment fails.
    global cf_cli, register_in_application_broker #pylint: disable=C0103,W0603,W0601
    if is_dry_run:
        normal_cf_cli = cf_cli
        cf_cli = dry_run.get_dry_run_cf_cli()
        normal_register_in_app_broker = register_in_application_broker
        register_in_application_broker = dry_run.get_dry_function(register_in_application_broker)
    try:
        _do_deploy(cf_login_data, filled_appstack, artifacts_path, is_dry_run, push_strategy)
    finally:
        # normal_* are only bound when is_dry_run is True, hence the guard.
        if is_dry_run:
            cf_cli = normal_cf_cli
            register_in_application_broker = normal_register_in_app_broker
def _do_deploy(cf_login_data, filled_appstack, artifacts_path, is_dry_run, push_strategy):
    """Iterates over each CF entity defined in filled_appstack
    and executes CF commands necessary for deployment.
    Args:
        cf_login_data (`apployer.cf_cli.CfInfo`): Credentials and addresses needed to log into
            Cloud Foundry.
        filled_appstack (`apployer.appstack.AppStack`): Expanded appstack filled with configuration
            extracted from a live TAP environment.
        artifacts_path (str): Path to a directory containing application artifacts (zips).
        is_dry_run (bool): When enabled then all write commands to CF will be only logged.
        push_strategy (str): Strategy for pushing applications.
    """
    _prepare_org_and_space(cf_login_data)
    # Apps/services that were touched and need a restart at the very end.
    apps_to_restart = []
    for security_group in filled_appstack.security_groups:
        if is_push_enabled(security_group.push_if):
            setup_security_group(cf_login_data, security_group)
    for service in filled_appstack.user_provided_services:
        if is_push_enabled(service.push_if):
            affected_apps = UpsiDeployer(service).deploy()
            apps_to_restart.extend(affected_apps)
    for broker in filled_appstack.brokers:
        if is_push_enabled(broker.push_if):
            setup_broker(broker)
    # NOTE(review): buildpacks are set up unconditionally — they have no
    # push_if gate, unlike the other entity types above.
    for buildpack in filled_appstack.buildpacks:
        setup_buildpack(buildpack, artifacts_path)
    names_to_apps = {app.name: app for app in filled_appstack.apps}
    for app in filled_appstack.apps:
        if is_push_enabled(app.push_if):
            app_deployer = AppDeployer(app, DEPLOYER_OUTPUT)
            affected_apps = app_deployer.deploy(artifacts_path, is_dry_run, push_strategy)
            apps_to_restart.extend(affected_apps)
        if app.register_in:
            # FIXME this universal mechanism is kind of pointless, because we can only do
            # registering in application-broker. Even we made "register.sh" in the registrator
            # app to be universal, we still need to pass a specific set of arguments to the
            # script.
            # And those are arguments wanted by the application-broker.
            registrator_name = app.register_in
            register_in_application_broker(
                app,
                names_to_apps[registrator_name],
                filled_appstack.domain,
                DEPLOYER_OUTPUT,
                artifacts_path)
    _restart_apps(filled_appstack, apps_to_restart)
    _execute_post_actions(filled_appstack.post_actions, artifacts_path)
    _log.info('DEPLOYMENT FINISHED')
def is_push_enabled(value):
    """Coerce an appstack "push_if" field to a bool.

    Appstack.yml may express the flag either as a YAML boolean or as the
    strings 'true'/'false' (case insensitive).

    Args:
        value (bool or str): The raw "push_if" value.

    Returns:
        bool: The coerced value.

    Raises:
        Exception: The string isn't 'true'/'false', or the type is neither
            bool nor str.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        lowered = value.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
        raise Exception("Incorrect string value: " + value + " ! Should be \"true\" or "
                        "\"false\" (case insensitive)")
    # FIX: str(type(value)) is required here. The original concatenated the
    # type object directly, which raised a bare TypeError instead of this
    # intended, descriptive Exception.
    raise Exception("Incorrect type: " + str(type(value)) + " Should be bool or str.")
def register_in_application_broker(registered_app, # pylint: disable=function-redefined
                                   application_broker, app_domain,
                                   unpacked_apps_dir, artifacts_location):
    """Registers an application in another application that provides some special functionality.
    E.g. there's the application-broker app that registers another application as a broker.
    Args:
        registered_app (`apployer.appstack.AppConfig`): Application being registered.
        application_broker (`apployer.appstack.AppConfig`): Application doing the registering.
        app_domain (str): Address domain for TAP applications.
        unpacked_apps_dir (str): Directory with unpacked artifacts.
        artifacts_location (str): Location of unpacked application artifacts.
    Raises:
        subprocess.CalledProcessError: The registration script exited with a non-zero code.
    """
    _log.info('Registering app %s in %s...', registered_app.name, application_broker.name)
    application_broker_url = 'http://{}.{}'.format(application_broker.name, app_domain)
    register_script_path = path.join(unpacked_apps_dir, application_broker.name, 'register.sh')
    if not path.exists(register_script_path):
        # FIX: the original call left the "%s" placeholder without an argument,
        # so the literal "%s" was logged instead of the script path.
        _log.debug("Registration script %s doesn't exist. Most probably, the artifact it's in "
                   "didn't need to be unpacked yet. Gonna do that now...",
                   register_script_path)
        AppDeployer(application_broker, unpacked_apps_dir).prepare(artifacts_location)
    # Mandatory arguments expected by application-broker's register.sh.
    command = ['/bin/bash', register_script_path, '-b', application_broker_url,
               '-a', registered_app.name, '-n', registered_app.name,
               '-u', application_broker.app_properties['env']['AUTH_USER'],
               '-p', application_broker.app_properties['env']['AUTH_PASS']]
    # Optional metadata flags are appended only when present in the app's env.
    app_env = registered_app.app_properties['env']
    display_name = app_env.get('display_name')
    if display_name:
        command.extend(['-s', display_name])
    description = app_env.get('description')
    if description:
        command.extend(['-d', description])
    image_url = app_env.get('image_url')
    if image_url:
        command.extend(['-i', image_url])
    if registered_app.register_config:
        command.extend(['-c', registered_app.register_config])
    _log.info('Running registration script: %s', ' '.join(command))
    subprocess.check_call(command)
def setup_broker(broker):
    """Sets up a broker. It will be created if it doesn't exist, updated otherwise.
    Afterwards, service access is enabled and every declared service instance
    that doesn't already exist is created (existing ones are left alone).
    Args:
        broker (`apployer.appstack.BrokerConfig`): Configuration of a service broker.
    Raises:
        CommandFailedError: Failed to set up the broker.
    """
    _log.info('Setting up broker %s...', broker.name)
    broker_args = (broker.name, broker.auth_username, broker.auth_password, broker.url)
    if broker.name in cf_cli.service_brokers():
        _log.info("Broker %s exists. Will update it...", broker.name)
        cf_cli.update_service_broker(*broker_args)
    else:
        _log.info("Broker %s doesn't exist. Gonna create it now...", broker.name)
        cf_cli.create_service_broker(*broker_args)
    _enable_broker_access(broker)
    for instance in broker.service_instances:
        setup_service_instance(broker, instance)
def _enable_broker_access(broker):
    """Enables service access to the needed services.
    If a broker has instances without "label" set, then the access will be set to the broker
    itself (cf enable-service-access <broker_name>).
    All instances that do have a label mark a different logical broker and access needs to be given
    to them (for each unique label: cf enable-service-access <label>).
    """
    try:
        _log.info('Enabling access to service %s...', broker.name)
        cf_cli.enable_service_access(broker.name)
    except CommandFailedError as ex:
        _log.warning("Failed to enable service access for broker %s.\nError: %s\n"
                     "Assuming the broker doesn't provide any service by itself...",
                     broker.name, str(ex))
    # Unique labels plus the broker's own service names; falsy entries skipped.
    service_names = {instance.label for instance in broker.service_instances}
    service_names.update(broker.services)
    for service_name in service_names:
        if not service_name:
            continue
        _log.info("Enabling access to service %s... ", service_name)
        cf_cli.enable_service_access(service_name)
def setup_buildpack(buildpack_name, buildpacks_directory):
    """Sets up a buildpack. It will be updated if it exists. It will be created otherwise.
    Newly created buildpack is always put in the first place of platform's buildpacks' list.
    Args:
        buildpack_name (str): Name of the buildpack.
        buildpacks_directory (str): Path to a directory containing buildpacks.
            It can be found in a platform release package, "apps" subdirectory.
    Raises:
        CommandFailedError: Failed to set up the buildpack.
    """
    _log.info('Setting up buildpack %s...', buildpack_name)
    buildpack_path = app_file.get_file_path(buildpack_name, buildpacks_directory)
    try:
        # StopIteration from the helper means no such buildpack exists in CF yet.
        needs_update = _check_buildpack_needed(buildpack_name, buildpack_path)
    except StopIteration:
        _log.info('Buildpack %s not found in Cloud Foundry, will create it...', buildpack_name)
        cf_cli.create_buildpack(buildpack_name, buildpack_path)
        return
    if needs_update:
        _log.info('Buildpack %s exists, but in a different version. '
                  'Updating...', buildpack_name)
        cf_cli.update_buildpack(buildpack_name, buildpack_path)
    else:
        _log.info('Buildpack %s is already present on the environment in this version. '
                  'Skipping...', buildpack_path)
def _check_buildpack_needed(buildpack_name, buildpack_path):
    """Return True when the local buildpack file differs from the deployed one.

    Raises:
        StopIteration: No buildpack with that name exists in Cloud Foundry.
    """
    matching = (descr for descr in cf_cli.buildpacks() if descr.buildpack == buildpack_name)
    deployed = next(matching)
    local_filename = path.basename(buildpack_path)
    _log.debug('Buildpack in deployment package: %s; in environment: %s',
               local_filename, deployed.filename)
    return local_filename != deployed.filename
def setup_service_instance(broker, service_instance):
    """Sets up a service instance for a broker.
    Args:
        broker (`apployer.appstack.BrokerConfig`): Configuration of a service broker.
        service_instance (`apployer.appstack.ServiceInstance`): Instance to be created.
    Raises:
        CommandFailedError: Failed to set up the service instance.
    """
    try:
        cf_cli.service(service_instance.name)
    except CommandFailedError as ex:
        # "cf service" failing is taken to mean the instance doesn't exist yet.
        _log.debug(str(ex))
        _log.info("Getting properties of a service (%s) failed, assuming it doesn't exist yet.\n"
                  "Gonna create the service now...", service_instance.name)
        broker_name = service_instance.label or broker.name
        cf_cli.create_service(broker_name, service_instance.plan, service_instance.name)
        _log.debug('Created instance %s of service %s.', service_instance.name, broker_name)
    else:
        _log.info('Service instance %s already exists, skipping it...', service_instance.name)
def _execute_post_actions(post_actions, artifacts_path):
    """Run every post action's shell commands in the artifacts directory."""
    for action in post_actions:
        _log.info('Executing post action: {}'.format(action.name))
        for command in action.commands:
            _log.info('Executing command: {}'.format(command))
            cf_cli.run_command(command, work_dir=artifacts_path,
                               skip_output=False, shell=True)
class UpsiDeployer(object):
"""Does the setup of a single user-provided service instance.
Attributes:
service (`apployer.appstack.UserProvidedService`): Service's configuration from the filled
expanded appstack.
Args:
service (`apployer.appstack.UserProvidedService`): See class attributes.
"""
    def __init__(self, service):
        # Service configuration from the filled, expanded appstack.
        self.service = service
    @staticmethod
    def _recreate_bindings(bindings):
        """Recreates the given service bindings.
        Each binding is deleted and then created again (delete-then-create, so
        apps are briefly unbound in between).
        Args:
            bindings (list[dict]): List of dictionaries representing a binding.
                Binding has "metadata" and "entity" fields.
        """
        for binding in bindings:
            service_guid = binding['entity']['service_instance_guid']
            app_guid = binding['entity']['app_guid']
            _log.debug('Rebinding %s to %s...', service_guid, app_guid)
            cf_api.delete_service_binding(binding)
            cf_api.create_service_binding(service_guid, app_guid)
def deploy(self):
"""Sets up a user provided service. It will be created if it doesn't exist.
It will be updated if it exists and its credentials in the live environment are | |
<gh_stars>1-10
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
try:
    import colorama

    def _colorize(prefix):
        """Build a helper wrapping text between `prefix` and a style reset."""
        def _apply(text):
            return "%s%s%s" % (prefix, text, colorama.Style.RESET_ALL)
        return _apply

    bright = _colorize(colorama.Style.BRIGHT)
    dim = _colorize(colorama.Style.DIM)
    white = _colorize(colorama.Fore.WHITE)
    blue = _colorize(colorama.Fore.BLUE)
    red = _colorize(colorama.Fore.RED)
    green = _colorize(colorama.Fore.GREEN)
    yellow = _colorize(colorama.Fore.YELLOW)
except ImportError:
    # colorama unavailable: all color helpers degrade to plain pass-through.
    def _plain(text):
        return text
    bright = dim = white = blue = red = green = yellow = _plain
# 'nightly' builds use setuptools and stamp the conda build version into the
# package; regular builds fall back to plain distutils.
if 'nightly' in sys.argv:
    from setuptools import setup
    sys.argv.remove('nightly')
    # read the version written out by the conda build machinery...
    with open('__conda_version__.txt', 'r') as f:
        version = f.read().rstrip()
    # ...and persist it as a Python module inside the bokeh package
    vers_file = os.path.join('bokeh', '__conda_version__.py')
    with open(vers_file, 'w') as f:
        f.write("conda_version=" + "'" + version + "'")
else:
    from distutils.core import setup
    from distutils import dir_util
# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
# Key source/build/install locations, all anchored at the repository root.
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
# Python 2 shim: make input() behave like Python 3's input()
if sys.version_info[0] < 3:
    input = raw_input
# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------
# Tell versioneer where the generated version files live and how release
# tags / unpacked source directories are named.
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
package_data = []

def package_path(path, filters=()):
    """Collect files under *path* (recorded relative to 'bokeh') into package_data.

    A single file is recorded directly; a directory is walked recursively.
    When *filters* (a tuple of filename suffixes) is non-empty, only matching
    files are kept. Raises RuntimeError if *path* does not exist.
    """
    if not os.path.exists(path):
        raise RuntimeError("packaging non-existent path: %s" % path)
    if os.path.isfile(path):
        package_data.append(relpath(path, 'bokeh'))
        return
    for dirpath, _dirs, filenames in os.walk(path):
        reldir = relpath(dirpath, 'bokeh')
        for fname in filenames:
            if not filters or fname.endswith(filters):
                package_data.append(join(reldir, fname))
# You can't install Bokeh in a virtualenv because the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
    """Returns a list containing all global site-packages directories
    (and possibly site-python)."""
    # platform/interpreter detection used below to pick the directory layout
    _is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
    _is_pypy = hasattr(sys, 'pypy_version_info')
    _is_jython = sys.platform[:4] == 'java'
    prefixes = [sys.prefix, sys.exec_prefix]
    sitepackages = []
    seen = set()
    for prefix in prefixes:
        # skip empty and duplicate prefixes (sys.prefix often == sys.exec_prefix)
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)
        if sys.platform in ('os2emx', 'riscos') or _is_jython:
            sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
        elif _is_pypy:
            sitedirs = [os.path.join(prefix, 'site-packages')]
        elif sys.platform == 'darwin' and prefix == sys.prefix:
            if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
                sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                            os.path.join(prefix, "Extras", "lib", "python")]
            else: # any other Python distros on OSX work this way
                sitedirs = [os.path.join(prefix, "lib",
                                         "python" + sys.version[:3], "site-packages")]
        elif os.sep == '/':
            # generic POSIX layout
            sitedirs = [os.path.join(prefix,
                                     "lib",
                                     "python" + sys.version[:3],
                                     "site-packages"),
                        os.path.join(prefix, "lib", "site-python"),
                        ]
            # prefer lib64 on 64-bit builds when it is a distinct directory
            lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
            if (os.path.exists(lib64_dir) and
                os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                if _is_64bit:
                    sitedirs.insert(0, lib64_dir)
                else:
                    sitedirs.append(lib64_dir)
            try:
                # sys.getobjects only available in --with-pydebug build
                sys.getobjects
                sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
            except AttributeError:
                pass
            # Debian-specific dist-packages directories:
            sitedirs.append(os.path.join(prefix, "local/lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            if sys.version_info[0] >= 3:
                sitedirs.append(os.path.join(prefix, "lib",
                                             "python" + sys.version[0],
                                             "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
        else:
            # non-POSIX layout (e.g. Windows)
            sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
        if sys.platform == 'darwin':
            # for framework builds *only* we add the standard Apple
            # locations. Currently only per-user, but /Library and
            # /Network/Library could be added too
            if 'Python.framework' in prefix:
                home = os.environ.get('HOME')
                if home:
                    sitedirs.append(
                        os.path.join(home,
                                     'Library',
                                     'Python',
                                     sys.version[:3],
                                     'site-packages'))
        for sitedir in sitedirs:
            sitepackages.append(os.path.abspath(sitedir))
    # keep only the candidate directories that actually exist
    sitepackages = [p for p in sitepackages if os.path.isdir(p)]
    return sitepackages
def check_remove_bokeh_install(site_packages):
    """Offer to delete a pre-existing bokeh package found in *site_packages*.

    Exits the process if the user declines, or if removal fails.
    """
    bokeh_path = join(site_packages, "bokeh")
    if not (exists(bokeh_path) and isdir(bokeh_path)):
        return
    answer = input("Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path)
    if answer != "y":
        print("Not removing old bokeh install")
        sys.exit(1)
    print("Removing old bokeh install...", end=" ")
    try:
        shutil.rmtree(bokeh_path)
        print("Done")
    except (IOError, OSError):
        print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
        sys.exit(-1)
def remove_bokeh_pth(path_file):
    """Delete an old bokeh .pth file if present; return True when one was removed.

    Exits the process if the file exists but cannot be deleted.
    """
    if not exists(path_file):
        return False
    try:
        os.remove(path_file)
    except (IOError, OSError):
        print("Unable to remove old path file at %s, exiting" % path_file)
        sys.exit(-1)
    return True
# Template messages used by build_js() below; the %r/%s slots are filled in
# at report time.
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
# gulp ran but returned a non-zero exit status
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""
# artifact size reporting failed (non-fatal)
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
    """Compile BokehJS with gulp and report status, timing, and artifact sizes.

    Exits the process when gulp cannot be launched or the build fails.
    """
    print("Building BokehJS... ", end="")
    sys.stdout.flush()
    os.chdir('bokehjs')
    # gulp is invoked through its .cmd shim on Windows
    if sys.platform != "win32":
        cmd = [join('node_modules', '.bin', 'gulp'), 'build']
    else:
        cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
    t0 = time.time()
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        print(BUILD_EXEC_FAIL_MSG % (cmd, e))
        sys.exit(1)
    finally:
        os.chdir('..')
    # BUG FIX: communicate() drains both pipes while waiting; the previous
    # wait()-then-read() sequence could deadlock once a pipe buffer filled up
    out, err = proc.communicate()
    t1 = time.time()
    result = proc.returncode
    if result != 0:
        msg = err.decode('ascii', errors='ignore')
        msg = "\n".join([" " + x for x in msg.split("\n")])
        print(BUILD_FAIL_MSG % red(msg))
        sys.exit(1)
    indented_msg = ""
    msg = out.decode('ascii', errors='ignore')
    pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
    for line in msg.strip().split("\n"):
        m = pat.match(line)
        # BUG FIX: pass through lines without a gulp timestamp instead of
        # crashing with AttributeError on the failed match
        if m is None:
            indented_msg += " " + dim(line) + "\n"
            continue
        stamp, txt = m.groups()
        indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
    print(BUILD_SUCCESS_MSG % indented_msg)
    print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
    print()
    print("Build artifact sizes:")
    try:
        blddir = join("bokehjs", "build")
        bkjs_size = os.stat(join(blddir, "js", "bokeh.js")).st_size / 2**10
        bkjs_min_size = os.stat(join(blddir, "js", "bokeh.min.js")).st_size / 2**10
        bkcss_size = os.stat(join(blddir, "css", "bokeh.css")).st_size / 2**10
        bkcss_min_size = os.stat(join(blddir, "css", "bokeh.min.css")).st_size / 2**10
        print(" - bokeh.js : %6.1f KB" % bkjs_size)
        print(" - bokeh.css : %6.1f KB" % bkcss_size)
        print(" - bokeh.min.js : %6.1f KB" % bkjs_min_size)
        print(" - bokeh.min.css : %6.1f KB" % bkcss_min_size)
    except Exception as e:
        print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
    """Copy the built BokehJS artifacts into bokeh/server/static.

    Exits the process when the expected files are missing from
    bokehjs/build (i.e. BokehJS has not been built yet).
    """
    target_jsdir = join(SERVER, 'static', 'js')
    target_cssdir = join(SERVER, 'static', 'css')
    STATIC_ASSETS = [
        join(JS, 'bokeh.js'),
        join(JS, 'bokeh.min.js'),
        join(CSS, 'bokeh.css'),
        join(CSS, 'bokeh.min.css'),
    ]
    if not all([exists(a) for a in STATIC_ASSETS]):
        print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
        sys.exit(1)
    # replace any previously installed static JS/CSS wholesale
    if exists(target_jsdir):
        shutil.rmtree(target_jsdir)
    shutil.copytree(JS, target_jsdir)
    if exists(target_cssdir):
        shutil.rmtree(target_cssdir)
    shutil.copytree(CSS, target_cssdir)
def clean():
    """Remove prior build output (build/lib/bokeh)."""
    print("Removing prior-built items...", end=" ")
    # NOTE(review): presumably build/lib/bokeh exists whenever clean() runs;
    # dir_util.remove_tree may raise if it does not -- confirm against callers.
    dir_util.remove_tree('build/lib/bokeh')
    print("Done")
def get_user_jsargs():
    """Interactively ask how to handle BokehJS.

    Returns True to build and install fresh BokehJS, False to reuse the
    last build from bokeh/bokehjs/build. Re-prompts on invalid input.
    """
    print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
    choices = {"1": True, "2": False}
    while True:
        answer = input("Choice? ")
        if answer in choices:
            return choices[answer]
        print("Input '%s' not understood. Valid choices: 1, 2\n" % answer)
def parse_jsargs():
    """Decide whether BokehJS should be built, consuming any JS flags from sys.argv.

    Returns True when a fresh BokehJS build is requested, False otherwise.
    Exits when --build_js is combined with a non-install-like command.
    """
    install_commands = ('install', 'develop', 'sdist', 'egg_info', 'build')
    installing = any(cmd in sys.argv for cmd in install_commands)
    if '--build_js' in sys.argv:
        if not installing:
            print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
            sys.exit(1)
        sys.argv.remove('--build_js')
        return True
    if '--install_js' in sys.argv:
        # Note that --install_js can be used by itself (without sdist/install/develop)
        sys.argv.remove('--install_js')
        return False
    # no explicit flag: only prompt the user when actually installing
    return get_user_jsargs() if installing else False
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Set up this checkout or source archive with the right BokehJS files.
if sys.version_info[:2] < (2, 6):
    raise RuntimeError("Bokeh requires python >= 2.6")
# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
    install_js()
    sys.exit(0)
# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
    if "--install_js" in sys.argv:
        print("Removing '--install_js' incompatible with 'sdist'")
        sys.argv.remove('--install_js')
    if "--build_js" not in sys.argv:
        print("Adding '--build_js' required for 'sdist'")
        sys.argv.append('--build_js')
# check for package install, set jsinstall to False to skip prompt
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
if "--build_js" in sys.argv or "--install_js" in sys.argv:
print("BokehJS source code is not shipped in sdist packages; "
"building/installing from the bokehjs source directory is disabled. "
"To build or develop BokehJS yourself, you must clone the full "
"Bokeh repository from https://github.com/bokeh/bokeh")
if "--build_js" in sys.argv:
sys.argv.remove('--build_js')
if "--install_js" in sys.argv:
sys.argv.remove('--install_js')
jsbuild = False
jsinstall = False
else:
jsbuild | |
atoms of types 0-0-0, 0-0-1, 0-1-1, and 1-1-1.
grid_start (float): Minimum atomic distance for which the grids are defined (cannot be 0.0)
grid_num_2b (int):number of points to use in the grid of the 2-body mapped potential
grid_num_3b (int): number of points to use to generate the list of distances used to
generate the triplets of atoms for the 2-body mapped potential
"""
def __init__(self, elements, r_cut, sigma_2b, sigma_3b, theta_2b, theta_3b, noise, rep_sig=1, **kwargs):
super().__init__()
self.elements = list(np.sort(elements))
self.r_cut = r_cut
self.rep_sig = rep_sig
kernel_2b = kernels.TwoBodyManySpeciesKernel(
theta=[sigma_2b, theta_2b, r_cut])
self.gp_2b = gp.GaussianProcess(
kernel=kernel_2b, noise=noise, **kwargs)
kernel_3b = kernels.ThreeBodyManySpeciesKernel(
theta=[sigma_3b, theta_3b, r_cut])
self.gp_3b = gp.GaussianProcess(
kernel=kernel_3b, noise=noise, **kwargs)
self.grid_2b, self.grid_3b, self.grid_start, self.grid_num_2b, self.grid_num_3b = {
}, {}, None, None, None
def fit(self, confs, forces, ncores=1):
""" Fit the GP to a set of training forces using a 2- and
3-body single species force-force kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training forces and the 2-body predictions of force on the
training configurations
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = "models/MODEL_ker_TwoBodyManySpecies_ntr_%i.json" %(len(forces))
try:
model_2b = models.TwoBodyManySpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded 2-body model to bootstart training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit(confs, forces, ncores=ncores)
ntr = len(confs)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
self.gp_3b.fit(confs, forces - two_body_forces, ncores=ncores)
def fit_energy(self, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species energy-energy kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training energies and the 2-body predictions of energies on the
training configurations.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = "models/MODEL_ker_TwoBodyManySpecies_ntr_%i.json" %(len(energies))
try:
model_2b = models.TwoBodyManySpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
print("Loaded 2-body model to bootstart training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(glob_confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.gp_2b.fit_energy(glob_confs, energies, ncores=1)
ntr = len(glob_confs)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_energy(glob_confs, energies -
two_body_energies, ncores=ncores)
def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species force-force, energy-energy, and energy-forces kernel
functions. The 2-body Gaussian process is first fitted, then the 3-body GP
is fitted to the difference between the training energies (and forces) and
the 2-body predictions of energies (and forces) on the training configurations.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = "models/MODEL_ker_TwoBodyManySpecies_ntr_%i.json" %(len(forces) + len(energies))
try:
model_2b = models.TwoBodyManySpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded 2-body model to bootstart training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit_force_and_energy(
confs, forces, glob_confs, energies, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_force_and_energy(
confs, forces - two_body_forces, glob_confs, energies - two_body_energies, ncores=ncores)
def predict(self, confs, return_std=False, ncores=1):
""" Predict the forces acting on the central atoms of confs using the
2- and 3-body GPs. The total force is the sum of the two predictions.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
forces (array): array of force vectors predicted by the GPs
forces_errors (array): errors associated to the force predictions,
returned only if return_std is True
"""
if return_std:
if self.rep_sig:
rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
force_2b, std_2b = self.gp_2b.predict(
confs, return_std, ncores=ncores)
force_2b += rep_forces
else:
force_2b, std_2b = self.gp_2b.predict(
confs, return_std, ncores=ncores)
force_3b, std_3b = self.gp_2b.predict(
confs, return_std, ncores=ncores)
return force_2b + force_3b, std_2b + std_3b
else:
if self.rep_sig:
rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
return self.gp_2b.predict(confs, return_std, ncores=ncores) + rep_forces + \
self.gp_3b.predict(confs, return_std, ncores=ncores)
else:
return self.gp_2b.predict(confs, return_std, ncores=ncores) + \
self.gp_3b.predict(confs, return_std, ncores=ncores)
def predict_energy(self, glob_confs, return_std=False, ncores=1):
""" Predict the local energies of the central atoms of confs using the
2- and 3-body GPs. The total force is the sum of the two predictions.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
energies (array) : Array containing the total energy of each snapshot
energies_errors (array): errors associated to the energies predictions,
returned only if return_std is True
"""
if return_std:
if self.rep_sig:
rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
force_2b, std_2b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_2b += rep_energies
else:
energy_2b, std_2b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_3b, std_3b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
return energy_2b + energy_3b, std_2b + std_3b
else:
if self.rep_sig:
rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + rep_energies +\
self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores)
else:
return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + \
self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores)
def build_grid(self, start, num_2b, num_3b, ncores=1):
"""Function used to create the three different 2-body energy grids for
atoms of elements 0-0, 0-1, and 1-1, and the four different 3-body energy grids for
atoms of elements 0-0-0, 0-0-1, 0-1-1, and 1-1-1. The function calls the
``build_grid_3b`` function for each of the 3-body grids to build.
Args:
start (float): smallest interatomic distance for which the energy is predicted
by the GP and stored inn the 3-body mapped potential
num (int): number of points to use in the grid of the 2-body mapped potentials
num_3b (int): number of points to use to generate the list of distances used to
generate the triplets of atoms for the 3-body mapped potentials
ncores (int): number of CPUs to use to calculate the energy predictions
"""
self.grid_start = start
self.grid_num_2b = num_2b
self.grid_num_3b = num_2b
perm_list_2b = list(combinations_with_replacement(self.elements, 2))
perm_list_3b = list(combinations_with_replacement(self.elements, 3))
dists_2b = np.linspace(start, self.r_cut, num_2b)
confs_2b = np.zeros((num_2b, 1, 5))
confs_2b[:, 0, 0] = dists_2b
for pair in perm_list_2b: # in this for loop, predicting then save for each individual one
confs_2b[:, 0, 3], confs_2b[:, 0,
4] = pair[0], pair[1]
mapped_energies = self.gp_2b.predict_energy(
list(confs_2b), ncores=ncores, mapping=True)
if self.rep_sig:
mapped_energies += utility.get_repulsive_energies(
confs_2b, self.rep_sig, mapping=True)
self.grid_2b[pair] = interpolation.Spline1D(dists_2b, mapped_energies)
dists_3b = np.linspace(start, self.r_cut, num_3b)
for trip in perm_list_3b:
self.grid_3b[trip] = self.build_grid_3b(
dists_3b, trip[0], trip[1], trip[2], ncores = ncores)
def build_grid_3b(self, dists, element_k, element_i, element_j, ncores=1):
""" Build a mapped 3-body potential.
Calculates the energy predicted by the GP for three atoms of elements element_i, element_j, element_k,
at all possible combinations of num distances ranging from start to | |
<reponame>rostock/olca<gh_stars>1-10
from flask import Flask, jsonify, redirect, request
from flask_compress import Compress
import math
import openlocationcode as olc
import pyproj as p
import re
import requests as req
from urllib.parse import quote_plus, unquote
# global constants: core functionality
QUERY_SEPARATOR_ = ','
QUERY_ADDITIONAL_SEPARATOR_ = ' '
# Plus codes are natively defined on WGS84 geographic coordinates
OLC_EPSG_ = 4326
# number of decimal places of the finest OLC cell edge (0.000125 deg) -> 6
OLC_PRECISION_ = len(str(0.000125)[2:])
EARTH_RADIUS_ = 6371 # kilometers
# global constants: API
HTTP_OK_STATUS_ = 200
HTTP_ERROR_STATUS_ = 400
DEFAULT_EPSG_IN_ERROR_MESSAGE_ = 'value of optional \'epsg_in\' parameter is not a number'
DEFAULT_EPSG_OUT_ERROR_MESSAGE_ = 'value of optional \'epsg_out\' parameter is not a number'
DEFAULT_ERROR_MESSAGE_ = 'value of required \'query\' parameter is neither a valid pair of coordinates (required order: longitude/x,latitude/y) nor a valid Plus code'
DEFAULT_ERROR_REGIONAL_MESSAGE_ = 'provided regional Plus code is not valid or could not be resolved due to a non-reachable third party API'
DEFAULT_MAP_ERROR_MESSAGE_ = 'value of required \'bbox\' parameter is not a valid quadruple of coordinates (required order: southwest longitude/x,southwest latitude/y,northeast longitude/x,northeast latitude/y)'
# initialise application
app = Flask(__name__)
# import settings from configuration file
app.config.from_pyfile('settings.py', silent = True)
# initialise Compress
Compress(app)
# custom functions: core functionality
# extracts digits from a text
def digit_extractor(text):
    """Return the first run of digits found in *text*, or *text* unchanged if none."""
    match = re.search(r'\d+', text)
    if match is None:
        return text
    return match.group(0)
# calculates the great circle distance of two geographical points
def distance_calculator(from_point_x, from_point_y, to_point_x, to_point_y):
    """Haversine great-circle distance, in kilometers, between two lon/lat points."""
    lon1, lat1, lon2, lat2 = (math.radians(v) for v in (from_point_x, from_point_y, to_point_x, to_point_y))
    half_dlon = (lon2 - lon1) / 2
    half_dlat = (lat2 - lat1) / 2
    a = math.sin(half_dlat) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(half_dlon) ** 2
    # return calculated distance
    return 2 * EARTH_RADIUS_ * math.asin(math.sqrt(a))
# returns a municipality centroid on querying a municipality name
def municipality_forward_searcher(municipality_name):
    """Geocode a municipality name to its centroid via Nominatim.

    Returns an (x, y) i.e. (lon, lat) float pair for the first matching
    municipality, or (None, None) when nothing matches or the request fails.
    """
    # Nominatim forward-geocoder base URL comes from the app settings
    base_url = app.config['MUNICIPALITY_FORWARD_URL']
    query = '&city=' + municipality_name
    request_kwargs = { 'timeout': 3 }
    if 'MUNICIPALITY_PROXY' in app.config:
        request_kwargs['proxies'] = app.config['MUNICIPALITY_PROXY']
    try:
        for response_item in req.get(base_url + query, **request_kwargs).json():
            if response_item['type'] in ('administrative', 'city', 'town'):
                return float(response_item['lon']), float(response_item['lat'])
        return None, None
    except:
        # deliberate best-effort: any failure degrades to "not found"
        return None, None
# returns a municipality name on querying a pair of coordinates (i.e. a municipality centroid)
def municipality_reverse_searcher(x, y, code_local):
    """Reverse-geocode (x, y) via Nominatim and prepend the local Plus code.

    Returns '<code_local>, <municipality name>', or 'not definable' when the
    lookup fails for any reason.
    """
    # Nominatim reverse-geocoder base URL comes from the app settings
    base_url = app.config['MUNICIPALITY_REVERSE_URL']
    query = '&lon=' + str(x) + '&lat=' + str(y)
    request_kwargs = { 'timeout': 3 }
    if 'MUNICIPALITY_PROXY' in app.config:
        request_kwargs['proxies'] = app.config['MUNICIPALITY_PROXY']
    try:
        response = req.get(base_url + query, **request_kwargs).json()
        return code_local + ', ' + response['name']
    except:
        # deliberate best-effort: any failure degrades to a placeholder
        return 'not definable'
# reprojects (transforms) a point from one EPSG code to another
def point_reprojector(transformer, source_x, source_y):
    """Run a single (x, y) point through the given pyproj transformer."""
    target = transformer.transform(source_x, source_y)
    return target
# Open Location Code (OLC) handler
def olc_handler(x, y, query, epsg_in, epsg_out, code_regional):
    """Resolve a query to a Plus-code cell and return it as a GeoJSON feature.

    Args:
        x, y: coordinates (in epsg_in) when a pair of coordinates was queried
        query: a full/short Plus code, a (code, municipality) pair when
            code_regional is truthy, or None when coordinates were queried
        epsg_in: EPSG code of the provided coordinates
        epsg_out: EPSG code for all returned coordinates
        code_regional: truthy when a regional Plus code (short code plus
            municipality name) was queried

    Returns:
        A (payload, status) tuple: a GeoJSON 'Feature' dict with the cell
        polygon and code properties on success, or an error dict otherwise.
    """
    # if necessary…
    if code_regional:
        # decode queried regional Plus code if it is valid, return an error if not
        municipality_centroid_x, municipality_centroid_y = municipality_forward_searcher(query[1])
        try:
            query = olc.recoverNearest(query[0], municipality_centroid_y, municipality_centroid_x)
            recovered_coord = olc.decode(query)
            recovered_center_x, recovered_center_y = recovered_coord.longitudeCenter, recovered_coord.latitudeCenter
            # return an error if municipality centroid is further away than 0.25 degrees from the centroid of the recovered nearest matching code
            if abs(abs(municipality_centroid_x) - abs(recovered_center_x)) > 0.25 or abs(abs(municipality_centroid_y) - abs(recovered_center_y)) > 0.25:
                return { 'message': DEFAULT_ERROR_REGIONAL_MESSAGE_, 'status': HTTP_ERROR_STATUS_ }, HTTP_ERROR_STATUS_
        except:
            return { 'message': DEFAULT_ERROR_REGIONAL_MESSAGE_, 'status': HTTP_ERROR_STATUS_ }, HTTP_ERROR_STATUS_
    # if a pair of coordinates was queried…
    if query is None:
        # transform if EPSG code of queried pair of coordinates is not equal to default EPSG code of OLC
        if epsg_in != OLC_EPSG_:
            try:
                source_projection = p.Proj(init = 'epsg:' + str(epsg_in))
                target_projection = p.Proj(init = 'epsg:' + str(OLC_EPSG_))
                transformer = p.Transformer.from_proj(source_projection, target_projection)
                x, y = point_reprojector(transformer, x, y)
            except:
                return { 'message': 'transformation of provided pair of coordinates (required order: longitude/x,latitude/y) not possible', 'status': HTTP_ERROR_STATUS_ }, HTTP_ERROR_STATUS_
        # encode queried pair of coordinates
        code = olc.encode(y, x)
    # if not…
    else:
        # take query (as is) as the Plus code
        code = query
    # take care of short Plus code if necessary
    code = code.split(olc.SEPARATOR_)[0].ljust(8, olc.PADDING_CHARACTER_) + olc.SEPARATOR_ if olc.isShort(code) else code
    # determine the level
    # NOTE(review): '/' is true division, so level is a float under Python 3
    # (e.g. 5.0); the comparisons below still work -- confirm consumers of
    # the 'level' property accept a float.
    level = len(code.replace(olc.SEPARATOR_, '').rstrip(olc.PADDING_CHARACTER_)) / 2
    # decode the Plus code to calculate the center pair of coordinates and the bbox
    coord = olc.decode(code)
    center_x, center_y = coord.longitudeCenter, coord.latitudeCenter
    bbox_sw_x, bbox_sw_y = coord.longitudeLo, coord.latitudeLo
    bbox_ne_x, bbox_ne_y = coord.longitudeHi, coord.latitudeHi
    # get the full Plus code
    code = olc.encode(center_y, center_x)
    # transform all pairs of coordinates to be returned if EPSG code for all returned pairs of coordinates is not equal to default EPSG code of OLC, round to six decimals each if not
    if epsg_out != OLC_EPSG_:
        try:
            source_projection = p.Proj(init = 'epsg:' + str(OLC_EPSG_))
            target_projection = p.Proj(init = 'epsg:' + str(epsg_out))
            transformer = p.Transformer.from_proj(source_projection, target_projection)
            center_x, center_y = point_reprojector(transformer, center_x, center_y)
            bbox_sw_x, bbox_sw_y = point_reprojector(transformer, bbox_sw_x, bbox_sw_y)
            bbox_ne_x, bbox_ne_y = point_reprojector(transformer, bbox_ne_x, bbox_ne_y)
        except Exception as e:
            return { 'message': str(e), 'status': HTTP_ERROR_STATUS_ }, HTTP_ERROR_STATUS_
    else:
        center_x, center_y = round(center_x, OLC_PRECISION_), round(center_y, OLC_PRECISION_)
        bbox_sw_x, bbox_sw_y = round(bbox_sw_x, OLC_PRECISION_), round(bbox_sw_y, OLC_PRECISION_)
        bbox_ne_x, bbox_ne_y = round(bbox_ne_x, OLC_PRECISION_), round(bbox_ne_y, OLC_PRECISION_)
    # build the bbox (a closed ring, southwest corner first, counter-clockwise)
    bbox = [
        [
            [ bbox_sw_x, bbox_sw_y ],
            [ bbox_ne_x, bbox_sw_y ],
            [ bbox_ne_x, bbox_ne_y ],
            [ bbox_sw_x, bbox_ne_y ],
            [ bbox_sw_x, bbox_sw_y ]
        ]
    ]
    # build the properties
    properties = {
        # longitude/x of the center pair of coordinates
        'center_x': center_x,
        # latitude/y of the center pair of coordinates
        'center_y': center_y,
        # grid level 1 code
        'code_level_1': olc.encode(coord.latitudeCenter, coord.longitudeCenter, 2),
        'epsg_in': epsg_in,
        'epsg_out': epsg_out,
        # grid level
        'level': level
    }
    if level > 1:
        # grid level 2 code
        properties.update( { 'code_level_2': olc.encode(coord.latitudeCenter, coord.longitudeCenter, 4) } )
        if level > 2:
            # grid level 3 code
            properties.update( { 'code_level_3': olc.encode(coord.latitudeCenter, coord.longitudeCenter, 6) } )
            if level > 3:
                # grid level 4 code
                properties.update( { 'code_level_4': olc.encode(coord.latitudeCenter, coord.longitudeCenter, 8) } )
                if level > 4:
                    # grid level 5 code, local code and short code (depending on the distance between the code center and the reference pair of coordinates)
                    code_local = code[4:]
                    properties.update( { 'code_level_5': code, 'code_local': code_local, 'code_short': olc.shorten(code, y, x) if query is None else olc.shorten(code, coord.latitudeCenter, coord.longitudeCenter) } )
                    # get all information for adding the regional Plus code if necessary
                    # NOTE(review): this relies on code_local, which is only
                    # bound in the level > 4 branch -- confirm this check is
                    # intended to be skipped at coarser levels.
                    if app.config['CODE_REGIONAL_OUT']:
                        properties.update( { 'code_regional': municipality_reverse_searcher(coord.longitudeCenter, coord.latitudeCenter, code_local) } )
    # return valid GeoJSON
    return {
        'type': 'Feature',
        'properties': properties,
        'geometry': {
            'type': 'Polygon',
            'coordinates': bbox
        }
    }, HTTP_OK_STATUS_
# OLC loop handler
def olc_loop_handler(min_x, min_y, max_x, max_y, epsg_in, epsg_out, mode):
# return points only if in labels mode, polygons if not
if mode == 'labels':
points_only = True
else:
points_only = False
# transform if EPSG code of input min/max x/y is not equal to default EPSG code of OLC
if epsg_in != OLC_EPSG_:
try:
source_projection = p.Proj(init = 'epsg:' + str(epsg_in))
target_projection = p.Proj(init = 'epsg:' + str(OLC_EPSG_))
transformer = p.Transformer.from_proj(source_projection, target_projection)
min_x, min_y = point_reprojector(transformer, min_x, min_y)
max_x, max_y = point_reprojector(transformer, max_x, max_y)
except:
return { 'message': 'transformation of provided quadruple of coordinates (required order: southwest longitude/x,southwest latitude/y,northeast longitude/x,northeast latitude/y) not possible', 'status': HTTP_ERROR_STATUS_ }, HTTP_ERROR_STATUS_
# calculate the OLC level the loop will take place within
distance = distance_calculator(min_x, min_y, max_x, max_y)
if distance <= 0.5:
level = 5
elif distance <= 5:
level = 4
elif distance <= 100:
level = 3
elif distance <= 500:
level = 2
else:
level = 1
# manipulate min/max x/y a bit to create a 10 % buffer around the initially provided bbox
bbox_width_buffer, bbox_height_buffer = (max_x - min_x) / 10, (max_y - min_y) / 10
min_x, max_x, min_y, max_y = min_x - bbox_width_buffer, | |
<gh_stars>0
import os
import re
import sys
import json
import time
import psutil
import shutil
import appdirs
import zipfile
import tarfile
import argparse
import platform
import requests
import traceback
import contextlib
import subprocess
import webbrowser
import logging.config
from pathlib2 import Path
from bs4 import BeautifulSoup
from backports import tempfile
from packaging.version import Version, InvalidVersion
try:
from urlparse import urlparse
except Exception:
from urllib.parse import urlparse
try:
from urllib2 import Request, urlopen
except ImportError:
from urllib.request import Request, urlopen
try:
import PySide
from PySide.QtCore import *
from PySide.QtGui import *
except ImportError:
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtGui import *
# Locate the logging configuration file, trying (in order): next to this source
# file, next to the frozen executable, and inside the PyInstaller bundle.
logging_name = '__logging__.ini'
logging_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), logging_name)
if not os.path.isfile(logging_path):
    logging_path = os.path.join(os.path.dirname(sys.executable), logging_name)
    if not os.path.isfile(logging_path):
        # sys._MEIPASS only exists when running from a PyInstaller bundle
        if hasattr(sys, '_MEIPASS'):
            logging_path = os.path.join(sys._MEIPASS, 'resources', logging_name)
logging.config.fileConfig(logging_path, disable_existing_loggers=False)
# Module-wide logger used by the updater classes below
LOGGER = logging.getLogger('artellapipe-updater')
# Marker file name used to remember which version should run on next launch
ARTELLA_NEXT_VERSION_FILE_NAME = 'version_to_run_next'
def is_windows():
    """Return True when the interpreter runs on a Windows platform."""
    return sys.platform[:3] == 'win'
def is_mac():
    """Return True when the interpreter runs on macOS (Darwin)."""
    return sys.platform in ('darwin',)
def is_linux():
    """Return True when the interpreter runs on a Linux platform."""
    return sys.platform.find('linux') != -1
class ArtellaSplash(QSplashScreen, object):
    """Splash screen that can be dragged around by clicking anywhere on it."""

    def __init__(self, pixmap):
        # Point inside the widget where the current drag started
        self._offset = QPoint()
        super(ArtellaSplash, self).__init__(pixmap)

    def mousePressEvent(self, event):
        """
        Remembers the press position so a subsequent drag keeps that point
        under the cursor.
        :param event: QMouseEvent
        """
        self._offset = event.pos()

    def mouseMoveEvent(self, event):
        """
        Moves the splash so the original press point follows the cursor.
        :param event: QMouseEvent
        """
        self.move(event.globalX() - self._offset.x(), event.globalY() - self._offset.y())
class ArtellaUpdaterException(Exception, object):
    """
    Exception that, when instantiated, logs the error with its traceback,
    prints it to stderr and shows a critical Qt message box to the user.
    """

    def __init__(self, exc):
        # Original code checked `type(exc) in [str, unicode]`, which raises
        # NameError on Python 3 (no `unicode` builtin). Wrapping any
        # non-Exception value is both Py2/Py3 safe and more general.
        if not isinstance(exc, Exception):
            exc = Exception(exc)
        msg = '{} | {}'.format(exc, traceback.format_exc())
        LOGGER.exception(msg)
        traceback.print_exc()
        QMessageBox.critical(None, 'Error', msg)
class ArtellaUpdater(QWidget, object):
    def __init__(
            self, app, project_name, project_type, app_version, deployment_repository, documentation_url=None,
            deploy_tag=None, install_env_var=None, requirements_file_name=None, force_venv=False,
            splash_path=None, script_path=None, requirements_path=None, artellapipe_configs_path=None,
            dev=False, update_icon=False, parent=None):
        """
        Creates the updater widget: reads the bundled config (which overrides
        most constructor arguments when present), kills other running
        instances of the project, sets up logging/config/UI and finally loads.
        Calls sys.exit() when no deploy tag can be resolved or loading fails.

        :param app: QApplication instance (used to set the window icon)
        :param project_name: str, name of the project to update
        :param project_type: str, e.g. 'indie' (selects the Artella app flavor)
        :param app_version: str, version of this updater app
        :param deployment_repository: str, repository releases are deployed to
        :param documentation_url: str or None, falls back to a project default
        :param deploy_tag: str or None, release tag to deploy (resolved if None)
        :param install_env_var: str or None, env var holding the install path
        :param requirements_file_name: str or None, defaults to 'requirements.txt'
        :param force_venv: bool, whether to force virtual environment recreation
        :param splash_path: str or None, fallback splash image path
        :param script_path: str or None, launcher script path (resolved if None)
        :param requirements_path: str or None, explicit requirements file
            (its presence forces development mode)
        :param artellapipe_configs_path: str or None, artellapipe configs folder
        :param dev: bool, development mode flag
        :param update_icon: bool, whether to set the application window icon
        :param parent: QWidget or None
        """
        super(ArtellaUpdater, self).__init__(parent=parent)
        # Config stored next to/inside the executable takes precedence over args
        self._config_data = self._read_config()
        if app and update_icon:
            app.setWindowIcon(QIcon(self._get_resource(self._get_app_config('icon'))))
        self._dev = dev
        self._requirements_path = requirements_path if requirements_path else None
        self._artella_configs_path = artellapipe_configs_path if artellapipe_configs_path else None
        # We force development mode when we force a specific requirements file
        if self._requirements_path and os.path.isfile(self._requirements_path):
            self._dev = True
        self._project_name = self._get_app_config('name') or project_name
        self._project_type = self._get_app_config('type') or project_type
        self._app_version = self._get_app_config('version') or app_version
        self._repository = self._get_app_config('repository') or deployment_repository
        self._splash_path = self._get_resource(self._get_app_config('splash')) or splash_path
        self._force_venv = force_venv
        self._venv_info = dict()
        # Kill any other running process whose name matches the project
        # (single-instance behavior; skipped in development mode)
        if self._project_name and not self._dev:
            for proc in psutil.process_iter():
                if proc.name().startswith(self._project_name) and proc.pid != psutil.Process().pid:
                    proc.kill()
        self._setup_logger()
        self._setup_config()
        self._setup_ui()
        QApplication.instance().processEvents()
        self._install_path = None
        self._selected_tag_index = None
        self._documentation_url = documentation_url if documentation_url else self._get_default_documentation_url()
        self._install_env_var = install_env_var if install_env_var else self._get_default_install_env_var()
        self._requirements_file_name = requirements_file_name if requirements_file_name else 'requirements.txt'
        self._all_tags = list()
        self._deploy_tag = deploy_tag if deploy_tag else self._get_deploy_tag()
        self._script_path = script_path if script_path and os.path.isfile(script_path) else self._get_script_path()
        # 'indie' projects use the lifecycler app; everything else uses artella
        self._artella_app = 'lifecycler' if self._project_type == 'indie' else 'artella'
        # If not valid tag is found we close the application
        if not self._deploy_tag:
            sys.exit()
        valid_load = self._load()
        if not valid_load:
            sys.exit()
    @property
    def project_name(self):
        """str: name of the project this updater manages."""
        return self._project_name
    @property
    def repository(self):
        """str: deployment repository releases are downloaded from."""
        return self._repository
    @property
    def install_env_var(self):
        """str: name of the environment variable holding the install path."""
        return self._install_env_var
def get_clean_name(self):
"""
Return name of the project without spaces and lowercase
:return: str
"""
return self._project_name.replace(' ', '').lower()
def get_current_os(self):
"""
Return current OS the scrip is being executed on
:return:
"""
os_platform = platform.system()
if os_platform == 'Windows':
return 'Windows'
elif os_platform == 'Darwin':
return 'MacOS'
elif os_platform == 'Linux':
return 'Linux'
else:
raise Exception('No valid OS platform detected: {}!'.format(os_platform))
def get_config_data(self):
"""
Returns data in the configuration file
:return: dict
"""
data = dict()
config_path = self._get_config_path()
if not os.path.isfile(config_path):
return data
with open(config_path, 'r') as config_file:
try:
data = json.load(config_file)
except Exception:
data = dict()
return data
def is_python_installed(self):
"""
Returns whether current system has Python installed or not
:return: bool
"""
process = self._run_subprocess(commands_list=['python', '-c', 'quit()'], shell=False)
process.wait()
return True if process.returncode == 0 else False
def is_pip_installed(self):
"""
Returns whether pip is installed or not
:return: bool
"""
process = self._run_subprocess(commands_list=['pip', '-V'])
process.wait()
return True if process.returncode == 0 else False
def is_virtualenv_installed(self):
"""
Returns whether virtualenv is intsalled or not
:return: bool
"""
try:
process = self._run_subprocess(commands_list=['virtualenv', '--version'], shell=False)
process.wait()
except Exception:
return False
return True if process.returncode == 0 else False
def _read_config(self):
"""
Internal function that retrieves config data stored in executable
:return: dict
"""
data = {}
config_file_name = 'config.json'
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), config_file_name)
if not os.path.isfile(config_path):
config_path = os.path.join(os.path.dirname(sys.executable), 'resources', config_file_name)
if not os.path.isfile(config_path):
if hasattr(sys, '_MEIPASS'):
config_path = os.path.join(sys._MEIPASS, 'resources', config_file_name)
if not os.path.isfile(config_path):
return data
try:
with open(config_path) as config_file:
data = json.load(config_file)
except RuntimeError as exc:
raise Exception(exc)
return data
def _get_app_config(self, config_name):
"""
Returns configuration parameter stored in configuration, if exists
:param config_name: str
:return: str
"""
if not self._config_data:
return None
return self._config_data.get(config_name, None)
def _get_script_path(self):
script_path = None
config_file_name = 'launcher.py'
script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), config_file_name)
if not os.path.isfile(script_path):
script_path = os.path.join(os.path.dirname(sys.executable), 'resources', config_file_name)
if not os.path.isfile(script_path):
if hasattr(sys, '_MEIPASS'):
script_path = os.path.join(sys._MEIPASS, 'resources', config_file_name)
LOGGER.info('Launcher Script: "{}"'.format(script_path))
return script_path
def _get_resource(self, resource_name):
resource_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', resource_name)
if not os.path.isfile(resource_path):
resource_path = os.path.join(os.path.dirname(sys.executable), 'resources', resource_name)
if not os.path.isfile(resource_path):
if hasattr(sys, '_MEIPASS'):
resource_path = os.path.join(sys._MEIPASS, 'resources', resource_name)
LOGGER.info("Retrieving resource: {} >>> {}".format(resource_name, resource_path))
return resource_path
    def _set_splash_text(self, new_text):
        # Update the splash progress label and force Qt to repaint immediately,
        # since long-running work below would otherwise freeze the UI text.
        self._progress_text.setText(new_text)
        QApplication.instance().processEvents()
    def _setup_ui(self):
        """
        Builds the frameless splash screen UI: version/status/install-path
        labels, deploy-tag combo, action buttons and their layouts; widgets
        start hidden and signals are wired at the end.
        """
        # --- Splash window -------------------------------------------------
        splash_pixmap = QPixmap(self._splash_path)
        self._splash = ArtellaSplash(splash_pixmap)
        self._splash.setWindowFlags(Qt.FramelessWindowHint)
        splash_layout = QVBoxLayout()
        splash_layout.setContentsMargins(5, 2, 5, 2)
        splash_layout.setSpacing(2)
        splash_layout.setAlignment(Qt.AlignBottom)
        self._splash.setLayout(splash_layout)
        # Shared stylesheet for the semi-transparent rounded labels
        label_style = """
        QLabel
        {
            background-color: rgba(100, 100, 100, 100);
            color: white;
            border-radius: 5px;
        }
        """
        # --- Info labels and icons ----------------------------------------
        self._version_lbl = QLabel('v0.0.0')
        self._version_lbl.setStyleSheet(label_style)
        version_font = self._version_lbl.font()
        version_font.setPointSize(10)
        self._version_lbl.setFont(version_font)
        self._artella_status_icon = QLabel()
        self._artella_status_icon.setPixmap(QPixmap(self._get_resource('artella_off.png')).scaled(QSize(30, 30)))
        install_path_icon = QLabel()
        install_path_icon.setPixmap(QPixmap(self._get_resource('disk.png')).scaled(QSize(25, 25)))
        self._install_path_lbl = QLabel('Install Path: ...')
        self._install_path_lbl.setStyleSheet(label_style)
        install_path_font = self._install_path_lbl.font()
        install_path_font.setPointSize(8)
        self._install_path_lbl.setFont(install_path_font)
        deploy_tag_icon = QLabel()
        deploy_tag_icon.setPixmap(QPixmap(self._get_resource('tag.png')).scaled(QSize(25, 25)))
        self._deploy_tag_combo = QComboBox()
        info_layout = QVBoxLayout()
        info_layout.setContentsMargins(5, 5, 5, 5)
        info_layout.setSpacing(10)
        # Shared stylesheet for all action buttons (normal/hover/pressed)
        buttons_style = """
        QPushButton:!hover
        {
            background-color: rgba(100, 100, 100, 100);
            color: white;
            border-radius: 5px;
        }
        QPushButton:hover
        {
            background-color: rgba(50, 50, 50, 100);
            color: white;
            border-radius: 5px;
        }
        QPushButton:pressed
        {
            background-color: rgba(15, 15, 15, 100);
            color: white;
            border-radius: 5px;
        }
        """
        # --- Action buttons ------------------------------------------------
        self._launch_btn = QPushButton('Launch')
        self._launch_btn.setStyleSheet(buttons_style)
        self._launch_btn.setFixedWidth(150)
        self._launch_btn.setFixedHeight(30)
        self._launch_btn.setIconSize(QSize(40, 40))
        self._launch_btn.setIcon(QPixmap(self._get_resource('play.png')))
        self._close_btn = QPushButton('')
        self._close_btn.setFlat(True)
        self._close_btn.setFixedSize(QSize(30, 30))
        self._close_btn.setIconSize(QSize(25, 25))
        self._close_btn.setIcon(QPixmap(self._get_resource('close.png')))
        self._open_install_folder_btn = QPushButton('Open Install Folder')
        self._open_install_folder_btn.setStyleSheet(buttons_style)
        self._open_install_folder_btn.setFixedWidth(150)
        self._open_install_folder_btn.setFixedHeight(30)
        self._open_install_folder_btn.setIconSize(QSize(25, 25))
        self._open_install_folder_btn.setIcon(QPixmap(self._get_resource('search_folder.png')))
        self._reinstall_btn = QPushButton('Reinstall')
        self._reinstall_btn.setStyleSheet(buttons_style)
        self._reinstall_btn.setFixedWidth(75)
        self._reinstall_btn.setFixedHeight(30)
        self._reinstall_btn.setIconSize(QSize(15, 15))
        self._reinstall_btn.setIcon(QPixmap(self._get_resource('reinstall.png')))
        self._uninstall_btn = QPushButton('Uninstall')
        self._uninstall_btn.setStyleSheet(buttons_style)
        self._uninstall_btn.setFixedWidth(75)
        self._uninstall_btn.setFixedHeight(30)
        self._uninstall_btn.setIconSize(QSize(20, 20))
        self._uninstall_btn.setIcon(QPixmap(self._get_resource('uninstall.png')))
        uninstall_reinstall_layout = QHBoxLayout()
        uninstall_reinstall_layout.setSpacing(2)
        uninstall_reinstall_layout.setContentsMargins(2, 2, 2, 2)
        uninstall_reinstall_layout.addWidget(self._reinstall_btn)
        uninstall_reinstall_layout.addWidget(self._uninstall_btn)
        self._buttons_layout = QVBoxLayout()
        self._buttons_layout.setContentsMargins(5, 5, 5, 5)
        self._buttons_layout.setSpacing(2)
        self._buttons_layout.addWidget(self._launch_btn)
        self._buttons_layout.addWidget(self._open_install_folder_btn)
        self._buttons_layout.addLayout(uninstall_reinstall_layout)
        # --- Tag info/refresh buttons -------------------------------------
        self._info_tag_btn = QPushButton()
        self._info_tag_btn.setFlat(True)
        self._info_tag_btn.setFixedSize(QSize(25, 25))
        self._info_tag_btn.setIconSize(QSize(18, 18))
        info_icon = QIcon()
        info_icon.addPixmap(QPixmap(self._get_resource('info.png')).scaled(QSize(25, 25)))
        self._info_tag_btn.setIcon(info_icon)
        self._refresh_tag_btn = QPushButton()
        self._refresh_tag_btn.setFlat(True)
        self._refresh_tag_btn.setFixedSize(QSize(25, 25))
        self._refresh_tag_btn.setIconSize(QSize(18, 18))
        refresh_icon = QIcon()
        refresh_icon.addPixmap(QPixmap(self._get_resource('refresh.png')).scaled(QSize(25, 25)))
        self._refresh_tag_btn.setIcon(refresh_icon)
        # --- Progress text -------------------------------------------------
        self._progress_text = QLabel('Setting {} ...'.format(self._project_name.title()))
        self._progress_text.setAlignment(Qt.AlignCenter)
        self._progress_text.setStyleSheet("QLabel { background-color : rgba(0, 0, 0, 180); color : white; }")
        font = self._progress_text.font()
        font.setPointSize(10)
        self._progress_text.setFont(font)
        # Center the buttons column horizontally with expanding spacers
        second_layout = QHBoxLayout()
        second_layout.setContentsMargins(5, 5, 5, 5)
        second_layout.setSpacing(5)
        second_layout.addItem(QSpacerItem(10, 0, QSizePolicy.Expanding, QSizePolicy.Preferred))
        second_layout.addLayout(self._buttons_layout)
        second_layout.addItem(QSpacerItem(10, 0, QSizePolicy.Expanding, QSizePolicy.Preferred))
        splash_layout.addLayout(second_layout)
        splash_layout.addWidget(self._progress_text)
        # --- Manual positioning over the splash pixmap ---------------------
        self._artella_status_icon.setParent(self._splash)
        self._version_lbl.setParent(self._splash)
        self._close_btn.setParent(self._splash)
        install_path_icon.setParent(self._splash)
        self._install_path_lbl.setParent(self._splash)
        deploy_tag_icon.setParent(self._splash)
        self._deploy_tag_combo.setParent(self._splash)
        self._info_tag_btn.setParent(self._splash)
        self._refresh_tag_btn.setParent(self._splash)
        self._artella_status_icon.setFixedSize(QSize(45, 45))
        self._version_lbl.setFixedSize(50, 20)
        install_path_icon.setFixedSize(QSize(35, 35))
        self._install_path_lbl.setFixedSize(QSize(200, 20))
        deploy_tag_icon.setFixedSize(QSize(35, 35))
        self._deploy_tag_combo.setFixedSize(QSize(150, 20))
        # Stack the icon/label rows down the left edge of the splash
        height = 5
        self._version_lbl.move(10, self._splash.height() - 48)
        self._artella_status_icon.move(5, height)
        height += self._artella_status_icon.height() - 5
        install_path_icon.move(5, height)
        self._install_path_lbl.move(install_path_icon.width(), height + self._install_path_lbl.height() / 2 - 5)
        height += install_path_icon.height() - 5
        deploy_tag_icon.move(5, height)
        height = height + self._deploy_tag_combo.height() / 2 - 5
        self._deploy_tag_combo.move(deploy_tag_icon.width(), height)
        self._info_tag_btn.move(self._deploy_tag_combo.width() + self._info_tag_btn.width() + 10, height - 2)
        if not self._dev:
            self._refresh_tag_btn.move(self._deploy_tag_combo.width() + self._refresh_tag_btn.width() + 10, height - 2)
        else:
            self._refresh_tag_btn.move(
                self._deploy_tag_combo.width() + self._refresh_tag_btn.width() + self._info_tag_btn.width() + 10,
                height - 2)
        self._close_btn.move(self._splash.width() - self._close_btn.width() - 5, 0)
        self._deploy_tag_combo.setFocusPolicy(Qt.NoFocus)
        # In dev mode the combo is disabled and its drop-down arrow is hidden
        combo_width = 5
        if self._dev:
            self._deploy_tag_combo.setEnabled(False)
            combo_width = 0
        self._deploy_tag_combo.setStyleSheet("""
            QComboBox:!editable
            {
                background-color: rgba(100, 100, 100, 100);
                color: white;
                border-radius: 5px;
                padding: 1px 0px 1px 3px;
            }
            QComboBox::drop-down:!editable
            {
                background: rgba(50, 50, 50, 100);
                border-top-right-radius: 5px;
                border-bottom-right-radius: 5px;
                image: none;
                width: %dpx;
            }
        """ % combo_width)
        # All interactive widgets start hidden; they are shown once loading is done
        self._close_btn.setVisible(False)
        self._launch_btn.setVisible(False)
        self._open_install_folder_btn.setVisible(False)
        self._uninstall_btn.setVisible(False)
        self._reinstall_btn.setVisible(False)
        self._info_tag_btn.setVisible(False)
        self._refresh_tag_btn.setVisible(False)
        # --- Signal wiring -------------------------------------------------
        self._deploy_tag_combo.currentIndexChanged.connect(self._on_selected_tag)
        self._close_btn.clicked.connect(sys.exit)
        self._open_install_folder_btn.clicked.connect(self._on_open_installation_folder)
        self._launch_btn.clicked.connect(self.launch)
        self._reinstall_btn.clicked.connect(self._on_reinstall)
        self._uninstall_btn.clicked.connect(self._on_uninstall)
        self._info_tag_btn.clicked.connect(self._on_open_tag_info)
        self._refresh_tag_btn.clicked.connect(self._on_refresh_tag)
        self._splash.show()
        self._splash.raise_()
def _open_folder(self, path=None):
"""
Opens a folder in the explorer in a independent platform way
If not path is passed the current directory will be opened
:param path: str, folder path to open
"""
if path is None:
path = os.path.curdir
if sys.platform == 'darwin':
self._check_call(commands_list=['open', '--', path])
elif sys.platform == 'linux2':
self._run_subprocess(commands_list=['xdg-open', path])
elif sys.platform is 'windows' or 'win32' or 'win64':
new_path = path.replace('/', '\\')
try:
self._check_call(commands_list=['explorer', new_path], shell=False)
except Exception:
pass
def _clean_folder(self, folder):
"""
Internal function that removes all the contents in the given folder
:param folder: str
"""
if | |
import tensorflow as tf
# Let TensorFlow grow GPU memory on demand instead of reserving it all at
# start-up (avoids OOM when sharing the GPU with other processes).
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
import core.utils as utils
from core.config import cfg
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
import cv2
import numpy as np
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
import time
from flask import Flask, request, Response, jsonify, send_from_directory, abort
import os
import json
import requests
# --- Inference configuration -------------------------------------------------
# 'tf' loads a SavedModel; 'tflite' uses the TensorFlow Lite interpreter
framework = 'tf'
weights_path = './checkpoints/yolov4-416'
# Square input resolution the network expects
size = 416
tiny = False
model = 'yolov4'
# Folder where annotated detection images are written
output_path = './detections/'
# Non-max-suppression IoU threshold and minimum detection score
iou = 0.45
score = 0.25
class Flag:
    # Minimal stand-in for the FLAGS object expected by utils.load_config
    tiny = tiny
    model = model
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
FLAGS = Flag
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = size
# load model
if framework == 'tflite':
    interpreter = tf.lite.Interpreter(model_path=weights_path)
else:
    saved_model_loaded = tf.saved_model.load(weights_path, tags=[tag_constants.SERVING])
# Initialize Flask application
app = Flask(__name__)
print("loaded")
# API that returns JSON with classes found in images
@app.route('/detections/by-image-files', methods=['POST'])
def get_detections_by_image_files():
    """
    Flask endpoint: accepts multipart-form image files under the 'images' key,
    runs the YOLOv4 model on each and returns JSON with the detected classes,
    confidences and bounding boxes. An annotated copy of each image is also
    written to the detections output folder.
    NOTE(review): uploads sharing a filename overwrite each other in ./temp —
    confirm this is acceptable for concurrent/duplicate-name requests.
    """
    images = request.files.getlist("images")
    image_path_list = []
    # Persist every upload to ./temp so OpenCV can read it from disk
    for image in images:
        image_name = image.filename
        image_path_list.append("./temp/" + image_name)
        image.save(os.path.join(os.getcwd(), "temp/", image_name))
    # create list for final response
    response = []
    # loop through images in list and run Yolov4 model on each
    for count, image_path in enumerate(image_path_list):
        # create list of responses for current image
        responses = []
        try:
            original_image = cv2.imread(image_path)
            original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
            image_data = cv2.resize(original_image, (input_size, input_size))
            image_data = image_data / 255.
        except cv2.error:
            # remove temporary images
            for name in image_path_list:
                os.remove(name)
            abort(404, "it is not an image file or image file is an unsupported format. try jpg or png")
        except Exception as e:
            # remove temporary images
            for name in image_path_list:
                os.remove(name)
            print(e.__class__)
            print(e)
            abort(500)
        # Wrap the single image into a batch of one for the model
        images_data = []
        for i in range(1):
            images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)
        if framework == 'tflite':
            interpreter.allocate_tensors()
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            print(input_details)
            print(output_details)
            interpreter.set_tensor(input_details[0]['index'], images_data)
            interpreter.invoke()
            pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
            # yolov3-tiny swaps the order of the two output tensors
            if model == 'yolov3' and tiny == True:
                boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                                input_shape=tf.constant([input_size, input_size]))
            else:
                boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                                input_shape=tf.constant([input_size, input_size]))
        else:
            t1 = time.time()
            infer = saved_model_loaded.signatures['serving_default']
            batch_data = tf.constant(images_data)
            pred_bbox = infer(batch_data)
            # Split the model output into box coordinates and class confidences
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]
            t2 = time.time()
            print('time: {}'.format(t2 - t1))
        t1 = time.time()
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score
        )
        t2 = time.time()
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)
        print('time: {}'.format(t2 - t1))
        # Collect one JSON entry per surviving detection
        for i in range(valid_detections[0]):
            print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                        np.array(scores[0][i]),
                                        np.array(boxes[0][i])))
            responses.append({
                "class": class_names[int(classes[0][i])],
                "confidence": float("{0:.2f}".format(np.array(scores[0][i]) * 100)),
                "box": np.array(boxes[0][i]).tolist()
            })
        # [7:] strips the leading './temp/' so only the original filename is returned
        response.append({
            "image": image_path_list[count][7:],
            "detections": responses
        })
        pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
        # read in all class names from config
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)
        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())
        # custom allowed classes (uncomment line below to allow detections for only people)
        # allowed_classes = ['person']
        # Save an annotated copy of the image for inspection
        image = utils.draw_bbox(original_image, pred_bbox, allowed_classes=allowed_classes)
        image = Image.fromarray(image.astype(np.uint8))
        image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
        cv2.imwrite(output_path + 'detection' + str(count) + '.png', image)
    # remove temporary images
    for name in image_path_list:
        os.remove(name)
    try:
        return Response(response=json.dumps({"response": response}), mimetype="application/json")
    except FileNotFoundError:
        abort(404)
# API that returns image with detections on it
@app.route('/image/by-image-file', methods=['POST'])
def get_image_by_image_file():
    """
    Flask endpoint: accepts a single multipart-form image under the 'images'
    key, runs the YOLOv4 model on it and returns the annotated image as a PNG
    response. A copy of the annotated image is also written to the detections
    output folder.
    """
    image = request.files["images"]
    image_path = "./temp/" + image.filename
    # image_path[2:] strips the leading './' before joining with the cwd
    image.save(os.path.join(os.getcwd(), image_path[2:]))
    try:
        original_image = cv2.imread(image_path)
        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
        image_data = cv2.resize(original_image, (input_size, input_size))
        image_data = image_data / 255.
    except cv2.error:
        # remove temporary image
        os.remove(image_path)
        abort(404, "it is not an image file or image file is an unsupported format. try jpg or png")
    except Exception as e:
        # remove temporary image
        os.remove(image_path)
        print(e.__class__)
        print(e)
        abort(500)
    # Wrap the single image into a batch of one for the model
    images_data = np.asarray([image_data]).astype(np.float32)
    if framework == 'tflite':
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        print(input_details)
        print(output_details)
        interpreter.set_tensor(input_details[0]['index'], images_data)
        interpreter.invoke()
        pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
        # yolov3-tiny swaps the order of the two output tensors
        if model == 'yolov3' and tiny:
            boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
        else:
            boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
    else:
        t1 = time.time()
        infer = saved_model_loaded.signatures['serving_default']
        batch_data = tf.constant(images_data)
        pred_bbox = infer(batch_data)
        # Split the model output into box coordinates and class confidences
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]
        t2 = time.time()
        print('time: {}'.format(t2 - t1))
    t1 = time.time()
    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
        scores=tf.reshape(
            pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
        max_output_size_per_class=50,
        max_total_size=50,
        iou_threshold=iou,
        score_threshold=score
    )
    t2 = time.time()
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)
    print('time: {}'.format(t2 - t1))
    for i in range(valid_detections[0]):
        print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                    np.array(scores[0][i]),
                                    np.array(boxes[0][i])))
    pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
    # read in all class names from config
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)
    # by default allow all classes in .names file
    allowed_classes = list(class_names.values())
    # custom allowed classes (uncomment line below to allow detections for only people)
    # allowed_classes = ['person']
    image = utils.draw_bbox(original_image, pred_bbox, allowed_classes=allowed_classes)
    image = Image.fromarray(image.astype(np.uint8))
    image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
    cv2.imwrite(output_path + 'detection' + '.png', image)
    # prepare image for response
    _, img_encoded = cv2.imencode('.png', image)
    # ndarray.tostring() was a deprecated alias removed in NumPy 2.0;
    # tobytes() is the supported, identical-output replacement.
    response = img_encoded.tobytes()
    # remove temporary image
    os.remove(image_path)
    try:
        return Response(response=response, status=200, mimetype='image/png')
    except FileNotFoundError:
        abort(404)
# API that returns JSON with classes found in images from url list
@app.route('/detections/by-url-list', methods=['POST'])
def get_detections_by_url_list():
    """
    Flask endpoint: accepts a JSON body with an 'images' list of URLs,
    downloads each image, runs the YOLOv4 model on it and returns JSON with
    the detected classes, confidences and bounding boxes. An annotated copy
    of each image is written to the detections output folder.
    """
    image_urls = request.get_json()["images"]
    raw_image_list = []
    if not isinstance(image_urls, list):
        abort(400, "can't find image list")
    image_names = []
    # Browser-like user agent so servers that block generic clients still respond
    custom_headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
    }
    # Download and decode every URL up front; any failure aborts the request
    for i, image_url in enumerate(image_urls):
        image_name = "Image" + str(i + 1)
        image_names.append(image_name)
        try:
            resp = requests.get(image_url, headers=custom_headers)
            img_raw = np.asarray(bytearray(resp.content), dtype="uint8")
            img_raw = cv2.imdecode(img_raw, cv2.IMREAD_COLOR)
        except cv2.error:
            abort(404, "it is not image url or that image is an unsupported format. try jpg or png")
        except requests.exceptions.MissingSchema:
            abort(400, "it is not url form")
        except Exception as e:
            print(e.__class__)
            print(e)
            abort(500)
        raw_image_list.append(img_raw)
    # create list for final response
    response = []
    # loop through images in list and run Yolov4 model on each
    for count, raw_image in enumerate(raw_image_list):
        # create list of responses for current image
        responses = []
        original_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB)
        image_data = cv2.resize(original_image, (input_size, input_size))
        image_data = image_data / 255.
        # Wrap the single image into a batch of one for the model
        images_data = []
        for i in range(1):
            images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)
        if framework == 'tflite':
            interpreter.allocate_tensors()
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            print(input_details)
            print(output_details)
            interpreter.set_tensor(input_details[0]['index'], images_data)
            interpreter.invoke()
            pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
            # yolov3-tiny swaps the order of the two output tensors
            if model == 'yolov3' and tiny == True:
                boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                                input_shape=tf.constant([input_size, input_size]))
            else:
                boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                                input_shape=tf.constant([input_size, input_size]))
        else:
            t1 = time.time()
            infer = saved_model_loaded.signatures['serving_default']
            batch_data = tf.constant(images_data)
            pred_bbox = infer(batch_data)
            # Split the model output into box coordinates and class confidences
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]
            t2 = time.time()
            print('time: {}'.format(t2 - t1))
        t1 = time.time()
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score
        )
        t2 = time.time()
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)
        print('time: {}'.format(t2 - t1))
        # Collect one JSON entry per surviving detection
        for i in range(valid_detections[0]):
            print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                        np.array(scores[0][i]),
                                        np.array(boxes[0][i])))
            responses.append({
                "class": class_names[int(classes[0][i])],
                "confidence": float("{0:.2f}".format(np.array(scores[0][i]) * 100)),
                "box": np.array(boxes[0][i]).tolist()
            })
        response.append({
            "image": image_names[count],
            "detections": responses
        })
        pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
        # read in all class names from config
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)
        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())
        # custom allowed classes (uncomment line below to allow detections for only people)
        # allowed_classes = ['person']
        # Save an annotated copy of the image for inspection
        image = utils.draw_bbox(original_image, pred_bbox, allowed_classes=allowed_classes)
        image = Image.fromarray(image.astype(np.uint8))
        image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
        cv2.imwrite(output_path + 'detection' + str(count) + '.png', image)
    try:
        return Response(response=json.dumps({"response": response}), mimetype="application/json")
    except FileNotFoundError:
        abort(404)
# API that returns image with detections on it from url
@app.route('/image/by-url', methods=['POST'])
def get_image_by_url():
image_urls = request.get_json()["images"]
if not isinstance(image_urls, list):
abort(400, "can't find image list")
image_names = []
custom_headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
}
image_name = "Image" + str(1)
image_names.append(image_name)
try:
resp = requests.get(image_urls[0], headers=custom_headers)
img_raw = np.asarray(bytearray(resp.content), dtype="uint8")
img_raw = cv2.imdecode(img_raw, cv2.IMREAD_COLOR)
except cv2.error:
abort(404, "it is not image url or that image is an unsupported format. try jpg or png")
except requests.exceptions.MissingSchema:
abort(400, "it is not url form")
except Exception as e:
print(e.__class__)
print(e)
abort(500)
# loop through images in list and run Yolov4 model on each
original_image = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
image_data = cv2.resize(original_image, (input_size, input_size))
image_data | |
our precision and recall scores. Hence, to avoid unexpected 'gotchas' later, it is good practice to have our categorical values be fed into our model as integers.
# >**Instructions:**
# * Convert the values in the 'label' column to numerical values using map method as follows:
# {'ham':0, 'spam':1} This maps the 'ham' value to 0 and the 'spam' value to 1.
# * Also, to get an idea of the size of the dataset we are dealing with, print out number of rows and columns using
# 'shape'.
# In[3]:
'''
Solution
'''
# Encode the target labels as integers ({'ham': 0, 'spam': 1}) so the model
# and the precision/recall computations receive numeric values.
df['label'] = df['label'].map({'ham': 0, 'spam': 1})
# Show the dataset dimensions (rows, columns).
df.shape
# ### Step 2.1: Bag of Words ###
#
# What we have here in our data set is a large collection of text data (5,572 rows of data). Most ML algorithms rely on numerical data to be fed into them as input, and email/sms messages are usually text heavy.
#
# Here we'd like to introduce the Bag of Words (BoW) concept which is a term used to specify the problems that have a 'bag of words' or a collection of text data that needs to be worked with. The basic idea of BoW is to take a piece of text and count the frequency of the words in that text. It is important to note that the BoW concept treats each word individually and the order in which the words occur does not matter.
#
# Using a process which we will go through now, we can convert a collection of documents to a matrix, with each document being a row and each word (token) being the column, and the corresponding (row, column) values being the frequency of occurrence of each word or token in that document.
#
# For example:
#
# Let's say we have 4 documents, which are text messages
# in our case, as follows:
#
# `['Hello, how are you!',
# 'Win money, win from home.',
# 'Call me now',
# 'Hello, Call you tomorrow?']`
#
# Our objective here is to convert this set of texts to a frequency distribution matrix, as follows:
#
# <img src="images/countvectorizer.png" height="542" width="542">
#
# Here as we can see, the documents are numbered in the rows, and each word is a column name, with the corresponding value being the frequency of that word in the document.
#
# Let's break this down and see how we can do this conversion using a small set of documents.
#
# To handle this, we will be using sklearn's
# [count vectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer) method which does the following:
#
# * It tokenizes the string (separates the string into individual words) and gives an integer ID to each token.
# * It counts the occurrence of each of those tokens.
#
# **Please Note:**
#
# * The CountVectorizer method automatically converts all tokenized words to their lower case form so that it does not treat words like 'He' and 'he' differently. It does this using the `lowercase` parameter which is by default set to `True`.
#
# * It also ignores all punctuation so that words followed by a punctuation mark (for example: 'hello!') are not treated differently than the same words not prefixed or suffixed by a punctuation mark (for example: 'hello'). It does this using the `token_pattern` parameter which has a default regular expression which selects tokens of 2 or more alphanumeric characters.
#
# * The third parameter to take note of is the `stop_words` parameter. Stop words refer to the most commonly used words in a language. They include words like 'am', 'an', 'and', 'the', etc. By setting this parameter value to `english`, CountVectorizer will automatically ignore all words (from our input text) that are found in the built in list of English stop words in scikit-learn. This is extremely helpful as stop words can skew our calculations when we are trying to find certain key words that are indicative of spam.
#
# We will dive into the application of each of these into our model in a later step, but for now it is important to be aware of such preprocessing techniques available to us when dealing with textual data.
# ### Step 2.2: Implementing Bag of Words from scratch ###
#
# Before we dive into scikit-learn's Bag of Words (BoW) library to do the dirty work for us, let's implement it ourselves first so that we can understand what's happening behind the scenes.
#
# **Step 1: Convert all strings to their lower case form.**
#
# Let's say we have a document set:
#
# ```
# documents = ['Hello, how are you!',
# 'Win money, win from home.',
# 'Call me now.',
# 'Hello, Call hello you tomorrow?']
# ```
# >>**Instructions:**
# * Convert all the strings in the documents set to their lower case. Save them into a list called 'lower_case_documents'. You can convert strings to their lower case in python by using the lower() method.
#
# In[4]:
'''
Solution:
'''
documents = ['Hello, how are you!',
             'Win money, win from home.',
             'Call me now.',
             'Hello, Call hello you tomorrow?']

# Normalize case so 'Hello' and 'hello' later count as the same token.
lower_case_documents = [doc.lower() for doc in documents]
print(lower_case_documents)
# **Step 2: Removing all punctuation**
#
# >>**Instructions:**
# Remove all punctuation from the strings in the document set. Save the strings into a list called
# 'sans_punctuation_documents'.
# In[5]:
'''
Solution:
'''
sans_punctuation_documents = []
import string

# BUG FIX: str.strip(string.punctuation) only trims punctuation at the ENDS
# of the string, leaving interior marks (e.g. commas) in place -- which is
# why a separate comma-removal pass was needed later.  str.translate()
# deletes punctuation everywhere in one pass.
_punct_table = str.maketrans('', '', string.punctuation)
for i in lower_case_documents:
    sans_punctuation_documents.append(i.translate(_punct_table))
print(sans_punctuation_documents)
# **Step 3: Tokenization**
#
# Tokenizing a sentence in a document set means splitting up the sentence into individual words using a delimiter. The delimiter specifies what character we will use to identify the beginning and end of a word. Most commonly, we use a single space as the delimiter character for identifying words, and this is true in our documents in this case also.
# >>**Instructions:**
# Tokenize the strings stored in 'sans_punctuation_documents' using the split() method. Store the final document set
# in a list called 'preprocessed_documents'.
#
# In[6]:
'''
Solution:
'''
# The strip-based pass above can leave interior commas behind, so drop them
# before whitespace tokenization (harmless no-op when already comma-free).
remove_commas = [doc.replace(',', '') for doc in sans_punctuation_documents]
# Tokenize each document on whitespace.
preprocessed_documents = [doc.split() for doc in remove_commas]
print(preprocessed_documents)
# **Step 4: Count frequencies**
#
# Now that we have our document set in the required format, we can proceed to counting the occurrence of each word in each document of the document set. We will use the `Counter` method from the Python `collections` library for this purpose.
#
# `Counter` counts the occurrence of each item in the list and returns a dictionary with the key as the item being counted and the corresponding value being the count of that item in the list.
# >>**Instructions:**
# Using the Counter() method and preprocessed_documents as the input, create a dictionary with the keys being each word in each document and the corresponding values being the frequency of occurrence of that word. Save each Counter dictionary as an item in a list called 'frequency_list'.
#
# In[7]:
'''
Solution
'''
import pprint
from collections import Counter

# One Counter per document: token -> number of occurrences.
frequency_list = [Counter(tokens) for tokens in preprocessed_documents]
pprint.pprint(frequency_list)
# Congratulations! You have implemented the Bag of Words process from scratch! As we can see in our previous output, we have a frequency distribution dictionary which gives a clear view of the text that we are dealing with.
#
# We should now have a solid understanding of what is happening behind the scenes in the `sklearn.feature_extraction.text.CountVectorizer` method of scikit-learn.
#
# We will now implement `sklearn.feature_extraction.text.CountVectorizer` method in the next step.
# ### Step 2.3: Implementing Bag of Words in scikit-learn ###
#
# Now that we have implemented the BoW concept from scratch, let's go ahead and use scikit-learn to do this process in a clean and succinct way. We will use the same document set as we used in the previous step.
# In[8]:
'''
Here we will look to create a frequency matrix on a smaller document set to make sure we understand how | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A Python implementation of the method described in [#a]_ and [#b]_ for
calculating Fourier coefficients for characterizing
closed contours.
References
----------
.. [#a] <NAME> and <NAME>, “Elliptic Fourier Features of a
Closed Contour," Computer Vision, Graphics and Image Processing,
Vol. 18, pp. 236-258, 1982.
.. [#b] <NAME>, <NAME> and <NAME>, “Feature Extraction
Methods for Character Recognition - A Survey”, Pattern Recognition
Vol. 29, No.4, pp. 641-662, 1996
Created by hbldh <<EMAIL>> on 2016-01-30.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import numpy
from sympy import *
class Model(object):
    """Precomputed elliptic-Fourier-descriptor model.

    Builds the lambdified position/tangent/normal generator functions once
    so they can be reused (e.g. by generateEFDModel) without repeating the
    expensive symbolic work.
    """

    def __init__(self, order=10, normalize=False):
        # BUG FIX: initEFDModel() accepts only the harmonic order; the old
        # call initEFDModel(order, normalize) raised a TypeError.  The
        # normalize flag is kept on the instance for callers that need it.
        self.normalize = normalize
        self.px, self.py, self.zx, self.zy, self.nx, self.ny = initEFDModel(order)
def elliptic_fourier_descriptors(contour, order=10, normalize=False):
    """Calculate elliptical Fourier descriptors for a closed contour.

    :param numpy.ndarray contour: A contour array of size ``[M x 2]``.
    :param int order: The order of Fourier coefficients to calculate.
    :param bool normalize: If the coefficients should be normalized;
        see references for details.
    :return: A ``[order x 4]`` array of Fourier coefficients.
    :rtype: :py:class:`numpy.ndarray`
    """
    # Chord vectors and arc-length parametrization of the contour.
    deltas = np.diff(contour, axis=0)
    seg_lengths = np.sqrt((deltas ** 2).sum(axis=1))
    t = np.concatenate([([0., ]), np.cumsum(seg_lengths)])
    T = t[-1]
    phi = (2 * np.pi * t) / T

    # Per-segment tangent components (constant along each segment).
    vx = deltas[:, 0] / seg_lengths
    vy = deltas[:, 1] / seg_lengths

    coeffs = np.zeros((order, 4))
    for harmonic in range(1, order + 1):
        scale = T / (2 * harmonic * harmonic * np.pi * np.pi)
        angles = phi * harmonic
        cos_step = np.cos(angles[1:]) - np.cos(angles[:-1])
        sin_step = np.sin(angles[1:]) - np.sin(angles[:-1])
        coeffs[harmonic - 1] = (scale * np.sum(vx * cos_step),
                                scale * np.sum(vx * sin_step),
                                scale * np.sum(vy * cos_step),
                                scale * np.sum(vy * sin_step))

    return normalize_efd(coeffs) if normalize else coeffs
def normalize_efd(coeffs, size_invariant=True):
    """Normalizes an array of Fourier coefficients.

    See [#a]_ and [#b]_ for details.  The input array is modified in place
    and also returned.

    :param numpy.ndarray coeffs: A ``[n x 4]`` Fourier coefficient array.
    :param bool size_invariant: If size invariance normalizing should be done as well.
        Default is ``True``.
    :return: The normalized ``[n x 4]`` Fourier coefficient array.
    :rtype: :py:class:`numpy.ndarray`
    """
    # Phase shift angle relative to the first major axis (computed from the
    # first-harmonic coefficients before any rotation is applied).
    a0, b0, c0, d0 = coeffs[0]
    theta_1 = 0.5 * np.arctan2(2 * ((a0 * b0) + (c0 * d0)),
                               ((a0 ** 2) - (b0 ** 2) + (c0 ** 2) - (d0 ** 2)))

    # Remove the phase shift: rotate harmonic k by k * theta_1.
    for idx in range(coeffs.shape[0]):
        k = idx + 1
        phase_rot = np.array([[np.cos(k * theta_1), -np.sin(k * theta_1)],
                              [np.sin(k * theta_1), np.cos(k * theta_1)]])
        coeffs[idx] = coeffs[idx].reshape(2, 2).dot(phase_rot).flatten()

    # Rotation invariance: align the semi-major axis with the x-axis.
    psi_1 = np.arctan2(coeffs[0, 2], coeffs[0, 0])
    psi_rot = np.array([[np.cos(psi_1), np.sin(psi_1)],
                        [-np.sin(psi_1), np.cos(psi_1)]])
    for idx in range(coeffs.shape[0]):
        coeffs[idx] = psi_rot.dot(coeffs[idx].reshape(2, 2)).flatten()

    if size_invariant:
        # Size invariance: scale by the first coefficient's magnitude.
        coeffs /= np.abs(coeffs[0, 0])

    return coeffs
def calculate_dc_coefficients(contour):
    """Calculate the :math:`A_0` and :math:`C_0` coefficients of the elliptic Fourier series.

    These are the arc-length means of x(t) and y(t), i.e. the centroid of
    the contour curve.

    :param numpy.ndarray contour: A contour array of size ``[M x 2]``.
    :return: The :math:`A_0` and :math:`C_0` coefficients.
    :rtype: tuple
    """
    deltas = np.diff(contour, axis=0)
    seg_lengths = np.sqrt((deltas ** 2).sum(axis=1))
    t = np.concatenate([([0., ]), np.cumsum(seg_lengths)])
    T = t[-1]
    diff_t2 = np.diff(t ** 2)

    # Exact integrals of the piecewise-linear x(t) and y(t), relative to
    # the first contour point.
    xi = np.cumsum(deltas[:, 0]) - (deltas[:, 0] / seg_lengths) * t[1:]
    A0 = (1 / T) * np.sum(((deltas[:, 0] / (2 * seg_lengths)) * diff_t2) + xi * seg_lengths)
    delta = np.cumsum(deltas[:, 1]) - (deltas[:, 1] / seg_lengths) * t[1:]
    C0 = (1 / T) * np.sum(((deltas[:, 1] / (2 * seg_lengths)) * diff_t2) + delta * seg_lengths)

    # A0 and C0 above are relative to the first point of the contour; shift
    # them back to absolute coordinates.
    return contour[0, 0] + A0, contour[0, 1] + C0
def initEFDModel(order):
    """Create lambdified generator functions for an EFD model with ``order``
    harmonics.

    :param int order: Number of harmonics the model must support.
    :return: Tuple ``(Px, Py, Zx, Zy, Nx, Ny)``: per-harmonic position
        (``Px``, ``Py``) and tangent (``Zx``, ``Zy``) evaluators taking
        ``(coef1, coef2, harmonic, m)``, plus normal-derivative evaluators
        (``Nx``, ``Ny``) taking all ``4 * order`` coefficients followed by
        the curve parameter ``m``.
    """
    a = Symbol('a')
    b = Symbol('b')
    c = Symbol('c')
    d = Symbol('d')
    m = Symbol('m')
    n = Symbol('n')

    # BUG FIX: the per-harmonic coefficient symbol lists were hard-coded to
    # four entries (a1..a4, b1..b4, ...) while the loop below runs over
    # range(order), so any order > 4 (the default in Model is 10) raised an
    # IndexError.  Generate one symbol per harmonic instead.
    a_ = [Symbol('a%d' % (k + 1)) for k in range(order)]
    b_ = [Symbol('b%d' % (k + 1)) for k in range(order)]
    c_ = [Symbol('c%d' % (k + 1)) for k in range(order)]
    d_ = [Symbol('d%d' % (k + 1)) for k in range(order)]

    # Parametric position of a single harmonic and its derivative w.r.t. m.
    x = a * cos(2 * n * pi * m) + b * sin(2 * n * pi * m)
    y = c * cos(2 * n * pi * m) + d * sin(2 * n * pi * m)
    dx = x.diff(m)
    dy = y.diff(m)

    Px = lambdify((a, b, n, m), x)
    Py = lambdify((c, d, n, m), y)
    Zx = lambdify((a, b, n, m), dx)
    Zy = lambdify((c, d, n, m), dy)

    # Symbolic tangent summed over all harmonics; precomputed once so the
    # expensive differentiation below is not repeated at run time.
    Zx_sym = 0
    Zy_sym = 0
    for k in range(order):
        Zx_sym += dx.subs([(a, a_[k]), (b, b_[k]), (n, k + 1)])
        Zy_sym += dy.subs([(c, c_[k]), (d, d_[k]), (n, k + 1)])

    # Derivative of the unit tangent -> (unnormalized) curvature normal.
    speed = sqrt(Zx_sym ** 2 + Zy_sym ** 2)
    ddx_norm = (Zx_sym / speed).diff(m)
    ddy_norm = (Zy_sym / speed).diff(m)

    all_args = a_ + b_ + c_ + d_ + [m]
    Nx = lambdify(all_args, ddx_norm)
    Ny = lambdify(all_args, ddy_norm)
    return Px, Py, Zx, Zy, Nx, Ny
def generateEFDModel(coeffs, locus, numPts, px, py, zx, zy, nx, ny):
    """Evaluate a precomputed EFD model at ``numPts`` points along the curve.

    :param numpy.ndarray coeffs: ``[order x 4]`` Fourier coefficient array.
    :param locus: (x, y) offset added to the reconstructed positions.
    :param int numPts: Number of sample points in m = [0, 1].
    :param px, py, zx, zy: Per-harmonic position/tangent evaluators.
    :param nx, ny: Normal-derivative evaluators over all coefficients.
    :return: Tuple ``(P, N, Cbar)`` of positions, normal-derivative vectors
        and signed curvature magnitudes.
    """
    ts = np.linspace(0, 1.0, numPts)

    # Accumulate per-harmonic contributions to position and tangent.
    Px = np.ones(numPts) * locus[0]
    Py = np.ones(numPts) * locus[1]
    Zx = 0
    Zy = 0
    a, b, c, d = [], [], [], []
    for k in range(coeffs.shape[0]):
        a.append(coeffs[k, 0])
        b.append(coeffs[k, 1])
        c.append(coeffs[k, 2])
        d.append(coeffs[k, 3])
        Px = Px + px(a[k], b[k], k + 1, ts)
        Py = Py + py(c[k], d[k], k + 1, ts)
        Zx = Zx + zx(a[k], b[k], k + 1, ts)
        Zy = Zy + zy(c[k], d[k], k + 1, ts)

    # Normal-derivative components, one scalar call per sample point; the
    # z component stays zero (planar curve).
    N = np.zeros((numPts, 3))
    for i in range(numPts):
        call_args = a + b + c + d + [ts[i]]
        N[i, 0] = nx(*call_args)
        N[i, 1] = ny(*call_args)

    P = np.zeros((numPts, 3))
    P[:, 0] = Px
    P[:, 1] = Py

    # Curvature magnitude is the norm of the normal-derivative vector.
    C = np.linalg.norm(N, axis=1)

    # The tangent x normal cross product's sign distinguishes convex from
    # concave curvature.
    crossProd = np.zeros(len(Zx))
    for i in range(len(Zx)):
        tangent = np.array([Zx[i], Zy[i], 0])
        normal = np.array(N[i, :])
        crossProd[i] = np.cross(tangent, normal)[2]

    Cbar = np.sign(crossProd) * abs(C)
    return P, N, Cbar
# def generateEFDModel(coeffs, locus=(0., 0.), numPts=300):
# a = Symbol('a')
# b = Symbol('b')
# c = Symbol('c')
# d = Symbol('d')
# m = Symbol('m')
# n = Symbol('n')
#
# x = a * cos(2 * n * pi * m) + b * sin(2 * n * pi * m)
# y = c * cos(2 * n * pi * m) + d * sin(2 * n * pi * m)
#
# dx = x.diff(m)
# dy = y.diff(m)
#
# m_ = np.linspace(0, 1.0, numPts)
#
# Px = np.ones((numPts)) * locus[0]
# Py = np.ones((numPts)) * locus[1]
#
# Zx = 0
# Zy = 0
#
# Zx_sym = 0
# Zy_sym = 0
#
# fx1 = lambdify((a, b, n, m), x)
# fy1 = lambdify((c, d, n, m), y)
# fdx1_norm = lambdify((a, b, n, m), dx)
# fdy1_norm | |
# 0
[0.29, 'rgb(204, 153, 102)'], # 1
[0.43, 'rgb(153, 102, 51)'], # 2
[0.57, 'rgb(115, 77, 38)'], # 3
[0.71, 'rgb(77, 51, 25)'], # 4
[1, 'rgb(38, 26, 13)']]), # 5
row = row_num, col = col_num)
fig7['layout'].update(plot_bgcolor = 'white',
title_text = "Poopy calendar",
yaxis_showticklabels = False,
yaxis7_showticklabels = False,
font = dict(size = 16))
plot(fig7)
# add % of that months poos for each day in hovertemplate
# %% Calendar plot of each day and a function of type/number/size of poos, darker colour for worse poos
# Correlation line
# Encode the ordinal size labels numerically for the fit line; labels not in
# the mapping keep the initial value 0.
# NOTE(review): the column key here has no trailing space ('Size of poo?'),
# unlike 'Size of poo? ' used elsewhere in this script -- confirm which
# spelling matches the actual DataFrame column.
size_scores = {'Small': 1, 'Medium': 2, 'Poonarmi': 3}
dataforfitline = np.zeros([np.size(scatterplot_df, 0), 1])
for j, label in enumerate(scatterplot_df['Size of poo?']):
    if label in size_scores:
        dataforfitline[j] = size_scores[label]
# Number of poos for each day
Num_type_of_poos = pd.DataFrame()
j = 0
# One row per unique calendar date ('%x' = locale-formatted date string).
for i in df['When did the poo occur? '].dt.strftime("%x").unique():
    Num_type_of_poos.loc[j, 'Date'] = i
    Num_type_of_poos.loc[j, 'Day'] = pd.to_datetime(i).strftime("%d")
    Num_type_of_poos.loc[j, 'Month'] = pd.to_datetime(i).strftime("%b")
    # Number of poos logged on this date.
    Num_type_of_poos.loc[j, 'Count'] = (df['When did the poo occur? '].dt.strftime("%x") == i).sum()
    # Severity: distance from the 'ideal' Bristol type 4.
    # NOTE(review): df is indexed by j, the unique-date counter, not by the
    # rows actually belonging to this date -- confirm this pairing is intended.
    Num_type_of_poos.loc[j, 'Type'] = np.abs(int(df['Type of poop 💩? '][j]) - 4)
    Num_type_of_poos.loc[j, 'Size'] = dataforfitline[j]
    # Num_type_of_poos.loc[j, 'Size'] = df['Size of poo? '][j]
    # Combined impact score used by the calendar heatmap below.
    Num_type_of_poos.loc[j, 'Func_data'] = (Num_type_of_poos.loc[j, 'Count'] + Num_type_of_poos.loc[j, 'Type']) * Num_type_of_poos.loc[j, 'Size']
    j += 1
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
#
#total_poos_in_month = []
#plot_titles = []
#j = 0
#for i in months:
#    total_poos_in_month.append(int(Num_type_of_poos['Count'][Num_type_of_poos['Month'] == i].sum()))
#    plot_titles.append(i + '<br>Total poopies = ' + str(total_poos_in_month[j]))
#    j += 1
# One heatmap subplot per month, laid out 2 rows x 6 columns.
fig8 = make_subplots(rows = 2, cols = 6, shared_yaxes = True, subplot_titles = months)
year = 2020
row_num = 1
col_num = 0
for month in months:
    # Advance to the next subplot cell (wrap to row 2 after 6 columns).
    col_num += 1
    if col_num > 6:
        col_num = 1
        row_num = 2
    # monthcalendar(): one list per week of day-of-month numbers, 0 = padding.
    # NOTE(review): year is hard-coded as 2020 here although `year` is defined
    # above -- confirm they are meant to stay in sync.
    MyMonthData = calendar.monthcalendar(2020, strptime(month, '%b').tm_mon)
    z = MyMonthData[::-1]  # reverse weeks so week 1 ends up at the heatmap's bottom
    m = 0
    for i in z:
        n = 0
        for j in i:
            # pop + insert replaces the cell value in place:
            #   padding day    -> '' (blank cell)
            #   no poos logged -> 0
            #   otherwise      -> that day's 'Func_data' impact score
            if j == 0:
                z[m].pop(n)
                z[m].insert(n, '')
            elif any((Num_type_of_poos['Day'] == str(j).zfill(2)) & (Num_type_of_poos['Month'] == month)) == False:
                z[m].pop(n)
                z[m].insert(n, 0)
            else:
                z[m].pop(n)
                z[m].insert(n, int(Num_type_of_poos.loc[(Num_type_of_poos['Day'] == str(j).zfill(2)) & (Num_type_of_poos['Month'] == month), 'Func_data']))
            n += 1
        m += 1
    # Full date labels, one per calendar cell, consumed by the hovertemplate.
    name = []
    for a in calendar.Calendar().monthdatescalendar(year, strptime(month, '%b').tm_mon):
        for b in a:
            name.append(b.strftime("%d %b %Y"))
    name = np.reshape([inner for inner in name], (len(MyMonthData), 7))
    name = name[::-1]  # match the reversed week order of z
    # NOTE(review): range(len(MyMonthData), 0) below is an EMPTY range (missing
    # step -1?), so no explicit y coordinates are supplied -- confirm intended.
    fig8.add_trace(go.Heatmap(
        x = days,
        y = list(range(len(MyMonthData), 0)),
        z = z,
        meta = name,
        hovertemplate = 'Date: %{meta} <br>Poo impact: %{z}<extra></extra>',
        xgap = 1, ygap = 1,
        zmin = 0, zmax = max(Num_type_of_poos['Func_data']),
#        colorscale = "turbid"),
        colorscale = [
        [0, 'rgb(249, 230, 217)'], # 0
        [0.2, 'rgb(204, 153, 102)'], # 1
        [0.4, 'rgb(153, 102, 51)'], # 2
        [0.6, 'rgb(115, 77, 38)'], # 3
        [0.8, 'rgb(80, 54, 28)'], # 4
        [1, 'rgb(38, 26, 13)']]), # 5
        row = row_num, col = col_num)
# Hide y tick labels on the two shared axes (axis 1 and axis 7).
fig8['layout'].update(plot_bgcolor = 'white',
                      title_text = "Poopy calendar - Function of number of, size of, and type of poos",
                      yaxis_showticklabels = False,
                      yaxis7_showticklabels = False,
                      font = dict(size = 16))
plot(fig8)
# %% Distribution of poos on stool scale per day
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

# Strip the 'Type ' prefix so only the Bristol-scale number remains.
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')

Date_column = df['When did the poo occur? '].dt.strftime("%a")
Total_poos = len(df['Type of poop 💩? '])

# Percentage of all poos that happened on each weekday.
ydata = []
for day in days:
    day_count = len(df['Type of poop 💩? '][Date_column == day])
    ydata.append(day_count / Total_poos * 100)

fig9 = go.Figure()
fig9.add_trace(go.Bar(x = days,
                      y = ydata,
                      hovertemplate = '%{y:.1f}%<extra></extra>',
                      name = day,  # leftover loop variable ('Sun'); legend hidden anyway
                      showlegend = False,
                      marker_color = ('rgb(166,86,50)')))
fig9.update_layout(title = "Poo distribution by day", font = dict(size = 16))
fig9.update_yaxes(range=[0, 20], ticks = "inside", title = "Percentage of poos / %")
fig9.update_xaxes(title = "Day of week")
plot(fig9)
# TODO: make this a stacked bar chart of poo type with total count as the bar height.
#%% Most frequent time of day
timerange = ['%02d' % h for h in range(24)]  # '00'..'23', matches strftime("%H")
X_titles = [t + ':00' for t in timerange]

# Strip the 'Type ' prefix (no-op when a previous section already did it).
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')

Time_column = df['When did the poo occur? '].dt.strftime("%H")
Total_poos = len(df['Type of poop 💩? '])

# Percentage of all poos falling within each hour of the day.
ydata = [len(df['Type of poop 💩? '][Time_column == t]) / Total_poos * 100 for t in timerange]

fig10 = go.Figure()
fig10.add_trace(go.Bar(x = timerange,
                       y = ydata,
                       hovertemplate = '%{y:.1f}%<extra></extra>',
                       showlegend = False,
                       marker_color = ('rgb(166,86,50)')))
fig10.update_layout(title = "Poo distribution by time", font = dict(size = 16))
fig10.update_yaxes(range=[0, 20], ticks = "inside", title = "Percentage of poos / %")
fig10.update_xaxes(ticks = "inside", title = "Time of day", tickmode = 'array', tickvals = [int(t) for t in timerange], ticktext = X_titles)
plot(fig10)
# %% Distribution by type
Type_of_poop = [str(i) for i in range(1, 8)]  # Bristol types '1'..'7'

# Strip the 'Type ' prefix so values compare equal to '1'..'7'.
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Total_poos = len(df['Type of poop 💩? '])

# Percentage of all poos per Bristol stool type.
ydata = [sum(df['Type of poop 💩? '] == poo) / Total_poos * 100 for poo in Type_of_poop]

fig11 = go.Figure()
fig11.add_trace(go.Bar(x = Type_of_poop,
                       y = ydata,
                       hovertemplate = '%{y:.1f}%<extra></extra>',
                       showlegend = False,
                       marker_color = ('rgb(166,86,50)')))
fig11.update_layout(title = "Poo distribution by type", font = dict(size = 16))
fig11.update_yaxes(range=[0, 60], ticks = "inside", title = "Percentage of poos / %")
fig11.update_xaxes(title = "Type of poo")
plot(fig11)
# %% Distribution by type excluding Jan and Feb
Type_of_poop = [str(i) for i in range(1, 8)]  # Bristol types '1'..'7'
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

# Strip the 'Type ' prefix so values compare equal to '1'..'7'.
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Total_poos = len(df['Type of poop 💩? '])

# Mask selecting poos logged after February (string compare on '%m').
# NOTE(review): the denominator Total_poos still includes Jan/Feb entries,
# so the bars do not sum to 100% -- confirm that is intended.
after_feb = df['When did the poo occur? '].dt.strftime("%m") > '02'

ydata = []
for poo in Type_of_poop:
    ydata.append(sum(np.logical_and(df['Type of poop 💩? '] == poo, after_feb) / Total_poos) * 100)

fig12 = go.Figure()
fig12.add_trace(go.Bar(x = Type_of_poop,
                       y = ydata,
                       hovertemplate = '%{y:.1f}%<extra></extra>',
                       showlegend = False,
                       marker_color = ('rgb(166,86,50)')))
fig12.update_layout(title = "Poo distribution by type (excluding Jan and Feb)", font = dict(size = 16))
fig12.update_yaxes(range=[0, 60], ticks = "inside", title = "Percentage of poos / %")
fig12.update_xaxes(title = "Type of poo")
plot(fig12)
# %% Poo: The Musical
# 1812 Overture
# Start background music playback (requires python-vlc and the mp3 file
# next to the script).
p = vlc.MediaPlayer("1812 overture - Cut2.mp3")
p.play()
# Use Rain drop style visulisation
#def plot_the_poos():
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
# Convert size labels to 1/2/3 unless a previous section already did so
# (checked via the first row's value).
if df['Size of poo? '][0] != 1 and df['Size of poo? '][0] != 2 and df['Size of poo? '][0] != 3:
    df['Size of poo? '].replace(['Small', 'Medium', 'Poonarmi'], [1, 2, 3], inplace = True)
df = df.sort_values(by=['When did the poo occur? '], ascending = True)
# Number of poos for each day
Overture_of_poos = pd.DataFrame()
j = 0
# One row per unique date: count plus a cumulative 'impact' score where each
# poo contributes |type - 4| * size on top of a baseline of 1.
for i in df['When did the poo occur? '].dt.strftime("%x").unique():
    Overture_of_poos.loc[j, 'Date'] = i
    Overture_of_poos.loc[j, 'Count'] = (df['When did the poo occur? '].dt.strftime("%x") == i).sum()
    Overture_of_poos.loc[j, 'Poo impact'] = 1
    Poo_type = df['Type of poop 💩? '][df['When did the poo occur? '].dt.strftime("%x") == i]
    Poo_size = df['Size of poo? '][df['When did the poo occur? '].dt.strftime("%x") == i]
    for a in Poo_type.index:
        Overture_of_poos.loc[j, 'Poo impact'] += abs(int(Poo_type[a])-4) * Poo_size[a]
    j += 1
# Fixing random state for reproducibility
np.random.seed(3)
# Create new Figure and an Axes which fills it.
fig = plt.figure(figsize=(7, 6))
ax = fig.add_axes([0, 0, 1, 1], frameon=False)
ax.set_xlim(0, 1), ax.set_xticks([])
ax.set_ylim(0, 1), ax.set_yticks([])
# Create rain data
n_drops = len(Overture_of_poos)
# Structured array: one record per animated 'drop'.
# NOTE(review): 1-element field shapes like ('size', float, 1) are deprecated
# in newer NumPy versions -- confirm the installed version accepts this.
rain_drops = np.zeros(n_drops, dtype=[('position', float, 2),
                                      ('size', float, 1),
                                      ('growth', float, 1),
                                      ('color', float, 4)])
# Initialize the raindrops in random positions and with random growth rates.
rain_drops['position'] = np.random.uniform(0, 1, (n_drops, 2))
# Construct the scatter which we will update during animation
# as the raindrops develop.
scat = ax.scatter(rain_drops['position'][:, 0], rain_drops['position'][:, 1],
                  s = rain_drops['size'], lw = 0.5, edgecolors = 'white', facecolors = 'white')
#rain_drops['growth'] = 50 # np.random.uniform(50, 200, n_drops)
# Start fully opaque brown; the animation fades the alpha channel per frame.
rain_drops['color'] = (102/255, 51/255, 0, 1)
def update(frame_number):
# Get an index which we can use to re-spawn the oldest raindrop.
current_index = frame_number % n_drops
# Make all colors more transparent as time progresses.
rain_drops['color'][:, 3] -= 0.05 # 1.0/len(rain_drops)
rain_drops['color'][:, 3] = np.clip(rain_drops['color'][:, 3], 0, 1)
# Make all circles | |
<reponame>Aashrut/Agent-Crop
import os
import shutil
import time
from flask_apscheduler import APScheduler
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from PIL import Image
import gdown
from flask import Flask, render_template, request, redirect, flash, send_from_directory
from werkzeug.utils import secure_filename
# Classifier output index -> human-readable crop/disease label.  The index
# order must match the class ordering the model was trained with.
_DISEASE_LABELS = (
    'Apple: Apple Scab',
    'Apple: Black Rot',
    'Apple: Cedar Rust',
    'Apple: Healthy',
    'Blueberry: Healthy',
    'Cherry: Powdery Mildew',
    'Cherry: Healthy',
    'Corn (Maize): Grey Leaf Spot',
    'Corn (Maize): Common Rust of Maize',
    'Corn (Maize): Northern Leaf Blight',
    'Corn (Maize): Healthy',
    'Grape: Black Rot',
    'Grape: Black Measles (Esca)',
    'Grape: Leaf Blight (Isariopsis Leaf Spot)',
    'Grape: Healthy',
    'Orange: Huanglongbing (Citrus Greening)',
    'Peach: Bacterial spot',
    'Peach: Healthy',
    'Bell Pepper: Bacterial Spot',
    'Bell Pepper: Healthy',
    'Potato: Early Blight',
    'Potato: Late Blight',
    'Potato: Healthy',
    'Raspberry: Healthy',
    'Rice: Brown Spot',
    'Rice: Hispa',
    'Rice: Leaf Blast',
    'Rice: Healthy',
    'Soybean: Healthy',
    'Squash: Powdery Mildew',
    'Strawberry: Leaf Scorch',
    'Strawberry: Healthy',
    'Tomato: Bacterial Spot',
    'Tomato: Early Blight',
    'Tomato: Late Blight',
    'Tomato: Leaf Mold',
    'Tomato: Septoria Leaf Spot',
    'Tomato: Spider Mites (Two-spotted Spider Mite)',
    'Tomato: Target Spot',
    'Tomato: Yellow Leaf Curl Virus',
    'Tomato: Mosaic Virus',
    'Tomato: Healthy',
)
disease_map = dict(enumerate(_DISEASE_LABELS))
details_map = {
'Apple: Apple Scab': [
'A serious disease of apples and ornamental crabapples, apple scab (Venturia inaequalis) attacks both leaves and fruit. The fungal disease forms pale yellow or olive-green spots on the upper surface of leaves. Dark, velvety spots may appear on the lower surface. Severely infected leaves become twisted and puckered and may drop early in the summer.',
'Symptoms on fruit are similar to those found on leaves. Scabby spots are sunken and tan and may have velvety spores in the center. As these spots mature, they become larger and turn brown and corky. Infected fruit becomes distorted and may crack allowing entry of secondary organisms. Severely affected fruit may drop, especially when young.',
'https://www.planetnatural.com/pest-problem-solver/plant-disease/apple-scab'],
'Apple: Black Rot': [
'Black rot is occasionally a problem on Minnesota apple trees. This fungal disease causes leaf spot, fruit rot and cankers on branches. Trees are more likely to be infected if they are: Not fully hardy in Minnesota, Infected with fire blight or Stressed by environmental factors like drought.',
'Large brown rotten areas can form anywhere on the fruit but are most common on the blossom end. Brown to black concentric rings can often be seen on larger infections. The flesh of the apple is brown but remains firm. Infected leaves develop "frog-eye leaf spot". These are circular spots with purplish or reddish edges and light tan interiors.',
'https://extension.umn.edu/plant-diseases/black-rot-apple'],
'Apple: Cedar Rust': [
'Cedar apple rust (Gymnosporangium juniperi-virginianae) is a fungal disease that requires juniper plants to complete its complicated two year life-cycle. Spores overwinter as a reddish-brown gall on young twigs of various juniper species. In early spring, during wet weather, these galls swell and bright orange masses of spores are blown by the wind where they infect susceptible apple and crab-apple trees. The spores that develop on these trees will only infect junipers the following year. From year to year, the disease must pass from junipers to apples to junipers again; it cannot spread between apple trees.',
'On apple and crab-apple trees, look for pale yellow pinhead sized spots on the upper surface of the leaves shortly after bloom. These gradually enlarge to bright orange-yellow spots which make the disease easy to identify. Orange spots may develop on the fruit as well. Heavily infected leaves may drop prematurely.',
'https://www.planetnatural.com/pest-problem-solver/plant-disease/cedar-apple-rust'],
'Apple: Healthy': [
'Your crops are healthy. You took good care of it.',
'Healthy Crops',
'Just take care of it as you usually do.'],
'Blueberry: Healthy': [
'Your crops are healthy. You took good care of it.',
'Healthy Crops',
'Just take care of it as you usually do.'],
'Cherry: Powdery Mildew': [
'Powdery mildew of sweet and sour cherry is caused by Podosphaera clandestina, an obligate biotrophic fungus. Mid- and late-season sweet cherry (Prunus avium) cultivars are commonly affected, rendering them unmarketable due to the covering of white fungal growth on the cherry surface. Season long disease control of both leaves and fruit is critical to minimize overall disease pressure in the orchard and consequently to protect developing fruit from accumulating spores on their surfaces.',
'Initial symptoms, often occurring 7 to 10 days after the onset of the first irrigation, are light roughly-circular, powdery looking patches on young, susceptible leaves (newly unfolded, and light green expanding leaves). Older leaves develop an age-related (ontogenic) resistance to powdery mildew and are naturally more resistant to infection than younger leaves. Look for early leaf infections on root suckers, the interior of the canopy or the crotch of the tree where humidity is high.',
'http://treefruit.wsu.edu/crop-protection/disease-management/cherry-powdery-mildew'],
'Cherry: Healthy': [
'Your crops are healthy. You took good care of it.',
'Healthy Crops',
'Just take care of it as you usually do.'],
'Corn (Maize): Grey Leaf Spot': [
'Gray leaf spot (GLS) is a common fungal disease in the United States caused by the pathogen Cercospora zeae-maydis in corn. Disease development is favored by warm temperatures, 80°F or 27 °C; and high humidity, relative humidity of 90% or higher for 12 hours or more. Cercospora zeae-maydis overwinters in corn residue, allowing inoculum to build up from year to year in fields. Cropping systems with reduced- or no-till and/or continuous corn are at higher risk for gray leaf spot outbreaks.',
'Gray leaf spot lesions begin as small necrotic pinpoints with chlorotic halos, these are more visible when leaves are backlit. Coloration of initial lesions can range from tan to brown before sporulation begins. Because early lesions are ambiguous, they are easily confused with other foliar diseases such as anthracnose leaf blight, eyespot, or common rust. As infection progresses, lesions begin to take on a more distinct shape. Lesion expansion is limited by parallel leaf veins, resulting in the blocky shaped “spots”. As sporulation commences, the lesions take on a more gray coloration.',
'https://www.pioneer.com/us/agronomy/gray_leaf_spot_cropfocus.html'],
'Corn (Maize): Common Rust of Maize': [
'Common rust is caused by the fungus Puccinia sorghi. Late occurring infections have limited impact on yield. The fungus overwinters on plants in southern states and airborne spores are wind-blown to northern states during the growing season. Disease development is favored by cool, moist weather (60 – 70◦ F).',
'Symptoms of common rust often appear after silking. Small, round to elongate brown pustules form on both leaf surfaces and other above ground parts of the plant. As the pustules mature they become brown to black. If disease is severe, the leaves may yellow and die early.',
'https://fieldcrops.cals.cornell.edu/corn/diseases-corn/common-rust'],
'Corn (Maize): Northern Leaf Blight': [
'Northern corn leaf blight caused by the fungus Exerohilum turcicum is a common leaf blight. If lesions begin early (before silking), crop loss can result. Late infections may have less of an impact on yield. Northern corn leaf blight is favored by wet humid cool weather typically found later in the growing season. Spores of the fungus that causes this disease can be transported by wind long distances from infected fields. Spread within and between fields locally also relies on wind blown spores.',
'The tan lesions of northern corn leaf blight are slender and oblong tapering at the ends ranging in size between 1 to 6 inches. Lesions run parallel to the leaf margins beginning on the lower leaves and moving up the plant. They may coalesce and cover the enter leaf. Spores are produced on the underside of the leaf below the lesions giving the appearance of a dusty green fuzz.',
'https://fieldcrops.cals.cornell.edu/corn/diseases-corn/northern-corn-leaf-blight'],
'Corn (Maize): Healthy': [
'Your crops are healthy. You took good care of it.',
'Healthy Crops',
'Just take | |
if needed.
if defn.info.mro[-1].fullname() != 'builtins.object':
defn.info.mro.append(self.object_type().type)
def expr_to_analyzed_type(self, expr: Node) -> Type:
    """Translate an expression into the corresponding analyzed type.

    A call expression is only a valid type if it defines a namedtuple;
    any other call raises TypeTranslationError.
    """
    if not isinstance(expr, CallExpr):
        # Ordinary case: convert the expression to an unanalyzed type
        # and run the full type analysis on it.
        return self.anal_type(expr_to_unanalyzed_type(expr))
    expr.accept(self)
    tuple_info = self.check_namedtuple(expr)
    if tuple_info is None:
        # Some form of namedtuple is the only valid type that looks like
        # a call expression. This isn't a valid type.
        raise TypeTranslationError()
    return TupleType(tuple_info.tuple_type.items,
                     fallback=Instance(tuple_info, []))
def verify_base_classes(self, defn: ClassDef) -> bool:
    """Check the base classes of a class definition for basic validity.

    Reports an error and returns False for an invalid base class
    ('bool') or a duplicated base class. Returns True otherwise.
    """
    info = defn.info
    for base in info.bases:
        baseinfo = base.type
        if self.is_base_class(info, baseinfo):
            self.fail('Cycle in inheritance hierarchy', defn)
            # Clear bases to forcefully get rid of the cycle.
            info.bases = []
            # NOTE(review): there is no 'return False' here -- after
            # reporting the cycle, analysis of the remaining checks
            # continues. Confirm this fall-through is intentional.
        if baseinfo.fullname() == 'builtins.bool':
            self.fail("'%s' is not a valid base class" %
                      baseinfo.name(), defn)
            return False
    dup = find_duplicate(info.direct_base_classes())
    if dup:
        self.fail('Duplicate base class "%s"' % dup.name(), defn)
        return False
    return True
def is_base_class(self, t: TypeInfo, s: TypeInfo) -> bool:
"""Determine if t is a base class of s (but do not use mro)."""
# Search the base class graph for t, starting from s.
worklist = [s]
visited = {s}
while worklist:
nxt = worklist.pop()
if nxt == t:
return True
for base in nxt.bases:
if base.type not in visited:
worklist.append(base.type)
visited.add(base.type)
return False
def analyze_metaclass(self, defn: ClassDef) -> None:
    """Check that the declared metaclass, if any, refers to a type."""
    if not defn.metaclass:
        return
    symbol = self.lookup_qualified(defn.metaclass, defn)
    if symbol is None:
        # Lookup failures are reported by lookup_qualified itself.
        return
    if not isinstance(symbol.node, TypeInfo):
        self.fail("Invalid metaclass '%s'" % defn.metaclass, defn)
def object_type(self) -> Instance:
    """Return an Instance for builtins.object."""
    return self.named_type('__builtins__.object')
def named_type(self, qualified_name: str, args: List[Type] = None) -> Instance:
    """Return an Instance for the named type; the lookup must succeed.

    args defaults to None (an implicit-Optional default), which is
    treated the same as an empty list of type arguments.
    """
    sym = self.lookup_qualified(qualified_name, None)
    # sym.node is expected to be a TypeInfo here; the cast encodes that.
    return Instance(cast(TypeInfo, sym.node), args or [])
def named_type_or_none(self, qualified_name: str) -> Instance:
    """Like named_type(), but return None if the name is not defined.

    NOTE(review): the annotation says Instance, but None is a possible
    result -- callers must treat this as Optional[Instance].
    """
    sym = self.lookup_fully_qualified_or_none(qualified_name)
    if not sym:
        return None
    return Instance(cast(TypeInfo, sym.node), [])
def is_instance_type(self, t: Type) -> bool:
    """Return True if t is an Instance (a class type, possibly generic)."""
    return isinstance(t, Instance)
def bind_class_type_variables_in_symbol_table(
        self, info: TypeInfo) -> List[SymbolTableNode]:
    """Bind each of the class's type variables and return the bindings.

    Type variable indices are 1-based, in declaration order.
    """
    return [self.bind_type_var(name, index, info)
            for index, name in enumerate(info.type_vars, 1)]
def visit_import(self, i: Import) -> None:
    """Add symbols for a plain 'import x' / 'import x as y' statement."""
    for module_id, alias in i.ids:
        if alias == module_id:
            # 'import a.b' binds only the top-level package name 'a'.
            top_level = module_id.split('.')[0]
            self.add_module_symbol(top_level, top_level, i)
        else:
            # 'import a.b as c' binds 'c' directly to module 'a.b'.
            self.add_module_symbol(module_id, alias, i)
def add_module_symbol(self, id: str, as_id: str, context: Context) -> None:
    """Bind 'as_id' to module 'id', or to a dummy symbol if 'id' is unknown."""
    if id not in self.modules:
        # The module failed to load (or does not exist); bind the name
        # to an Any-typed placeholder so analysis can continue.
        self.add_unknown_symbol(as_id, context)
        return
    module = self.modules[id]
    self.add_symbol(as_id,
                    SymbolTableNode(MODULE_REF, module, self.cur_mod_id),
                    context)
def visit_import_from(self, i: ImportFrom) -> None:
    """Process 'from m import x [as y]': bind each imported name locally.

    Missing attributes of a known module are reported as errors; if the
    module itself is unknown, every name gets a dummy Any-typed symbol.
    """
    i_id = self.correct_relative_import(i)
    if i_id in self.modules:
        m = self.modules[i_id]
        for id, as_id in i.names:
            node = m.names.get(id, None)
            if node:
                node = self.normalize_type_alias(node, i)
                if not node:
                    # Alias normalization failed; abandon the whole
                    # import statement (later names are skipped too).
                    return
                symbol = SymbolTableNode(node.kind, node.node,
                                         self.cur_mod_id,
                                         node.type_override)
                self.add_symbol(as_id, symbol, i)
            else:
                message = "Module has no attribute '{}'".format(id)
                extra = self.undefined_name_extra_info('{}.{}'.format(i_id, id))
                if extra:
                    message += " {}".format(extra)
                self.fail(message, i)
    else:
        # Unknown module: bind every imported name to a dummy symbol.
        for id, as_id in i.names:
            self.add_unknown_symbol(as_id, i)
def normalize_type_alias(self, node: SymbolTableNode,
                         ctx: Context) -> SymbolTableNode:
    """Replace a reference to a 'typing' alias (e.g. typing.List)
    with a reference to the aliased target (e.g. builtins.list).

    NOTE(review): lookup_qualified may return None on failure, so the
    result can be None despite the SymbolTableNode annotation; callers
    (e.g. visit_import_from) check for this.
    """
    if node.fullname in type_aliases:
        # Node refers to an aliased type such as typing.List; normalize.
        node = self.lookup_qualified(type_aliases[node.fullname], ctx)
    return node
def correct_relative_import(self, node: Union[ImportFrom, ImportAll]) -> str:
    """Resolve a (possibly relative) import to an absolute module id.

    node.relative is the number of leading dots; 0 means an absolute
    import, in which case node.id is returned unchanged.
    """
    if node.relative == 0:
        return node.id
    parts = self.cur_mod_id.split(".")
    cur_mod_id = self.cur_mod_id
    rel = node.relative
    # Inside a package's __init__ file, one leading dot refers to the
    # package itself, so strip one component fewer.
    if self.cur_mod_node.is_package_init_file():
        rel -= 1
    if len(parts) < rel:
        self.fail("Relative import climbs too many namespaces", node)
        # NOTE(review): execution continues after this error; the id
        # computed below is then not meaningful but keeps analysis going.
    if rel != 0:
        cur_mod_id = ".".join(parts[:-rel])
    return cur_mod_id + (("." + node.id) if node.id else "")
def visit_import_all(self, i: ImportAll) -> None:
    """Process 'from m import *': bind all public names of module m.

    Names with a leading underscore are not exported by '*'. If the
    module itself is unknown, no symbols (not even dummies) are added.
    """
    i_id = self.correct_relative_import(i)
    if i_id in self.modules:
        m = self.modules[i_id]
        for name, node in m.names.items():
            if name.startswith('_'):
                # Private names are not exported by '*'; skip them
                # before doing any alias normalization work.
                continue
            node = self.normalize_type_alias(node, i)
            if not node:
                # Alias normalization failed (lookup_qualified returned
                # None). Bail out like visit_import_from does, instead
                # of crashing on node.kind below.
                return
            self.add_symbol(name, SymbolTableNode(node.kind, node.node,
                                                  self.cur_mod_id), i)
    else:
        # Don't add any dummy symbols for 'from x import *' if 'x' is unknown.
        pass
def add_unknown_symbol(self, name: str, context: Context) -> None:
    """Bind 'name' to a ready, Any-typed dummy Var.

    Used for names imported from modules that could not be resolved.
    """
    placeholder = Var(name)
    placeholder._fullname = self.qualified_name(name)
    placeholder.is_ready = True
    placeholder.type = AnyType()
    self.add_symbol(name,
                    SymbolTableNode(GDEF, placeholder, self.cur_mod_id),
                    context)
#
# Statements
#
def visit_block(self, b: Block) -> None:
    """Analyze every statement of a block, tracking block nesting depth."""
    if b.is_unreachable:
        # Nothing to analyze in code marked unreachable.
        return
    self.block_depth[-1] += 1
    for stmt in b.body:
        stmt.accept(self)
    self.block_depth[-1] -= 1
def visit_block_maybe(self, b: Block) -> None:
    """Analyze the block if it is present (b may be None)."""
    if b:
        self.visit_block(b)
def anal_type(self, t: Type, allow_tuple_literal: bool = False) -> Type:
    """Analyze an unanalyzed type and return the analyzed form.

    If allow_tuple_literal is True, bare tuple literal types such as
    (t1, t2) are also accepted (valid only in assignment statements).
    Returns None if t is None, or on a star-type error.
    """
    if t:
        if allow_tuple_literal:
            # Types such as (t1, t2, ...) only allowed in assignment statements. They'll
            # generate errors elsewhere, and Tuple[t1, t2, ...] must be used instead.
            if isinstance(t, TupleType):
                # Unlike TypeAnalyser, also allow implicit tuple types (without Tuple[...]).
                star_count = sum(1 for item in t.items if isinstance(item, StarType))
                if star_count > 1:
                    self.fail('At most one star type allowed in a tuple', t)
                    return None
                # Recurse with tuple literals still allowed for nested items.
                items = [self.anal_type(item, True)
                         for item in t.items]
                return TupleType(items, self.builtin_type('builtins.tuple'), t.line)
        a = TypeAnalyser(self.lookup_qualified,
                         self.lookup_fully_qualified,
                         self.fail)
        return t.accept(a)
    else:
        return None
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
    """Analyze an assignment statement: lvalues, rvalue, declared type,
    and the special forms (type aliases, TypeVar and namedtuple defs).
    """
    for lval in s.lvalues:
        self.analyze_lvalue(lval, explicit_type=s.type is not None)
    s.rvalue.accept(self)
    if s.type:
        # Bare tuple literal types are only allowed when the last lvalue
        # is itself a tuple/list target.
        allow_tuple_literal = isinstance(s.lvalues[-1], (TupleExpr, ListExpr))
        s.type = self.anal_type(s.type, allow_tuple_literal)
    else:
        # For simple assignments, allow binding type aliases.
        if (s.type is None and len(s.lvalues) == 1 and
                isinstance(s.lvalues[0], NameExpr)):
            res = analyze_type_alias(s.rvalue,
                                     self.lookup_qualified,
                                     self.lookup_fully_qualified,
                                     self.fail)
            if res and (not isinstance(res, Instance) or cast(Instance, res).args):
                # TODO: What if this gets reassigned?
                name = cast(NameExpr, s.lvalues[0])
                node = self.lookup(name.name, name)
                node.kind = TYPE_ALIAS
                node.type_override = res
                if isinstance(s.rvalue, IndexExpr):
                    s.rvalue.analyzed = TypeAliasExpr(res)
    if s.type:
        # Store type into nodes.
        for lvalue in s.lvalues:
            self.store_declared_types(lvalue, s.type)
    self.check_and_set_up_type_alias(s)
    self.process_typevar_declaration(s)
    self.process_namedtuple_definition(s)
def check_and_set_up_type_alias(self, s: AssignmentStmt) -> None:
    """Check if assignment creates a type alias and set it up as needed.

    Only handles 'Alias = SomeClass' at module top level without an
    explicit declared type.
    """
    # For now, type aliases only work at the top level of a module.
    if (len(s.lvalues) == 1 and not self.is_func_scope() and not self.type
            and not s.type):
        lvalue = s.lvalues[0]
        if isinstance(lvalue, NameExpr):
            if not lvalue.is_def:
                # Only a definition can create a type alias, not regular assignment.
                return
            rvalue = s.rvalue
            if isinstance(rvalue, RefExpr):
                node = rvalue.node
                if isinstance(node, TypeInfo):
                    # TODO: We should record the fact that this is a variable
                    # that refers to a type, rather than making this
                    # just an alias for the type.
                    self.globals[lvalue.name].node = node
def analyze_lvalue(self, lval: Node, nested: bool = False,
                   add_global: bool = False,
                   explicit_type: bool = False) -> None:
    """Analyze an lvalue or assignment target.

    Only if add_global is True, add name to globals table. If nested
    is true, the lvalue is within a tuple or list lvalue expression.
    explicit_type is true if the assignment carries a type declaration.
    """
    if isinstance(lval, NameExpr):
        # A name assigned inside a nested block at module top level
        # (block_depth > 0, not in a function or class) is still global.
        nested_global = (not self.is_func_scope() and
                         self.block_depth[-1] > 0 and
                         not self.type)
        if (add_global or nested_global) and lval.name not in self.globals:
            # Define new global name.
            v = Var(lval.name)
            v._fullname = self.qualified_name(lval.name)
            v.is_ready = False  # Type not inferred yet
            lval.node = v
            lval.is_def = True
            lval.kind = GDEF
            lval.fullname = v._fullname
            self.globals[lval.name] = SymbolTableNode(GDEF, v,
                                                      self.cur_mod_id)
        elif isinstance(lval.node, Var) and lval.is_def:
            # Since the is_def flag is set, this must have been analyzed
            # already in the first pass and added to the symbol table.
            v = cast(Var, lval.node)
            assert v.name() in self.globals
        elif (self.is_func_scope() and lval.name not in self.locals[-1] and
              lval.name not in self.global_decls[-1] and
              lval.name not in self.nonlocal_decls[-1]):
            # Define new local name.
            v = Var(lval.name)
            lval.node = v
            lval.is_def = True
            lval.kind = LDEF
            lval.fullname = lval.name
            self.add_local(v, lval)
        elif not self.is_func_scope() and (self.type and
                                           lval.name not in self.type.names):
            # Define a new attribute within class body.
            v = Var(lval.name)
            v.info = self.type
            v.is_initialized_in_class = True
            lval.node = v
            lval.is_def = True
            lval.kind = MDEF
            lval.fullname = lval.name
            self.type.names[lval.name] = SymbolTableNode(MDEF, v)
        else:
            # Bind to an existing name.
            if explicit_type:
                # Redeclaring an existing name with a type is an error.
                self.name_already_defined(lval.name, lval)
            lval.accept(self)
            self.check_lvalue_validity(lval.node, lval)
    elif isinstance(lval, MemberExpr):
        if not add_global:
            self.analyze_member_lvalue(lval)
        if explicit_type and not self.is_self_member_ref(lval):
            self.fail('Type cannot be declared in assignment to non-self '
                      'attribute', lval)
    elif isinstance(lval, IndexExpr):
        if explicit_type:
            self.fail('Unexpected type declaration', lval)
        if not add_global:
            lval.accept(self)
    elif (isinstance(lval, TupleExpr) or
          isinstance(lval, ListExpr)):
        items = cast(Any, lval).items
        if len(items) == 0 and isinstance(lval, TupleExpr):
            self.fail("Can't assign to ()", lval)
        self.analyze_tuple_or_list_lvalue(cast(Union[ListExpr, TupleExpr], lval),
                                          add_global, explicit_type)
    elif isinstance(lval, StarExpr):
        # Star targets are only valid inside a tuple/list lvalue.
        if nested:
            self.analyze_lvalue(lval.expr, nested, add_global, explicit_type)
        else:
            self.fail('Starred assignment target must be in a list or tuple', lval)
    else:
        self.fail('Invalid assignment target', lval)
def analyze_tuple_or_list_lvalue(self, lval: Union[ListExpr, TupleExpr],
add_global: bool = False,
explicit_type: bool = False) -> None:
"""Analyze an | |
details are given below.
:param pulumi.Input[str] version_id: Specifies the table version for the output data schema. Defaults to `LATEST`.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if destination_id is not None:
pulumi.set(__self__, "destination_id", destination_id)
if elasticsearch_configuration is not None:
pulumi.set(__self__, "elasticsearch_configuration", elasticsearch_configuration)
if extended_s3_configuration is not None:
pulumi.set(__self__, "extended_s3_configuration", extended_s3_configuration)
if http_endpoint_configuration is not None:
pulumi.set(__self__, "http_endpoint_configuration", http_endpoint_configuration)
if kinesis_source_configuration is not None:
pulumi.set(__self__, "kinesis_source_configuration", kinesis_source_configuration)
if name is not None:
pulumi.set(__self__, "name", name)
if redshift_configuration is not None:
pulumi.set(__self__, "redshift_configuration", redshift_configuration)
if s3_configuration is not None:
pulumi.set(__self__, "s3_configuration", s3_configuration)
if server_side_encryption is not None:
pulumi.set(__self__, "server_side_encryption", server_side_encryption)
if splunk_configuration is not None:
pulumi.set(__self__, "splunk_configuration", splunk_configuration)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if version_id is not None:
pulumi.set(__self__, "version_id", version_id)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
    """
    The Amazon Resource Name (ARN) specifying the Stream
    """
    return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
    """Set the 'arn' input property."""
    pulumi.set(self, "arn", value)
@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input[str]]:
    """
    This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
    """
    return pulumi.get(self, "destination")
@destination.setter
def destination(self, value: Optional[pulumi.Input[str]]):
    """Set the 'destination' input property."""
    pulumi.set(self, "destination", value)
@property
@pulumi.getter(name="destinationId")
def destination_id(self) -> Optional[pulumi.Input[str]]:
    """Accessor for the 'destination_id' input property."""
    return pulumi.get(self, "destination_id")
@destination_id.setter
def destination_id(self, value: Optional[pulumi.Input[str]]):
    """Set the 'destination_id' input property."""
    pulumi.set(self, "destination_id", value)
@property
@pulumi.getter(name="elasticsearchConfiguration")
def elasticsearch_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]:
    """
    Configuration options if elasticsearch is the destination. More details are given below.
    """
    return pulumi.get(self, "elasticsearch_configuration")
@elasticsearch_configuration.setter
def elasticsearch_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]):
    """Set the 'elasticsearch_configuration' input property."""
    pulumi.set(self, "elasticsearch_configuration", value)
@property
@pulumi.getter(name="extendedS3Configuration")
def extended_s3_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]:
    """
    Enhanced configuration options for the s3 destination. More details are given below.
    """
    return pulumi.get(self, "extended_s3_configuration")
@extended_s3_configuration.setter
def extended_s3_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]):
    """Set the 'extended_s3_configuration' input property."""
    pulumi.set(self, "extended_s3_configuration", value)
@property
@pulumi.getter(name="httpEndpointConfiguration")
def http_endpoint_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]:
    """
    Configuration options if http_endpoint is the destination. requires the user to also specify a `s3_configuration` block. More details are given below.
    """
    return pulumi.get(self, "http_endpoint_configuration")
@http_endpoint_configuration.setter
def http_endpoint_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]):
    """Set the 'http_endpoint_configuration' input property."""
    pulumi.set(self, "http_endpoint_configuration", value)
@property
@pulumi.getter(name="kinesisSourceConfiguration")
def kinesis_source_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]:
    """
    Allows the ability to specify the kinesis stream that is used as the source of the firehose delivery stream.
    """
    return pulumi.get(self, "kinesis_source_configuration")
@kinesis_source_configuration.setter
def kinesis_source_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]):
    """Set the 'kinesis_source_configuration' input property."""
    pulumi.set(self, "kinesis_source_configuration", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    A name to identify the stream. This is unique to the
    AWS account and region the Stream is created in.
    """
    return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    """Set the 'name' input property."""
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="redshiftConfiguration")
def redshift_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs']]:
    """
    Configuration options if redshift is the destination.
    Using `redshift_configuration` requires the user to also specify a
    `s3_configuration` block. More details are given below.
    """
    return pulumi.get(self, "redshift_configuration")
@redshift_configuration.setter
def redshift_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs']]):
    """Set the 'redshift_configuration' input property."""
    pulumi.set(self, "redshift_configuration", value)
@property
@pulumi.getter(name="s3Configuration")
def s3_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs']]:
    """
    Required for non-S3 destinations. For S3 destination, use `extended_s3_configuration` instead. Configuration options for the s3 destination (or the intermediate bucket if the destination
    is redshift). More details are given below.
    """
    return pulumi.get(self, "s3_configuration")
@s3_configuration.setter
def s3_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs']]):
    """Set the 's3_configuration' input property."""
    pulumi.set(self, "s3_configuration", value)
@property
@pulumi.getter(name="serverSideEncryption")
def server_side_encryption(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs']]:
    """
    Encrypt at rest options.
    Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
    """
    return pulumi.get(self, "server_side_encryption")
@server_side_encryption.setter
def server_side_encryption(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs']]):
    """Set the 'server_side_encryption' input property."""
    pulumi.set(self, "server_side_encryption", value)
@property
@pulumi.getter(name="splunkConfiguration")
def splunk_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs']]:
    """
    Configuration options if splunk is the destination. More details are given below.
    """
    return pulumi.get(self, "splunk_configuration")
@splunk_configuration.setter
def splunk_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs']]):
    """Set the 'splunk_configuration' input property."""
    pulumi.set(self, "splunk_configuration", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """Accessor for the 'tags' input property (a map of resource tags)."""
    return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    """Set the 'tags' input property."""
    pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """Accessor for the 'tags_all' input property."""
    return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    """Set the 'tags_all' input property."""
    pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="versionId")
def version_id(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the table version for the output data schema. Defaults to `LATEST`.
    """
    return pulumi.get(self, "version_id")
@version_id.setter
def version_id(self, value: Optional[pulumi.Input[str]]):
    """Set the 'version_id' input property."""
    pulumi.set(self, "version_id", value)
class FirehoseDeliveryStream(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
destination_id: Optional[pulumi.Input[str]] = None,
elasticsearch_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]] = None,
extended_s3_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]] = None,
http_endpoint_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]] = None,
kinesis_source_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
redshift_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamRedshiftConfigurationArgs']]] = None,
s3_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamS3ConfigurationArgs']]] = None,
server_side_encryption: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamServerSideEncryptionArgs']]] = None,
splunk_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamSplunkConfigurationArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service to easily deliver real-time data streams to destinations such as Amazon S3 and Amazon Redshift.
For more details, see the [Amazon Kinesis Firehose Documentation](https://aws.amazon.com/documentation/firehose/).
## Example Usage
### Extended S3 Destination
```python
import pulumi
import pulumi_aws as aws
bucket = aws.s3.Bucket("bucket", acl="private")
firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
\"\"\")
lambda_iam = aws.iam.Role("lambdaIam", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
\"\"\")
lambda_processor = aws.lambda_.Function("lambdaProcessor",
code=pulumi.FileArchive("lambda.zip"),
role=lambda_iam.arn,
handler="exports.handler",
runtime="nodejs12.x")
extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
destination="extended_s3",
extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
role_arn=firehose_role.arn,
bucket_arn=bucket.arn,
processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
enabled=True,
processors=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
type="Lambda",
parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
parameter_name="LambdaArn",
parameter_value=lambda_processor.arn.apply(lambda arn: f"{arn}:$LATEST"),
)],
)],
),
))
```
### S3 Destination (deprecated)
```python
import pulumi
import pulumi_aws as aws
bucket = aws.s3.Bucket("bucket", acl="private")
firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
\"\"\")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="s3",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=firehose_role.arn,
bucket_arn=bucket.arn,
))
```
### Redshift Destination
```python
import pulumi
import pulumi_aws as aws
test_cluster = aws.redshift.Cluster("testCluster",
cluster_identifier="tf-redshift-cluster",
database_name="test",
master_username="testuser",
master_password="<PASSWORD>",
node_type="dc1.large",
cluster_type="single-node")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="redshift",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
redshift_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
cluster_jdbcurl=pulumi.Output.all(test_cluster.endpoint, test_cluster.database_name).apply(lambda endpoint, database_name: f"jdbc:redshift://{endpoint}/{database_name}"),
username="testuser",
password="<PASSWORD>",
data_table_name="test-table",
copy_options="delimiter '|'",
data_table_columns="test-col",
s3_backup_mode="Enabled",
s3_backup_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=15,
buffer_interval=300,
compression_format="GZIP",
),
))
```
### Elasticsearch Destination
```python
import pulumi
import pulumi_aws as aws
test_cluster = aws.elasticsearch.Domain("testCluster")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="elasticsearch",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
domain_arn=test_cluster.arn,
role_arn=aws_iam_role["firehose_role"]["arn"],
index_name="test",
type_name="test",
processing_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs(
enabled=True,
processors=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs(
type="Lambda",
parameters=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs(
parameter_name="LambdaArn",
parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
)],
)],
),
))
```
### Elasticsearch Destination With VPC
```python
import pulumi
import pulumi_aws as aws
test_cluster = aws.elasticsearch.Domain("testCluster",
cluster_config=aws.elasticsearch.DomainClusterConfigArgs(
instance_count=2,
zone_awareness_enabled=True,
instance_type="t2.small.elasticsearch",
),
ebs_options=aws.elasticsearch.DomainEbsOptionsArgs(
ebs_enabled=True,
volume_size=10,
),
vpc_options=aws.elasticsearch.DomainVpcOptionsArgs(
security_group_ids=[aws_security_group["first"]["id"]],
subnet_ids=[
aws_subnet["first"]["id"],
aws_subnet["second"]["id"],
],
))
firehose_elasticsearch = aws.iam.RolePolicy("firehose-elasticsearch",
role=aws_iam_role["firehose"]["id"],
policy=pulumi.Output.all(test_cluster.arn, test_cluster.arn).apply(lambda testClusterArn, testClusterArn1: f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"{test_cluster_arn}",
"{test_cluster_arn1}/*"
]
}},
{{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}}
]
}}
\"\"\"))
test = aws.kinesis.FirehoseDeliveryStream("test",
destination="elasticsearch",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
),
elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
domain_arn=test_cluster.arn,
role_arn=aws_iam_role["firehose"]["arn"],
index_name="test",
type_name="test",
vpc_config=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs(
subnet_ids=[
aws_subnet["first"]["id"],
aws_subnet["second"]["id"],
],
security_group_ids=[aws_security_group["first"]["id"]],
role_arn=aws_iam_role["firehose"]["arn"],
),
),
opts=pulumi.ResourceOptions(depends_on=[firehose_elasticsearch]))
```
### Splunk Destination
```python
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="splunk",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
splunk_configuration=aws.kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs(
hec_endpoint="https://http-inputs-mydomain.splunkcloud.com:443",
hec_token="5<PASSWORD>",
hec_acknowledgment_timeout=600,
hec_endpoint_type="Event",
s3_backup_mode="FailedEventsOnly",
))
```
### HTTP Endpoint (e.g. New Relic) Destination
```python
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="http_endpoint",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
http_endpoint_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs(
url="https://aws-api.newrelic.com/firehose/v1",
name="<NAME>",
access_key="my-key",
buffering_size=15,
buffering_interval=600,
role_arn=aws_iam_role["firehose"]["arn"],
s3_backup_mode="FailedDataOnly",
request_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs(
content_encoding="GZIP",
common_attributes=[
aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
name="testname",
value="testvalue",
),
aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
name="testname2",
value="testvalue2",
),
],
),
))
```
## Import
Kinesis Firehose Delivery streams can be imported using the stream ARN, e.g.
```sh
$ pulumi import aws:kinesis/firehoseDeliveryStream:FirehoseDeliveryStream foo arn:aws:firehose:us-east-1:XXX:deliverystream/example
```
NoteImport does not work for stream destination `s3`. Consider using `extended_s3` since `s3` destination is deprecated.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream
:param pulumi.Input[str] destination: This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamElasticsearchConfigurationArgs']] elasticsearch_configuration: Configuration options if elasticsearch is the destination. More details are given below.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']] extended_s3_configuration: Enhanced configuration options for | |
not None:
return fstring
return self.visit_BinopNode(node)
# Pattern used with re.split() in _build_fstring(); the capturing group
# keeps each '%'-placeholder (including '%%') as its own list element so
# it can be processed separately from the literal text around it.
_parse_string_format_regex = (
    u'(%(?:'  # %...
    u'(?:[0-9]+|[ ])?'  # width (optional) or space prefix fill character (optional)
    u'(?:[.][0-9]+)?'  # precision (optional)
    u')?.)'  # format type (or something different for unsupported formats)
)
def _build_fstring(self, pos, ustring, format_args):
    """Try to rewrite a %-formatted string into a JoinedStrNode (f-string).

    Returns the transformed node, or None when the format cannot safely
    be optimised (unsupported conversion types, argument count mismatch,
    incomplete format, integer precision).
    """
    # Issues formatting warnings instead of errors since we really only catch a few errors by accident.
    args = iter(format_args)
    substrings = []
    can_be_optimised = True
    for s in re.split(self._parse_string_format_regex, ustring):
        if not s:
            continue
        if s == u'%%':
            # Literal percent sign.
            substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u'%'), constant_result=u'%'))
            continue
        if s[0] != u'%':
            if s[-1] == u'%':
                # A trailing lone '%' means the format string was cut short.
                warning(pos, "Incomplete format: '...%s'" % s[-3:], level=1)
                can_be_optimised = False
            substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(s), constant_result=s))
            continue
        format_type = s[-1]
        try:
            arg = next(args)
        except StopIteration:
            warning(pos, "Too few arguments for format placeholders", level=1)
            can_be_optimised = False
            break
        if format_type in u'asrfdoxX':
            format_spec = s[1:]
            if format_type in u'doxX' and u'.' in format_spec:
                # Precision is not allowed for integers in format(), but ok in %-formatting.
                can_be_optimised = False
            elif format_type in u'ars':
                # 'a'/'r'/'s' become conversion chars, not part of the spec.
                format_spec = format_spec[:-1]
            substrings.append(ExprNodes.FormattedValueNode(
                arg.pos, value=arg,
                conversion_char=format_type if format_type in u'ars' else None,
                format_spec=ExprNodes.UnicodeNode(
                    pos, value=EncodedString(format_spec), constant_result=format_spec)
                    if format_spec else None,
            ))
        else:
            # keep it simple for now ...
            can_be_optimised = False
    if not can_be_optimised:
        # Print all warnings we can find before finally giving up here.
        return None
    try:
        next(args)
    except StopIteration: pass
    else:
        warning(pos, "Too many arguments for format placeholders", level=1)
        return None
    node = ExprNodes.JoinedStrNode(pos, values=substrings)
    return self.visit_JoinedStrNode(node)
def visit_FormattedValueNode(self, node):
self.visitchildren(node)
conversion_char = node.conversion_char or 's'
if isinstance(node.format_spec, ExprNodes.UnicodeNode) and not node.format_spec.value:
node.format_spec = None
if node.format_spec is None and isinstance(node.value, ExprNodes.IntNode):
value = EncodedString(node.value.value)
if value.isdigit():
return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
if node.format_spec is None and conversion_char == 's':
value = None
if isinstance(node.value, ExprNodes.UnicodeNode):
value = node.value.value
elif isinstance(node.value, ExprNodes.StringNode):
value = node.value.unicode_value
if value is not None:
return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
return node
    def visit_JoinedStrNode(self, node):
        """
        Clean up after the parser by discarding empty Unicode strings and merging
        substring sequences.  Empty or single-value join lists are not uncommon
        because f-string format specs are always parsed into JoinedStrNodes.
        """
        self.visitchildren(node)
        unicode_node = ExprNodes.UnicodeNode
        values = []
        # Group consecutive constant unicode substrings so each run can be
        # merged into a single node; runs of non-constant values pass through.
        for is_unode_group, substrings in itertools.groupby(node.values, lambda v: isinstance(v, unicode_node)):
            if is_unode_group:
                substrings = list(substrings)
                unode = substrings[0]
                if len(substrings) > 1:
                    value = EncodedString(u''.join(value.value for value in substrings))
                    unode = ExprNodes.UnicodeNode(unode.pos, value=value, constant_result=value)
                # ignore empty Unicode strings
                if unode.value:
                    values.append(unode)
            else:
                values.extend(substrings)
        if not values:
            # everything merged away => result is the empty string
            value = EncodedString('')
            node = ExprNodes.UnicodeNode(node.pos, value=value, constant_result=value)
        elif len(values) == 1:
            node = values[0]
        elif len(values) == 2:
            # reduce to string concatenation
            node = ExprNodes.binop_node(node.pos, '+', *values)
        else:
            node.values = values
        return node
    def visit_MergedDictNode(self, node):
        """Unpack **args in place if we can."""
        self.visitchildren(node)
        args = []   # final argument list: literals merged, other args kept as-is
        items = []  # holds at most one dict literal collecting adjacent literals
        def add(arg):
            # Merge adjacent dict literals into one; flatten nested merge nodes.
            if arg.is_dict_literal:
                if items:
                    items[0].key_value_pairs.extend(arg.key_value_pairs)
                else:
                    items.append(arg)
            elif isinstance(arg, ExprNodes.MergedDictNode):
                for child_arg in arg.keyword_args:
                    add(child_arg)
            else:
                # non-literal: flush the pending literal first to keep evaluation order
                if items:
                    args.append(items[0])
                    del items[:]
                args.append(arg)
        for arg in node.keyword_args:
            add(arg)
        if items:
            args.append(items[0])
        if len(args) == 1:
            arg = args[0]
            if arg.is_dict_literal or isinstance(arg, ExprNodes.MergedDictNode):
                # a single literal (or already-merged) dict replaces the whole node
                return arg
        node.keyword_args[:] = args
        self._calculate_const(node)
        return node
    def visit_MergedSequenceNode(self, node):
        """Unpack *args in place if we can."""
        self.visitchildren(node)
        is_set = node.type is Builtin.set_type
        args = []    # final argument list after merging literals
        values = []  # holds at most one literal collecting adjacent literals
        def add(arg):
            # Merge adjacent sequence/set literals; flatten nested merge nodes.
            if (is_set and arg.is_set_literal) or (arg.is_sequence_constructor and not arg.mult_factor):
                if values:
                    values[0].args.extend(arg.args)
                else:
                    values.append(arg)
            elif isinstance(arg, ExprNodes.MergedSequenceNode):
                for child_arg in arg.args:
                    add(child_arg)
            else:
                # non-literal: flush the pending literal first to keep evaluation order
                if values:
                    args.append(values[0])
                    del values[:]
                args.append(arg)
        for arg in node.args:
            add(arg)
        if values:
            args.append(values[0])
        if len(args) == 1:
            arg = args[0]
            if ((is_set and arg.is_set_literal) or
                    (arg.is_sequence_constructor and arg.type is node.type) or
                    isinstance(arg, ExprNodes.MergedSequenceNode)):
                # a single literal of the right type replaces the whole node
                return arg
        node.args[:] = args
        self._calculate_const(node)
        return node
def visit_SequenceNode(self, node):
"""Unpack *args in place if we can."""
self.visitchildren(node)
args = []
for arg in node.args:
if not arg.is_starred:
args.append(arg)
elif arg.target.is_sequence_constructor and not arg.target.mult_factor:
args.extend(arg.target.args)
else:
args.append(arg)
node.args[:] = args
self._calculate_const(node)
return node
    def visit_PrimaryCmpNode(self, node):
        """Fold constant (sub-)results of a comparison cascade.

        Each step of a cascade like ``a < b < c`` is evaluated where both
        operands are compile-time constants.  Constant-True steps split the
        cascade, a constant-False step short-circuits it, and the remaining
        non-constant partial cascades are re-joined with 'and'.
        """
        # calculate constant partial results in the comparison cascade
        self.visitchildren(node, ['operand1'])
        left_node = node.operand1
        cmp_node = node
        while cmp_node is not None:
            self.visitchildren(cmp_node, ['operand2'])
            right_node = cmp_node.operand2
            cmp_node.constant_result = not_a_constant
            if left_node.has_constant_result() and right_node.has_constant_result():
                try:
                    cmp_node.calculate_cascaded_constant_result(left_node.constant_result)
                except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
                    pass  # ignore all 'normal' errors here => no constant result
            left_node = right_node
            cmp_node = cmp_node.cascade
        if not node.cascade:
            # simple binary comparison, no cascade to split
            if node.has_constant_result():
                return self._bool_node(node, node.constant_result)
            return node
        # collect partial cascades: [[value, CmpNode...], [value, CmpNode, ...], ...]
        cascades = [[node.operand1]]
        final_false_result = []

        def split_cascades(cmp_node):
            if cmp_node.has_constant_result():
                if not cmp_node.constant_result:
                    # False => short-circuit
                    final_false_result.append(self._bool_node(cmp_node, False))
                    return
                else:
                    # True => discard and start new cascade
                    cascades.append([cmp_node.operand2])
            else:
                # not constant => append to current cascade
                cascades[-1].append(cmp_node)
            if cmp_node.cascade:
                split_cascades(cmp_node.cascade)

        split_cascades(node)

        # rebuild each remaining partial cascade as its own PrimaryCmpNode
        cmp_nodes = []
        for cascade in cascades:
            if len(cascade) < 2:
                continue
            cmp_node = cascade[1]
            pcmp_node = ExprNodes.PrimaryCmpNode(
                cmp_node.pos,
                operand1=cascade[0],
                operator=cmp_node.operator,
                operand2=cmp_node.operand2,
                constant_result=not_a_constant)
            cmp_nodes.append(pcmp_node)
            last_cmp_node = pcmp_node
            for cmp_node in cascade[2:]:
                # re-link the remaining cascaded comparisons behind the new head
                last_cmp_node.cascade = cmp_node
                last_cmp_node = cmp_node
            last_cmp_node.cascade = None
        if final_false_result:
            # last cascade was constant False
            cmp_nodes.append(final_false_result[0])
        elif not cmp_nodes:
            # only constants, but no False result
            return self._bool_node(node, True)
        node = cmp_nodes[0]
        if len(cmp_nodes) == 1:
            if node.has_constant_result():
                return self._bool_node(node, node.constant_result)
        else:
            # join the partial cascades with short-circuiting 'and'
            for cmp_node in cmp_nodes[1:]:
                node = ExprNodes.BoolBinopNode(
                    node.pos,
                    operand1=node,
                    operator='and',
                    operand2=cmp_node,
                    constant_result=not_a_constant)
        return node
def visit_CondExprNode(self, node):
self._calculate_const(node)
if not node.test.has_constant_result():
return node
if node.test.constant_result:
return node.true_val
else:
return node.false_val
def visit_IfStatNode(self, node):
self.visitchildren(node)
# eliminate dead code based on constant condition results
if_clauses = []
for if_clause in node.if_clauses:
condition = if_clause.condition
if condition.has_constant_result():
if condition.constant_result:
# always true => subsequent clauses can safely be dropped
node.else_clause = if_clause.body
break
# else: false => drop clause
else:
# unknown result => normal runtime evaluation
if_clauses.append(if_clause)
if if_clauses:
node.if_clauses = if_clauses
return node
elif node.else_clause:
return node.else_clause
else:
return Nodes.StatListNode(node.pos, stats=[])
    def visit_SliceIndexNode(self, node):
        """Constant-fold slicing of literal sequences and string literals.

        Start/stop sub-expressions whose constant value is None are dropped so
        that the slice bounds used below are plain values (or None).
        """
        self._calculate_const(node)
        # normalise start/stop values
        if node.start is None or node.start.constant_result is None:
            start = node.start = None
        else:
            start = node.start.constant_result
        if node.stop is None or node.stop.constant_result is None:
            stop = node.stop = None
        else:
            stop = node.stop.constant_result
        # cut down sliced constant sequences
        if node.constant_result is not not_a_constant:
            base = node.base
            if base.is_sequence_constructor and base.mult_factor is None:
                # slice the literal's argument list directly
                base.args = base.args[start:stop]
                return base
            elif base.is_string_literal:
                base = base.as_sliced_node(start, stop)
                if base is not None:
                    # as_sliced_node() may return None when slicing is not possible
                    return base
        return node
def visit_ComprehensionNode(self, node):
self.visitchildren(node)
if isinstance(node.loop, Nodes.StatListNode) and not node.loop.stats:
# loop was pruned already => transform into literal
if node.type is Builtin.list_type:
return ExprNodes.ListNode(
node.pos, args=[], constant_result=[])
elif node.type is Builtin.set_type:
return ExprNodes.SetNode(
node.pos, args=[], constant_result=set())
elif node.type is Builtin.dict_type:
return ExprNodes.DictNode(
node.pos, key_value_pairs=[], constant_result={})
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
sequence = node.iterator.sequence
if isinstance(sequence, ExprNodes.SequenceNode):
if not sequence.args:
if node.else_clause:
return node.else_clause
else:
# don't break list comprehensions
return Nodes.StatListNode(node.pos, stats=[])
# iterating over a list literal? => tuples are more efficient
if isinstance(sequence, ExprNodes.ListNode):
node.iterator.sequence = sequence.as_tuple()
return node
def visit_WhileStatNode(self, node):
self.visitchildren(node)
if node.condition and node.condition.has_constant_result():
if node.condition.constant_result:
node.condition = None
node.else_clause = None
else:
return node.else_clause
return node
def visit_ExprStatNode(self, node):
self.visitchildren(node)
if not isinstance(node.expr, ExprNodes.ExprNode):
# ParallelRangeTransform does this ...
return node
# drop unused constant expressions
if node.expr.has_constant_result():
return None
return node
def visit_GILStatNode(self, node):
self.visitchildren(node)
if node.condition is None:
return node
if node.condition.has_constant_result():
# Condition is True - Modify node to be a normal
# GILStatNode with condition=None
if node.condition.constant_result:
node.condition = None
# Condition is False - the body of the GILStatNode
# should run without changing the state of the gil
# return the body of the GILStatNode
else:
return node.body
# If condition is not constant we keep the GILStatNode as it is.
# Either it will later become constant (e.g. a `numeric is int`
# expression in a fused type function) and then when ConstantFolding
# runs again it will be handled or a later transform (i.e. GilCheck)
# will raise an error
return node
# in the future, other nodes can have their own handler method here
# that | |
from JumpscaleLibs.sal.ubuntu.Ubuntu import Ubuntu
from Jumpscale import j
from unittest import TestCase
import os
import time
from unittest import skip
from loguru import logger
class Test_Ubuntu(TestCase):
j.sal.process.execute("apt update -y")
j.sal.process.execute("apt-get install -y python3-distutils-extra python3-dbus python3-apt")
LOGGER = logger
LOGGER.add("Config_manager_{time}.log")
    @staticmethod
    def info(message):
        """Log *message* at INFO level via the class-wide loguru logger."""
        Test_Ubuntu.LOGGER.info(message)
def _check_init_process(self):
process = j.sal.process.getProcessObject(1)
name = process.name()
if not name == "my_init" and not name == "systemd":
raise j.exceptions.RuntimeError("Unsupported init system process")
return name
    def setUp(self):
        # Fresh Ubuntu SAL instance for every test.
        self.ubuntu = Ubuntu()
    def tearDown(self):
        # No per-test cleanup required.
        pass
def test001_uptime(self):
"""TC395
check ubuntu uptime
**Test Scenario**
#. Check uptime from system file located at /proc/uptime
#. Compare it with tested method ubuntu.uptime()
#. Both uptime from system file and from method are almost equal
"""
self.info("verfying uptime method")
with open("/proc/uptime") as f:
data = f.read()
uptime, _ = data.split(" ")
self.assertAlmostEqual(float(uptime), self.ubuntu.uptime(), delta=2)
    def test002_service_install(self):
        """TC396
        service_install is not a package install, it only creates a config file
        in the init system's directory.

        **Test Scenario**

        #. Take zdb as the tested service, check whether its config file exists
        #. If the service config file exists, uninstall the service first so the
           tested method service_install can be verified
        #. Install zdb service by the tested method
        #. Verify the config file exists after enabling the service
        #. Uninstall the service to return to the original state
        #. If the service config file existed before, install the service again
           to return to the original state
        """
        mysys = None
        zdb_service_file = False
        self.info('installing zdb for testing')
        j.builders.db.zdb.install()
        self.info('checking system is systemd or not ')
        mysys = self._check_init_process()
        # the init system determines where the service config file lives
        if mysys == 'my_init':
            self.info('system is init system')
            zdb_service_file = os.path.exists('/etc/service/zdb/run')
        elif mysys == 'systemd':
            self.info('system is init systemd')
            zdb_service_file = os.path.exists('/etc/systemd/system/zdb.service')
        else:
            self.info('something unexpected occurred while checking system type')
            self.assertIn(mysys, ["systemd", "my_init"], "system not supported ")
        self.info('checking zdb file existing ')
        if zdb_service_file is True:
            # remove the pre-existing service so service_install starts clean
            self.info('zdb file is exist ,service_uninstall to zdb service ')
            self.ubuntu.service_uninstall('zdb')
        self.info('service_install to zdb service ')
        self.ubuntu.service_install('zdb','/sandbox/bin')
        self.info('Verify config file existing after using service_install')
        if mysys == 'my_init':
            self.assertTrue(os.path.exists('/etc/service/zdb/run'))
        else:
            self.assertTrue(os.path.exists('/etc/systemd/system/zdb.service'))
        self.info('zdb service uninstall to return to origin state')
        self.ubuntu.service_uninstall('zdb')
        if zdb_service_file is True:
            # restore the service that existed before the test ran
            self.info('zdb service install to return to origin state')
            self.ubuntu.service_install('zdb','/sandbox/zdb')
def test003_version_get(self):
"""TC398
Check the ubuntu version
**Test Scenario**
#. Check Ubuntu version using tested method ubuntu.version_get
#. Verify step1 output include keyword Ubuntu
"""
self.info('checking ubuntu version ')
self.assertIn("Ubuntu", self.ubuntu.version_get())
    def test004_apt_install_check(self):
        """TC399
        Check that an ubuntu package is installed; if not, it will be installed.

        **Test Scenario**

        #. Verify an existing command (ping) passes the check
        #. Verify a nonsense command name raises an error
        """
        self.info('checking ping is installed or not ')
        self.ubuntu.apt_install_check("iputils-ping", "ping")
        # a command that cannot exist must make the check raise
        with self.assertRaises(Exception) as myexcept:
            self.ubuntu.apt_install_check("iputils-ping", "elfankosh")
        self.info('There is exceptions RuntimeError due to elfankosh is not a command')
        self.assertIn("Could not execute: 'which elfankosh'", myexcept.exception.args[0])
    def test005_apt_install_version(self):
        """TC400
        Install a specific version of an ubuntu package.

        **Test Scenario**

        #. Install wget package using apt_install_version method
        #. check version of wget after installing it
        #. step1 and step2 should be identical

        :return:
        """
        wget_installed = False
        wget_installed = self.ubuntu.is_pkg_installed('wget')
        self.info('print wget install var is {}'.format(wget_installed))
        if wget_installed is True:
            # NOTE(review): this branch only logs -- wget is never actually
            # uninstalled here, so apt_install_version below runs over the
            # existing installation.  Confirm whether an uninstall step is
            # missing.
            self.info('uninstall wget to test install method ')
        self.info('installing wget with version 1.19.4')
        self.ubuntu.apt_install_version("wget", "1.19.4-1ubuntu2.2")
        self.info('checking installed wget version ')
        rc, out, err = j.sal.process.execute("wget -V", useShell=True)
        self.info('verifying installed wget version is 1.19.4')
        self.assertIn("1.19.4", out)
        self.info('removing wget to get back to origin state')
        j.sal.process.execute("apt remove -y wget")
        if wget_installed is True:
            # restore the distribution's default wget
            self.info('uninstall wget and install default version from ubuntu repo')
            j.sal.process.execute("apt install -y wget")
    def test006_deb_install(self):
        """TC402
        Install a debian package.

        **Test Scenario**

        #. Download python-tmuxp debian package
        #. Install downloaded debian package by deb_install method
        #. Get the installed package status by dpkg command
        #. Installed package python-tmuxp should be install ok
        """
        # requires network access to the Ubuntu package mirror
        self.info('Downloading python-tmuxp debian package')
        j.sal.process.execute(
            "curl http://security.ubuntu.com/ubuntu/pool/universe/t/tmuxp/python-tmuxp_1.5.0a-1_all.deb > python-tmuxp_1.5.0a-1_all.deb"
        )
        self.info('Install downloaded debian package by deb_install method')
        self.ubuntu.deb_install(path="python-tmuxp_1.5.0a-1_all.deb")
        self.info('Get the installed package status by dpkg command')
        rc, out, err = j.sal.process.execute("dpkg -s python-tmuxp | grep Status", die=False)
        self.info('Installed package python-tmuxp should be install ok')
        # "Status: install ok installed" marks a successfully installed package
        self.assertIn("install ok", out)
def test007_pkg_list(self):
"""TC403
list files of dpkg.
**Test Scenario**
# . no package called ping so output len should equal zero\
the correct package name is iputils-ping
"""
self.info('verifying that pkg_list equal zero as no dpkg called ping, it should be iputils-ping')
self.assertEqual(len(self.ubuntu.pkg_list("ping")), 0)
self.assertGreaterEqual(len(self.ubuntu.pkg_list("iputils-ping")), 1)
    def test008_service_start(self):
        """TC404
        start an ubuntu service.

        **Test Scenario**

        #. Check cron status before testing service_start method
        #. If cron is running, stop it so service_start can be tested
        #. Start cron service using service_start method
        #. Check the cron status by service_status method
        """
        cront_status = False
        self.info('check cron status before testing service_start method ')
        cront_status = self.ubuntu.service_status('cron')
        if cront_status is True:
            # cron must be stopped for service_start to be meaningful
            self.info('stopping cron service so we can test service_start method')
            self.ubuntu.service_stop("cron")
        self.info('Start cron service using start_service method ')
        self.ubuntu.service_start("cron")
        self.info('check the corn status by service_status method')
        self.info('status of service is {} '.format(self.ubuntu.service_status('cron')))
        self.assertTrue(self.ubuntu.service_status('cron'))
    def test009_service_stop(self):
        """TC405
        stop an ubuntu service.

        **Test Scenario**

        #. Check cron status before testing service_stop method
        #. If cron is not running, start it before testing service_stop
        #. Service should be running; stop it using the tested method service_stop
        #. The status reported by service_status should then be False
        #. Return cron service to its original (running) state
        #. Stop cron again if it was originally stopped
        """
        cront_status = False
        self.info('check cron status before testing service_stop method ')
        cront_status = self.ubuntu.service_status('cron')
        if cront_status is False:
            # service_stop needs a running service to act on
            self.info('status was stopped before test method we need to start it now and stop it after finish test')
            self.ubuntu.service_start('cron')
        self.info('service should be running, stopping cron service using tested method service_stop')
        self.ubuntu.service_stop("cron")
        self.info('Get the service status by service_status method should be False ')
        self.assertFalse(self.ubuntu.service_status('cron'))
        self.info('Retrun cron service status as origin state to be running ')
        self.ubuntu.service_start("cron")
        if cront_status is False:
            # cron was stopped before the test => leave it stopped
            self.info('stop cron service to be as origin state')
            self.ubuntu.service_stop("cron")
def test010_service_restart(self):
"""TC406
restart an ubuntu service.
**Test Scenario**
#. Check cron status before testing service_start method
#. If status of cron is running then stop cron service so we can test service_start method
#. Restart cron service using start_service method
#. Check the corn status by service_status method
#. As it was running before test,starting cron service after finishing testing by service_start method
"""
cront_status = False
self.info('check cron status before testing service_start method ')
cront_status = self.ubuntu.service_status('cron')
if cront_status is True:
self.info('stopping cron service so we can test service_start method')
self.ubuntu.service_stop("cron")
self.info('restart cron service using start_service method ')
self.ubuntu.service_restart("cron")
self.info('check the corn status by service command')
self.assertTrue(self.ubuntu.service_status('cron'))
def test011_service_status(self):
"""TC407
check service status
**Test Scenario**
#. Get service status
#. if service is not running, verifying tested method return False
#. else service is running, should return True
"""
self.info('Get service status')
state = self.ubuntu.service_status('cron')
if state is False:
self.info('service is not running, verifying tested method return False')
self.assertFalse(self.ubuntu.service_status('cron'))
else:
self.info('service is running, verifying tested method should return True')
self.assertTrue(self.ubuntu.service_status('cron'))
def test012_apt_find_all(self):
"""TC408
find all packages match with the package_name, this mean must not be installed
**Test Scenario**
#. alot if packages are containing wget like 'python3-wget', 'wget'
"""
self.info('verifying all available packages have a keyword wget')
self.assertIn("wget", self.ubuntu.apt_find_all("wget"))
def test013_is_pkg_installed(self):
"""TC409
check if the package is installed or not
**Test Scenario**
#. make sure wget installed successfully
#. Install it if does not installed
#. Verifying tested pkg_installed should return True as wget is installed
#. Remove it to return to origin state
"""
wget_is_installed = False
self.info('make sure wget installed')
rc1, out, err = j.sal.process.execute("dpkg -s wget|grep Status")
if 'deinstall ok' in out:
self.info('install wget as it does not installed')
j.sal.process.execute("apt install -y wget")
self.info('verifying tested pkg_installed should return True as wget is installed')
wget_is_installed = j.sal.ubuntu.is_pkg_installed("wget")
self.info(' wget_is_installed is {} '.format(wget_is_installed))
self.assertTrue(wget_is_installed)
if 'install ok' | |
from __future__ import absolute_import
import numpy as np
from .Node import Op, NAME_RULE, PROFILING_MODE
from .. import profiler
from .._base import get_array_memory
class Conv2dOp(Op):
    """2D convolution op (NCHW layout): inputs[0] is the image x, inputs[1] the filter.

    BUGFIX throughout: integer shape/index arithmetic now uses floor division
    (``//``).  The original used ``/``, which returns floats on Python 3 and
    breaks reshape() shapes and array indexing; on Python 2 the results are
    identical.
    """

    # nodeA : x nodeB : filter
    def __call__(self, node_A, node_B, padding=0, padding2=None, stride=1, For_ResNet=False):
        """Build a conv2d graph node.

        padding / padding2 are the paddings for the two spatial dims
        (padding2 defaults to padding); stride is shared by both dims.
        NOTE: configuration is stored on the shared op object, not on the
        node, mirroring the other ops in this module.
        """
        new_node = Op.__call__(self)
        new_node.inputs = [node_A, node_B]
        self.padding = padding
        self.padding2 = padding2
        self.For_ResNet = For_ResNet
        if padding2 is None:
            self.padding2 = self.padding
        self.stride = stride
        new_node.profiler = None
        if PROFILING_MODE == 1:
            new_node.profiler = profiler.CreateProfiler()
        if NAME_RULE == 0:
            new_node.name = "Conv2d(%s, %s)" % (node_A.name, node_B.name)
        elif NAME_RULE == 1:
            new_node.name = "Conv2d"
        else:
            new_node.name = "conv2d" + str(new_node.id)
        new_node.desc = new_node.name + \
            "(%s, %s)" % (node_A.name, node_B.name)
        return new_node

    def im2col(self, X, filter_H, filter_W, padding, stride):
        """Unfold X of shape (N, C, H, W) into patch columns of shape
        (N, C*filter_H*filter_W, out_H*out_W); out-of-bounds pixels are 0."""
        N, C, H, W = X.shape
        assert (H + 2 * padding - filter_H) % stride == 0
        assert (W + 2 * padding - filter_W) % stride == 0
        out_H = (H + 2 * padding - filter_H) // stride + 1
        out_W = (W + 2 * padding - filter_W) // stride + 1
        y_row_size = C * filter_H * filter_W
        y_col_size = out_H * out_W
        y_shape = (N, y_row_size, y_col_size)
        Y = np.empty(y_shape, dtype=X.dtype)
        for batch_index in range(N):
            for col_index in range(y_col_size):
                # was '/': a float index breaks the arithmetic on Python 3
                out_y = col_index // out_W
                out_x = col_index % out_W
                in_y = out_y * stride - padding
                in_x = out_x * stride - padding
                row_idx = 0
                for c in range(0, C):
                    for y in range(in_y, in_y + filter_H):
                        for x in range(in_x, in_x + filter_W):
                            if (x < 0 or x >= W or y < 0 or y >= H):
                                # implicit zero padding
                                Y[batch_index, row_idx, col_index] = 0
                            else:
                                Y[batch_index, row_idx,
                                  col_index] = X[batch_index, c, y, x]
                            row_idx += 1
        return Y

    def np_conv2d(self, X, Filter, padding=0, stride=1):
        """Implement a conv2d as a matrix multiply after im2col."""
        filter_outChannel, filter_inChannel, filter_H, filter_W = Filter.shape
        N, C, H, W = X.shape
        assert (H + 2 * padding - filter_H) % stride == 0
        assert (W + 2 * padding - filter_W) % stride == 0
        out_H = (H + 2 * padding - filter_H) // stride + 1
        out_W = (W + 2 * padding - filter_W) // stride + 1
        im2col_matrix = self.im2col(X, filter_H, filter_W, padding, stride)
        filter_matrix = Filter.reshape(filter_outChannel, -1)
        return np.matmul(filter_matrix, im2col_matrix).reshape(N, filter_outChannel, out_H, out_W)

    def profile(self, node, input_vals, output_val, is_static=True):
        """Fill node.profiler: static memory estimates, or a measured cuDNN run."""
        assert len(input_vals) == 2
        if is_static:
            # input memory
            node.profiler.input_memory = get_array_memory(input_vals[0].shape) + \
                get_array_memory(input_vals[1].shape)
            # output memory
            node.profiler.output_memory = get_array_memory(output_val.shape)
            # TODO: no workspace accounted for yet
            node.profiler.workspace_memory = 0
            # rough execute-time estimate derived from the output size
            node.profiler.time = node.profiler.output_memory / 4 * profiler.FLOPS_PER_SECOND
        else:
            from ..gpu_links import CuDNN_conv2d
            CuDNN_conv2d(input_vals[0], input_vals[1],
                         output_val, self.padding, self.padding2, self.stride, None, node.profiler)

    def compute(self, node, input_vals, output_val, use_numpy=True, stream_handle=None):
        """Run the convolution on CPU (DNNL if available, numpy fallback) or GPU (cuDNN)."""
        assert len(input_vals) == 2
        if use_numpy:
            from .._base import DNNL_LIB
            if DNNL_LIB['DnnlConv2d']:
                from ..cpu_links import conv2d as cpu_conv2d
                from ..ndarray import numpyasdlarrayhandle
                input_x = numpyasdlarrayhandle(input_vals[0])
                input_f = numpyasdlarrayhandle(input_vals[1])
                output = numpyasdlarrayhandle(output_val)
                cpu_conv2d(input_x, input_f, output, self.padding, self.stride)
            else:
                output_val[:] = self.np_conv2d(
                    input_vals[0], input_vals[1], self.padding, self.stride)
        else:
            from ..gpu_links import CuDNN_conv2d
            CuDNN_conv2d(input_vals[0], input_vals[1],
                         output_val, self.padding, self.padding2, self.stride, stream_handle, None)

    def gradient(self, node, output_grad):
        """Return the gradient nodes [dL/dx, dL/dfilter]."""
        return [conv2d_gradient_of_data_op(node.inputs[1], output_grad, self.padding, self.padding2, self.stride, self.For_ResNet),
                conv2d_gradient_of_filter_op(node.inputs[0], output_grad, self.padding, self.padding2, self.stride)]

    def infer_shape(self, node, input_shapes):
        """Infer the output shape (N, out_channels, out_H, out_W)."""
        assert len(input_shapes) == 2
        N, _, H, W = input_shapes[0]
        f_O, _, f_H, f_W = input_shapes[1]
        padding = self.padding
        padding2 = self.padding2
        stride = self.stride
        filter_H = input_shapes[1][2]
        filter_W = input_shapes[1][3]
        out_H = (H + 2 * padding - filter_H) // stride + 1
        out_W = (W + 2 * padding2 - filter_W) // stride + 1
        return (N, f_O, out_H, out_W)
class Conv2d_Gradient_of_DataOp(Op):
    """Gradient of conv2d w.r.t. the input data x.

    inputs[0] is the filter, inputs[1] the output gradient dL/dY.
    BUGFIX: integer shape/index arithmetic now uses floor division (``//``)
    instead of ``/``, which returns floats on Python 3 and breaks indexing;
    on Python 2 the results are identical.
    """

    # nodeA : filter nodeB : Y_gradient
    def __call__(self, node_A, node_B, padding=0, padding2=None, stride=1, For_ResNet=False):
        """Build the dL/dx node; see Conv2dOp.__call__ for parameter meaning."""
        new_node = Op.__call__(self)
        new_node.inputs = [node_A, node_B]
        self.padding = padding
        self.padding2 = padding2
        self.stride = stride
        self.For_ResNet = For_ResNet
        new_node.profiler = None
        if PROFILING_MODE == 1:
            new_node.profiler = profiler.CreateProfiler()
        if NAME_RULE == 0:
            new_node.name = "Conv2d_Gradient_of_DataOp(%s, %s)" % (
                node_A.name, node_B.name)
        elif NAME_RULE == 1:
            new_node.name = "Conv2d_Gradient_of_DataOp"
        else:
            new_node.name = "Conv2d_Gradient_of_DataOp" + str(new_node.id)
        new_node.desc = new_node.name + \
            "(%s, %s)" % (node_A.name, node_B.name)
        return new_node

    def im2col_transpose(self, N, C, H, W, filter_H, filter_W, Y, padding, stride):
        """Scatter-add im2col columns Y back into an (N, C, H, W) image —
        the adjoint of Conv2dOp.im2col."""
        assert (H + 2 * padding - filter_H) % stride == 0
        assert (W + 2 * padding - filter_W) % stride == 0
        out_H = (H + 2 * padding - filter_H) // stride + 1
        out_W = (W + 2 * padding - filter_W) // stride + 1
        _, y_row_size, y_col_size = Y.shape
        der_X_shape = (N, C, H, W)
        der_X = np.zeros(der_X_shape, dtype=Y.dtype)
        for batch_index in range(N):
            for col_index in range(y_col_size):
                # was '/': a float index breaks the arithmetic on Python 3
                out_y = col_index // out_W
                out_x = col_index % out_W
                in_y = out_y * stride - padding
                in_x = out_x * stride - padding
                row_idx = 0
                for c in range(0, C):
                    for y in range(in_y, in_y + filter_H):
                        for x in range(in_x, in_x + filter_W):
                            if (x < 0 or x >= W or y < 0 or y >= H):
                                # padding region contributes nothing to dX
                                Y[batch_index, row_idx, col_index] = 0
                            else:
                                der_X[batch_index, c, y,
                                      x] += Y[batch_index, row_idx, col_index]
                            row_idx += 1
        return der_X

    def np_Conv2dGradient_data(self, X_N, X_C, X_H, X_W, Filter, Y, padding=0, stride=1):
        """Compute dL/dx = im2col_transpose(Filter^T @ dL/dY)."""
        filter_outChannel, filter_inChannel, filter_H, filter_W = Filter.shape
        Y_N, Y_C, Y_H, Y_W = Y.shape
        YY = Y.reshape((Y_N, Y_C, Y_H * Y_W))  # transformed to im2col Y
        F_filter = Filter.reshape((filter_outChannel, -1))
        gradient_im2col_XX = np.matmul(F_filter.T, YY)
        gradient_X = self.im2col_transpose(
            X_N, X_C, X_H, X_W, filter_H, filter_W, gradient_im2col_XX, padding, stride)  # gradient of x
        return gradient_X

    def profile(self, node, input_vals, output_val, is_static=True):
        """Fill node.profiler: static memory estimates, or a measured cuDNN run."""
        assert len(input_vals) == 2
        if is_static:
            # input memory
            node.profiler.input_memory = get_array_memory(input_vals[0].shape) + \
                get_array_memory(input_vals[1].shape)
            # output memory
            node.profiler.output_memory = get_array_memory(output_val.shape)
            # no workspace
            node.profiler.workspace_memory = 0
            # rough execute-time estimate derived from the output size
            node.profiler.time = node.profiler.output_memory / 4 * profiler.FLOPS_PER_SECOND
        else:
            from ..gpu_links import CuDNN_conv2d_gradient_of_data
            CuDNN_conv2d_gradient_of_data(
                input_vals[0], input_vals[1], output_val, padding=self.padding, padding2=self.padding2, stride=self.stride, stream=None, profiler=node.profiler)

    def compute(self, node, input_vals, output_val, use_numpy=True, stream_handle=None):
        """Run the data-gradient on CPU (DNNL/numpy) or GPU (cuDNN)."""
        assert len(input_vals) == 2
        # reconstruct the input spatial size from the output gradient shape
        # NOTE(review): unlike infer_shape, this does not apply the For_ResNet
        # +1 adjustment -- confirm whether the numpy path supports that case.
        N = input_vals[1].shape[0]
        C = input_vals[0].shape[1]
        H = (input_vals[1].shape[2] - 1) * self.stride + \
            input_vals[0].shape[2] - 2 * self.padding
        W = (input_vals[1].shape[3] - 1) * self.stride + \
            input_vals[0].shape[3] - 2 * self.padding
        if use_numpy:
            from .._base import DNNL_LIB
            if DNNL_LIB['DnnlConv2d_Gradient_of_Data']:
                from ..cpu_links import conv2d_gradient_of_data as cpu_conv2d_gradient_of_data
                from ..ndarray import numpyasdlarrayhandle
                input_f = numpyasdlarrayhandle(input_vals[0])
                gradient_y = numpyasdlarrayhandle(input_vals[1])
                gradient_x = numpyasdlarrayhandle(output_val)
                cpu_conv2d_gradient_of_data(input_f, gradient_y, gradient_x, self.padding, self.stride)
            else:
                output_val[:] = self.np_Conv2dGradient_data(
                    N, C, H, W, input_vals[0], input_vals[1], padding=self.padding, stride=self.stride)
        else:
            from ..gpu_links import CuDNN_conv2d_gradient_of_data
            CuDNN_conv2d_gradient_of_data(
                input_vals[0], input_vals[1], output_val, padding=self.padding, padding2=self.padding2, stride=self.stride, stream=stream_handle, profiler=None)

    def gradient(self, node, output_grad):
        # second-order gradients are not supported
        raise NotImplementedError

    def infer_shape(self, node, input_shapes):
        """Infer the shape of dL/dx; For_ResNet with stride 2 adds one pixel."""
        assert len(input_shapes) == 2
        N = input_shapes[1][0]
        C = input_shapes[0][1]
        H = (input_shapes[1][2] - 1) * self.stride + \
            input_shapes[0][2] - 2 * self.padding + (1 if self.For_ResNet and self.stride == 2 else 0)
        W = (input_shapes[1][3] - 1) * self.stride + \
            input_shapes[0][3] - 2 * self.padding2 + (1 if self.For_ResNet and self.stride == 2 else 0)
        return (N, C, H, W)
class Conv2d_Gradient_of_FilterOp(Op):
# nodeA : input_x nodeB : gradient_Y
def __call__(self, input_X, gradient_Y, padding=0, padding2=None, stride=1):
new_node = Op.__call__(self)
new_node.inputs = [input_X, gradient_Y]
self.padding = padding
if padding2 is None:
self.padding2 = self.padding
else:
self.padding2 = padding2
self.stride = stride
new_node.profiler = None
if PROFILING_MODE == 1:
new_node.profiler = profiler.CreateProfiler()
if NAME_RULE == 0:
new_node.name = "Conv2d_Gradient_of_FilterOp(%s, %s)" % (
input_X.name, gradient_Y.name)
elif NAME_RULE == 1:
new_node.name = "Conv2d_Gradient_of_FilterOp"
else:
new_node.name = "Conv2d_Gradient_of_FilterOp"+str(new_node.id)
new_node.desc = new_node.name + \
"(%s, %s)" | |
"""
.. _tut-fnirs-vis-brain:
Utilising Anatomical Information
================================
This example demonstrates how you can utilise anatomical and sensor position
information in your analysis pipeline. This information can be used to
verify measurement/analysis and also improve analysis accuracy
:footcite:`novi2020integration`.
This example demonstrates how to plot your data on a 3D brain
and overlay the sensor locations and regions of interest.
This tutorial glosses over the processing details, see the
:ref:`GLM tutorial <tut-fnirs-hrf>` for details on the preprocessing.
.. contents:: Page contents
:local:
:depth: 2
"""
# sphinx_gallery_thumbnail_number = 5
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
import pandas as pd
import mne
from mne.preprocessing.nirs import optical_density, beer_lambert_law
import statsmodels.formula.api as smf
from mne_bids import BIDSPath, read_raw_bids, get_entity_vals
import mne_nirs
from mne_nirs.experimental_design import make_first_level_design_matrix
from mne_nirs.statistics import run_glm, statsmodels_to_results
from mne_nirs.channels import get_long_channels, get_short_channels
from mne_nirs.io.fold import fold_landmark_specificity
from mne_nirs.visualisation import plot_nirs_source_detector, plot_glm_surface_projection
from mne_nirs.datasets import fnirs_motor_group
# %%
# Download example data
# -------------------------------
#
# First, the data required for this tutorial is downloaded.
# %%
# Download example fNIRS data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Download the ``audio_or_visual_speech`` dataset and load the first measurement
# (subject 04, session 01) via its BIDS path.
root = mne_nirs.datasets.audio_or_visual_speech.data_path()
dataset = BIDSPath(root=root, suffix="nirs", extension=".snirf", subject="04",
                   task="AudioVisualBroadVsRestricted", datatype="nirs", session="01")
raw = read_raw_bids(dataset)
# %%
# Download annotation information
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Download the HCP-MMP parcellation and read both the full ('HCPMMP1') and
# combined ('HCPMMP1_combined') left-hemisphere label sets for 'fsaverage'.
# Download anatomical locations
# NOTE(review): this assumes data_path() returns a str; newer MNE versions
# return a pathlib.Path, for which '+' would fail — confirm the pinned version.
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=subjects_dir, accept=True)
labels = mne.read_labels_from_annot('fsaverage', 'HCPMMP1', 'lh', subjects_dir=subjects_dir)
labels_combined = mne.read_labels_from_annot('fsaverage', 'HCPMMP1_combined', 'lh', subjects_dir=subjects_dir)
# %%
# Verify placement of sensors
# ---------------------------
#
# The first thing we can do is plot the location of the optodes and channels
# over an average brain surface to verify the data, specifically the 3D coordinates,
# have been loaded correctly. The sources are represented as red dots,
# the detectors are represented as black dots, the white lines represent source-detector
# pairs, and the orange dots represent channel locations.
# In this example we can see channels over the left inferior frontal gyrus,
# auditory cortex, planum temporale, and occipital lobe.
brain = mne.viz.Brain('fsaverage', subjects_dir=subjects_dir, background='w', cortex='0.5')
brain.add_sensors(raw.info, trans='fsaverage', fnirs=['channels', 'pairs', 'sources', 'detectors'])
brain.show_view(azimuth=180, elevation=80, distance=450)
# %%
# .. _tut-fnirs-vis-brain-plot-3d-montage:
#
# Plot sensor channel numbers
# ---------------------------
# Often for publications and sanity checking, it's convenient to create an
# image showing the channel numbers along with the (typically) 10-20 location
# in the correct locations in a 3D view. The function
# :func:`mne_nirs.visualisation.plot_3d_montage` gives us this once we
# specify which views to use to show each channel pair:
# Map each view name to the channel-pair numbers best seen from that view.
view_map = {
    'left-lat': np.r_[np.arange(1, 27), 28],
    'caudal': np.r_[27, np.arange(43, 53)],
    'right-lat': np.r_[np.arange(29, 43), 44],
}
fig_montage = mne_nirs.visualisation.plot_3d_montage(
    raw.info, view_map=view_map, subjects_dir=subjects_dir)
# %%
# Plot sensor channels and anatomical region of interest
# ------------------------------------------------------
#
# Once the data has been loaded we can highlight anatomical regions of interest
# to ensure that the sensors are appropriately placed to measure from
# the relevant brain structures.
# In this example we highlight the primary auditory cortex in blue,
# and we can see that a number of channels are placed over this structure.
brain = mne.viz.Brain('fsaverage', subjects_dir=subjects_dir, background='w', cortex='0.5')
brain.add_sensors(raw.info, trans='fsaverage', fnirs=['channels', 'pairs', 'sources', 'detectors'])
# 'L_A1_ROI-lh' is the left primary auditory cortex label from HCPMMP1.
aud_label = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
brain.add_label(aud_label, borders=False, color='blue')
brain.show_view(azimuth=180, elevation=80, distance=450)
# %%
# Plot channels sensitive to anatomical region of interest
# --------------------------------------------------------
#
# .. sidebar:: fOLD Toolbox
#
#    You should use the fOLD toolbox to pick your optode locations
#    when designing your experiment.
#    The tool is very intuitive and easy to use.
#    Be sure to cite the authors if you use their tool or data:
#
#    Morais, Guilherme <NAME>, <NAME>, and <NAME>. "fNIRS optodes’ location decider (fOLD): a toolbox for probe arrangement guided by brain regions-of-interest." Scientific reports 8.1 (2018): 1-11.
#
# Rather than simply eyeballing the sensor and ROIs of interest, we can
# quantify the specificity of each channel to the anatomical region of interest
# and select channels that are sufficiently sensitive for further analysis.
# In this example we highlight the left inferior frontal gyrus (IFG) and
# use data from the fOLD toolbox :footcite:`morais2018fnirs`.
# To see more details about how to use the fOLD data see
# :ref:`this tutorial <tut-fnirs-group-relating>`.

# Return specificity of each channel to the Left IFG
specificity = fold_landmark_specificity(raw, 'L IFG (p. Triangularis)')
# Retain only channels with specificity to left IFG of greater than 50%
raw_IFG = raw.copy().pick(picks=np.where(specificity > 50)[0])
brain = mne.viz.Brain('fsaverage', subjects_dir=subjects_dir, background='w', cortex='0.5')
brain.add_sensors(raw_IFG.info, trans='fsaverage', fnirs=['channels', 'pairs'])
ifg_label = [label for label in labels_combined if label.name == 'Inferior Frontal Cortex-lh'][0]
brain.add_label(ifg_label, borders=False, color='green')
brain.show_view(azimuth=140, elevation=95, distance=360)
# %%
#
# Alternatively, we can retain all channels and visualise the specificity of each channel to the ROI
# by encoding the specificity in the color of the line between each source and detector.
# In this example we see that several channels have substantial specificity to
# the region of interest.
#
# Note: this function currently doesn't support the new MNE brain API, so does
# not allow the same behaviour as above (adding sensors, highlighting ROIs etc).
# It should be updated in the near future.
fig = plot_nirs_source_detector(specificity, raw.info, surfaces='brain',
                                subject='fsaverage', subjects_dir=subjects_dir, trans='fsaverage')
mne.viz.set_3d_view(fig, azimuth=140, elevation=95)
# %%
# Anatomically informed weighting in region of interest analysis
# --------------------------------------------------------------
#
# As observed above, some channels have greater specificity to the desired
# brain region than other channels.
# Thus, when doing a region of interest analysis you may wish to give extra
# weight to channels with greater sensitivity to the desired ROI.
# This can be done by manually specifying the weights used in the region of
# interest function call.
# The details of the GLM analysis will not be described here, instead view the
# :ref:`fNIRS GLM tutorial <tut-fnirs-hrf>`. Instead, comments are provided
# for the weighted region of interest function call.

# Basic pipeline, simplified for example.
# Note: raw_haemo is modified in place by resample/pick, so statement order matters.
raw_od = optical_density(raw)
raw_haemo = beer_lambert_law(raw_od)
raw_haemo.resample(0.3).pick("hbo")  # Speed increase for web server
sht_chans = get_short_channels(raw_haemo)
raw_haemo = get_long_channels(raw_haemo)
design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=13.0)
# Add the mean short-channel HbO signal as a systemic regressor.
design_matrix["ShortHbO"] = np.mean(sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
glm_est = run_glm(raw_haemo, design_matrix)
# First we create a dictionary for each region of interest.
# Here we include all channels in each ROI, as we will later be applying
# weights based on their specificity to the brain regions of interest.
rois = dict()
rois["Audio_weighted"] = range(len(glm_est.ch_names))
rois["Visual_weighted"] = range(len(glm_est.ch_names))
# Next we compute the specificity for each channel to the auditory and visual cortex.
spec_aud = fold_landmark_specificity(raw_haemo, '42 - Primary and Auditory Association Cortex', atlas="Brodmann")
spec_vis = fold_landmark_specificity(raw_haemo, '17 - Primary Visual Cortex (V1)', atlas="Brodmann")
# Next we create a dictionary to store the weights for each channel in the ROI.
# The weights will be the specificity to the ROI.
# The keys and length of each dictionary entry must match the ROI dictionary.
weights = dict()
weights["Audio_weighted"] = spec_aud
weights["Visual_weighted"] = spec_vis
# Finally we compute region of interest results using the weights specified above
out = glm_est.to_dataframe_region_of_interest(rois, ["Video", "Control"], weighted=weights)
out["Significant"] = out["p"] < 0.05
out
# %%
# In the table above we observe that the response to the visual condition
# is only present in the visual region of interest. You can use this
# technique to load any custom weighting, including weights exported from
# other software.
# %%
# Preprocess fNIRS data
# ---------------------
#
# We can also use the 3D information to project the results onto the cortical surface.
# First, we process the fNIRS data. This is a duplication of the GLM tutorial
# analysis. The details will not be described here, instead view the
# :ref:`fNIRS GLM tutorial <tut-fnirs-hrf>`.
def individual_analysis(bids_path, ID):
    """Run the single-participant GLM pipeline on one BIDS recording.

    Parameters
    ----------
    bids_path : BIDSPath
        Path to the participant's raw fNIRS recording.
    ID : str
        Participant identifier, stored in the returned dataframe.

    Returns
    -------
    haemo : Raw
        Long-channel haemoglobin data after conversion and resampling.
    cha : DataFrame
        Channel-level GLM estimates with an ``ID`` column added and
        ``theta`` scaled to micromolar.
    """
    raw = read_raw_bids(bids_path=bids_path, verbose=False)

    # Sanitize event names in place ('/' has special meaning to MNE).
    descriptions = raw.annotations.description
    descriptions[:] = [name.replace('/', '_') for name in descriptions]

    # Convert intensity -> optical density -> haemoglobin, then downsample.
    haemo = beer_lambert_law(optical_density(raw), ppf=0.1)
    haemo.resample(0.3)

    # Short channels become systemic regressors; long channels are analysed.
    shorts = get_short_channels(haemo)
    haemo = get_long_channels(haemo)

    # Design matrix with mean short-channel signals appended as regressors.
    design = make_first_level_design_matrix(haemo, stim_dur=5.0)
    design["ShortHbO"] = np.mean(shorts.copy().pick(picks="hbo").get_data(), axis=0)
    design["ShortHbR"] = np.mean(shorts.copy().pick(picks="hbr").get_data(), axis=0)

    # Fit the GLM and pull out channel-level metrics.
    cha = run_glm(haemo, design).to_dataframe()
    cha["ID"] = ID

    # Convert theta to uM for nicer plotting below.
    cha["theta"] = [1.e6 * beta for beta in cha["theta"]]
    return haemo, cha
# Get dataset details
root = fnirs_motor_group.data_path()
dataset = BIDSPath(root=root, task="tapping",
datatype="nirs", suffix="nirs", extension=".snirf")
subjects = get_entity_vals(root, 'subject')
df_cha = pd.DataFrame() # To store channel level results
for sub in subjects: # Loop from first to fifth subject
# Create path to file based on experiment info
bids_path = dataset.update(subject=sub)
# Analyse data and return both ROI and channel results
raw_haemo, channel = individual_analysis(bids_path, sub)
# Append individual results to all participants
df_cha = pd.concat([df_cha, | |
0, 0],
[0, 0, 0, 0, 5/2*Ychi**2-2*b1, 0, -6*Ychi, 0],
[0, 0, 0, 0, -4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi],
[0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2), 0],
[0, 0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2)]])
adm5_g2 = np.array([[2*jj1, -4*Ychi, 0, -24, 0, 0, 0, 0],
[0, (10*jj1-8)-2*b2, 12*jj1, 0, 0, 0, 0, 0],
[0, 0, (-9/2-6*jj1), 0, 0, 0, 0, 0],
[0, 0, 0, (3/2-6*jj1), 0, 0, 0, 0],
[0, 0, 0, 0, 2*jj1, -4*Ychi, 0, -24],
[0, 0, 0, 0, 0, (10*jj1-8)-2*b2, 12*jj1, 0],
[0, 0, 0, 0, 0, 0, (-9/2-6*jj1), 0],
[0, 0, 0, 0, 0, 0, 0, (3/2-6*jj1)]])
adm5_g3 = np.zeros((8,8))
adm5_yc = np.diag([0,0,6,6,0,0,6,6])
adm5_ytau = np.diag([0,0,2,2,0,0,2,2])
adm5_yb = np.diag([0,0,6,6,0,0,6,6])
adm5_yt = np.diag([0,0,6,6,0,0,6,6])
adm5_lam = np.diag([0,0,3,1,0,0,3,1])
full_adm = np.array([adm5_g1, adm5_g2, adm5_g3, adm5_yc, adm5_ytau, adm5_yb, adm5_yt, adm5_lam])
if dchi == 1:
return np.delete(np.delete(full_adm, [1,3,5,7], 1), [1,3,5,7], 2)
else:
return full_adm
def ADM6(Ychi, dchi):
    """ The dimension-six anomalous dimension

    Return a numpy array with the anomalous dimension matrices for g1, g2, g3, yc, ytau, yb, and yt.
    The running due to the Higgs self coupling lambda is currently ignored
    (admlam is kept as a zero matrix so the output stacks eight couplings).

    The operator basis is Q1-Q14 1st, 2nd, 3rd gen.; S1-S17 (mixing of gen: 1-1, 2-2, 3-3, 1-2, 1-3, 2-3),
    S18-S24 1st, 2nd, 3rd gen., S25; D1-D4.

    The explicit ordering of the operators, including flavor indices, is contained in the file
    "directdm/run/operator_ordering.txt"

    Variables
    ---------
    Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.

    dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
    """
    # Captured *before* load_adm is defined, so scope contains exactly
    # {Ychi, dchi}; the file entries are evaluated with these in scope.
    scope = locals()
    def load_adm(admfile):
        # Parse a comma-separated matrix file; each entry is eval'd as a
        # Python expression.  NOTE(review): eval() is safe only because these
        # are trusted files shipped inside the directdm package — never point
        # this at user-supplied paths.
        with open(admfile, "r") as f:
            adm = []
            for line in f:
                line = re.sub("\n", "", line)
                line = line.split(",")
                adm.append(list(map(lambda x: eval(x, scope), line)))
            return adm
    admg1 = load_adm(resource_filename("directdm", "run/full_adm_g1.py"))
    admg2 = load_adm(resource_filename("directdm", "run/full_adm_g2.py"))
    # g3 and lambda contributions are zero at this order: 207x207 zero matrices.
    admg3 = np.zeros((207,207))
    admyc = load_adm(resource_filename("directdm", "run/full_adm_yc.py"))
    admytau = load_adm(resource_filename("directdm", "run/full_adm_ytau.py"))
    admyb = load_adm(resource_filename("directdm", "run/full_adm_yb.py"))
    admyt = load_adm(resource_filename("directdm", "run/full_adm_yt.py"))
    admlam = np.zeros((207,207))
    full_adm = np.array([np.array(admg1), np.array(admg2), admg3,\
                         np.array(admyc), np.array(admytau), np.array(admyb),\
                         np.array(admyt), np.array(admlam)])
    if dchi == 1:
        # For an SU(2) singlet, drop the rows/columns (axes 1 and 2 of the
        # stacked array) of operators that presumably do not exist for dchi == 1.
        return np.delete(np.delete(full_adm, [0, 4, 8, 11, 14, 18, 22, 25, 28, 32, 36, 39,\
                                              42, 44, 205, 206], 1),\
                         [0, 4, 8, 11, 14, 18, 22, 25, 28, 32, 36, 39,\
                          42, 44, 205, 206], 2)
    else:
        return full_adm
def ADM_QCD_dim8(nf):
    """ Return the QCD anomalous dimension in the DM-SM sector at dim.8, for nf flavor EFT """
    # Leading-order mass anomalous dimension and beta function coefficients.
    gamma_m0 = rge.QCD_gamma(nf, 1).trad()
    b0 = rge.QCD_beta(nf, 1).trad()
    # Diagonal (12x12) anomalous dimension: same coefficient for every operator.
    return 2 * (gamma_m0 - b0) * np.eye(12)
def ADM_SM_QCD(nf):
""" Return the QCD anomalous dimension in the SM-SM sector for nf flavor EFT, for a subset of SM dim.6 operators
The basis is spanned by a subset of 10*8 + 5*4 = 100 SM operators, with Wilson coefficients
['P61ud', 'P62ud', 'P63ud', 'P63du', 'P64ud', 'P65ud', 'P66ud', 'P66du',
'P61us', 'P62us', 'P63us', 'P63su', 'P64us', 'P65us', 'P66us', 'P66su',
'P61uc', 'P62uc', 'P63uc', 'P63cu', 'P64uc', 'P65uc', 'P66uc', 'P66cu',
'P61ub', 'P62ub', 'P63ub', 'P63bu', 'P64ub', 'P65ub', 'P66ub', 'P66bu',
'P61ds', 'P62ds', 'P63ds', 'P63sd', 'P64ds', 'P65ds', 'P66ds', 'P66sd',
'P61dc', 'P62dc', 'P63dc', 'P63cd', 'P64dc', 'P65dc', 'P66dc', 'P66cd',
'P61db', 'P62db', 'P63db', 'P63bd', 'P64db', 'P65db', 'P66db', 'P66bd',
'P61sc', 'P62sc', 'P63sc', 'P63cs', 'P64sc', 'P65sc', 'P66sc', 'P66cs',
'P61sb', 'P62sb', 'P63sb', 'P63bs', 'P64sb', 'P65sb', 'P66sb', 'P66bs',
'P61cb', 'P62cb', 'P63cb', 'P63bc', 'P64cb', 'P65cb', 'P66cb', 'P66bc',
'P61u', 'P62u', 'P63u', 'P64u',
'P61d', 'P62d', 'P63d', 'P64d',
'P61s', 'P62s', 'P63s', 'P64s',
'P61c', 'P62c', 'P63c', 'P64c',
'P61b', 'P62b', 'P63b', 'P64b']
"""
adm_qqp_qqp = np.array([[0, 0, 0, 0, 0, 12, 0, 0],
[0, 0, 0, 0, 12, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 12],
[0, 0, 0, 0, 0, 0, 12, 0],
[0, 8/3, 0, 0, - 19/3, 5, 0, 0],
[8/3, 0, 0, 0, 5, - 9, 0, 0],
[0, 0, 0, 8/3, 0, 0, - 23/3, 5],
[0, 0, 8/3, 0, 0, 0, 5, - 23/3]])
adm_qqp_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 4/3, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
adm_qpq_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4/3]])
adm_qqp_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4/3],
[0, 0, 0, 0, 0, 0, 0, 0]])
adm_qpq_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 4/3, 0]])
adm_q_q = np.array([[4, 4, 0, - 28/3],
[0, 0, 0, 44/3],
[0, 0, 44/9, 0],
[5/3, 13/3, 0, - 106/9]])
adm_qqp_q = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 4/3],
[0, 0, 0, 0],
[0, 0, 4/9, 0],
[0, 0, 0, 0]])
adm_qpq_q = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 4/3],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 4/9, 0]])
adm_q_qqp = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 8/3, 0],
[0, 0, 0, 0, 20/9, 0, 0, 0]])
adm_q_qpq = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 8/3],
[0, 0, 0, 0, 20/9, 0, 0, 0]])
adm_ud = np.hstack((adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qqp_qqpp, adm_qpq_qqpp, adm_qpq_qqpp,\
adm_qpq_qqpp, np.zeros((8, 24)), adm_qqp_q,\
adm_qpq_q, np.zeros((8,12))))
adm_us = np.hstack((adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\
adm_qqp_qqpp, adm_qpq_qppq, np.zeros((8,16)),\
adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8, 8)),\
adm_qqp_q, np.zeros((8,4)), adm_qpq_q, np.zeros((8,8))))
adm_uc = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\
adm_qqp_qqpp, np.zeros((8,8)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qpq_qppq, np.zeros((8, 8)),\
adm_qpq_qqpp, adm_qqp_q, np.zeros((8,8)),\
adm_qpq_q, np.zeros((8,4))))
adm_ub = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qqp_qqp, np.zeros((8,16)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq,\
adm_qqp_q, np.zeros((8,12)), adm_qpq_q))
adm_ds = np.hstack((adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,16)),\
adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8,8)),\
np.zeros((8,4)), adm_qqp_q, adm_qpq_q, np.zeros((8,8))))
adm_dc = np.hstack((adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\
adm_qpq_qppq, np.zeros((8,8)), adm_qpq_qqpp,\
np.zeros((8,4)), adm_qqp_q, np.zeros((8,4)),\
adm_qpq_q, np.zeros((8,4))))
adm_db = np.hstack((adm_qqp_qppq, np.zeros((8,16)), adm_qpq_qppq,\
adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\
np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq,\
np.zeros((8,4)), adm_qqp_q, np.zeros((8,8)), adm_qpq_q))
adm_sc = np.hstack((np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)),\
adm_qqp_qqp, adm_qqp_qqpp, adm_qpq_qqpp, np.zeros((8,8)),\
adm_qqp_q, adm_qpq_q, np.zeros((8,4))))
adm_sb = np.hstack((np.zeros((8,8)), adm_qqp_qppq, np.zeros((8,8)),\
adm_qpq_qppq, adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq,\
adm_qqp_qqpp, adm_qqp_qqp, adm_qpq_qppq, np.zeros((8,8)),\
adm_qqp_q, np.zeros((8,4)), adm_qpq_q))
adm_cb = np.hstack((np.zeros((8,16)), adm_qqp_qppq, adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq,\
adm_qqp_qppq, adm_qpq_qppq, adm_qqp_qqp,\
np.zeros((8,12)), adm_qqp_q, adm_qpq_q))
adm_u = np.hstack((adm_q_qqp, adm_q_qqp, adm_q_qqp, adm_q_qqp,\
np.zeros((4,48)), adm_q_q, np.zeros((4,16))))
adm_d = np.hstack((adm_q_qpq, np.zeros((4,24)), adm_q_qqp, adm_q_qqp,\
adm_q_qqp, np.zeros((4,24)), np.zeros((4,4)),\
adm_q_q, np.zeros((4,12))))
adm_s = np.hstack((np.zeros((4,8)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,16)), adm_q_qqp,\
adm_q_qqp, np.zeros((4,8)),\
np.zeros((4,8)), adm_q_q, np.zeros((4,8))))
adm_c = np.hstack((np.zeros((4,16)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,8)),\
adm_q_qpq, np.zeros((4,8)), adm_q_qqp,\
np.zeros((4,12)), adm_q_q, np.zeros((4,4))))
adm_b = np.hstack((np.zeros((4,24)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,8)), adm_q_qpq,\
adm_q_qpq, np.zeros((4,16)), adm_q_q))
adm = np.vstack((adm_ud, adm_us, adm_uc, adm_ub, adm_ds,\
adm_dc, adm_db, adm_sc, adm_sb, adm_cb,\
adm_u, adm_d, adm_s, | |
= mm3by2(f_hat2, tf.reshape(gold2, [batch * clen, -1]), transpose=True) # bs x cl x bs*cl
global_step = tf.to_float(py_utils.GetOrCreateGlobalStep())
temperatures = [tf.minimum(tf.constant(sras), global_step) / sras for sras in p.sent_role_anneal_steps]
for i, t in enumerate(temperatures):
tf.summary.scalar('temperature_sent_role_%d' %i, t)
den_dot = sum([dots[0]] + [dot * temperature for dot, temperature in zip(dots[1:], temperatures)])
inter_res.gold_embs = gold_embs
inter_res.dots = dots
inter_res.dot = den_dot
with tf.name_scope('chunk_loss'):
delta = tf.scatter_nd(last_pred_pos_indices, -tf.ones([batch]), [batch, clen])
chunk_weights = chunk_weights + delta
one_hot_target = tf.one_hot(merged_indices, batch * clen, off_value=1e-8)
den_dot = den_dot + tf.reshape(chunk_weights * 99.0 - 99.0, [-1])
chunk_log_probs = tf.reduce_sum(one_hot_target * tf.nn.log_softmax(den_dot), axis=-1)
# if p.pred_mode == 'rnn':
# out.chunk_log_probs = chunk_log_probs * tf.transpose(chunk_weights, [1, 0])
# else:
out.chunk_log_probs = chunk_log_probs * chunk_weights
out.num_chunks = tf.reduce_sum(chunk_weights) + 1e-8
inter_res.w_chunk = chunk_weights
inter_res.target = one_hot_target
inter_res.masked_dot = den_dot
inter_res.clp = out.chunk_log_probs
inter_res.num_chunks = out.num_chunks
out.inter_res = inter_res
return out, state1
else:
return out, state1
class RnnLm(RnnLmNoEmbedding):
  """Stacked RNN based language model layer.

  Adds an input embedding layer (optionally an `HRREmbeddingLayer`) on top of
  `RnnLmNoEmbedding`, with optional embedding dropout and optional tying of
  the input embedding with the softmax output weights (``p.tie``).
  """

  @classmethod
  def Params(cls):
    p = super(RnnLm, cls).Params()
    p.Define('emb', layers.EmbeddingLayer.Params(),
             'The embedding layer params.')
    p.Define('embedding_dropout_keep_prob', 1.0, 'Embedding dropout keep prob.')
    p.Define('embedding_dropout_seed', None, 'Embedding dropout seed.')
    p.Define('tie', False, 'Tie input and output embeddings.')
    p.emb.max_num_shards = 1
    return p

  # TODO(zhifengc): Consider merge Params() and CommonParams().
  @classmethod
  def CommonParams(cls,
                   vocab_size,
                   emb_dim=1024,
                   num_layers=2,
                   rnn_dims=2048,
                   rnn_hidden_dims=0,
                   residual_start=1,
                   softmax_max_alloc=None):
    """A LM model parameterized by vocab size, etc.

    Args:
      vocab_size: Vocab size.
      emb_dim: Embedding dimension.
      num_layers: The number of rnn layers.
      rnn_dims: Each RNN layer has this many output nodes.
      rnn_hidden_dims: If > 0, each RNN layer has this many hidden nodes.
      residual_start: index of the first layer with a residual connection;
        higher index layers also have residuals.
      softmax_max_alloc: If set to a positive integer the soft-max
        computation is chunked into allocations of at most
        `softmax_max_alloc`; when left to its default value of None no
        chunking is done.

    Returns:
      A `RnnLm` parameter object.
    """
    p = cls.Params()
    p.vocab_size = vocab_size

    init_scale = 1.0 / math.sqrt(rnn_dims)

    # Embedding.
    p.emb.vocab_size = vocab_size
    p.emb.embedding_dim = emb_dim
    p.emb.scale_sqrt_depth = False
    p.emb.params_init = py_utils.WeightInit.Uniform(init_scale)

    # RNNs
    p.rnns.num_layers = num_layers
    # Which layer starts to have the residual connection.
    p.rnns.skip_start = residual_start
    if num_layers > 1:
      # First layer maps emb_dim -> rnn_dims; subsequent layers are
      # rnn_dims -> rnn_dims.
      p.rnns.cell_tpl = [
          rnn_cell.LSTMCellSimple.Params().Set(
              num_input_nodes=emb_dim,
              num_output_nodes=rnn_dims,
              num_hidden_nodes=rnn_hidden_dims),
          rnn_cell.LSTMCellSimple.Params().Set(
              num_input_nodes=rnn_dims,
              num_output_nodes=rnn_dims,
              num_hidden_nodes=rnn_hidden_dims)
      ]
    else:
      p.rnns.cell_tpl = [
          rnn_cell.LSTMCellSimple.Params().Set(
              num_input_nodes=emb_dim,
              num_output_nodes=rnn_dims,
              num_hidden_nodes=rnn_hidden_dims)
      ]

    # Softmax
    p.softmax.input_dim = rnn_dims
    p.softmax.num_classes = vocab_size
    p.softmax.params_init = py_utils.WeightInit.Uniform(init_scale)
    if softmax_max_alloc:
      # If the vocab is very large, computes the softmax chunk-by-chunk.
      p.softmax.chunk_size = max(1, int(softmax_max_alloc / vocab_size))
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(RnnLm, self).__init__(params)
    p = self.params

    # The embedding must agree with the configured vocab and with the input
    # size of the first RNN cell.
    assert p.emb.vocab_size == p.vocab_size, ('{} vs. {}'.format(
        p.emb.vocab_size, p.vocab_size))
    assert p.emb.embedding_dim == p.rnns.cell_tpl[0].num_input_nodes, (
        '{} vs. {}'.format(p.emb.embedding_dim,
                           p.rnns.cell_tpl[0].num_input_nodes))

    with tf.variable_scope(p.name):
      self.CreateChild('emb', p.emb)

  def FProp(self,
            theta,
            inputs,
            paddings,
            state0,
            labels=None,
            direct_features=None,
            chunk_ids=None,
            step_inference=False,
            ids=None):
    """Computes xent loss given the language model input activations.

    Args:
      theta: A `.NestedMap` object containing weights' values of this
        layer and its children layers.
      inputs: input ids. An int32 tensor of shape [time, batch].
      paddings: a 0/1 tensor of shape [time, batch].
      state0: A `.NestedMap` containing the initial recurrent state.
      labels: If not None, a `.NestedMap` containing the following fields:

        - class_weights, a tensor with shape [time, batch] containing the
          weights for each target word.
        - class_ids, a tensor with shape [time, batch] of int32 dtype containing
          the target class labels.
        - class_probabilities, a tensor with shape [time, batch, vocab_size] of
          float values indicating class-membership probabilities.
      direct_features:
        If not None, a tensor of [time, batch, direct_feature_dims] that is
        concatenated to the output of the last RNN layer.
      chunk_ids: optional chunk-id tensor forwarded to the parent's FProp
        (used by the chunk-level loss; see `RnnLmNoEmbedding`).
      step_inference: whether this FProp is a single step of inference;
        forwarded to the parent's FProp.
      ids: unused by callers; immediately overwritten below with `inputs`
        (kept in the signature for interface compatibility).

    Returns:
      If `labels` is not None, returns (xent_output, state1), where
      `xent_output` is a `.NestedMap` as defined by `SoftmaxLayer`'s return
      value and `state1` is the next recurrent state. Otherwise,
      `xent_output` only contains the softmax logits.
    """
    p = self.params
    # NOTE: the `ids` argument is deliberately shadowed; `inputs` are the ids.
    ids = py_utils.HasRank(inputs, 2)
    paddings = py_utils.HasShape(paddings, tf.shape(ids))
    assert state0

    def forward(activation):
      # Dropout on embeddings is only applied in training.
      if p.embedding_dropout_keep_prob < 1.0 and not p.is_eval:
        activation = tf.nn.dropout(
            activation,
            keep_prob=p.embedding_dropout_keep_prob,
            seed=p.embedding_dropout_seed)
      return super(RnnLm, self).FProp(theta, activation, paddings, state0,
                                      labels=labels,
                                      direct_features=direct_features,
                                      emb_weights=emb_weights,
                                      chunk_ids=chunk_ids,
                                      step_inference=step_inference,
                                      ids=ids)

    # TODO(jmluo) may wanna get rid of this assertion to obtain a baseline (nr > 0 but w/o HRR)
    # also, should move this into __init__.
    if p.num_word_roles > 0:
      assert p.emb.cls == HRREmbeddingLayer
      assert p.tie

    if p.emb.cls == HRREmbeddingLayer:
      # HRR lookup also returns the per-shard weight tables (emb_weights),
      # needed below for weight tying.
      activation, signature, emb_weights = self.emb.EmbLookup(theta.emb, ids)
    else:
      activation = self.emb.EmbLookup(theta.emb, ids)
      emb_weights = None

    if p.tie:
      # HRREmbeddingLayer stores its shards under theta.emb.s.wm; plain
      # embedding layers under theta.emb.wm.
      try:
        num_shards = len(theta.emb.wm)
      except AttributeError:
        num_shards = len(theta.emb.s.wm)

      def transpose_or_not(w):
        # Full softmax stores weights as [input_dim, num_classes]; sampled
        # softmax expects the untransposed embedding table.
        transpose = (p.softmax.num_sampled == 0)
        if transpose:
          return tf.transpose(w)
        else:
          return w

      if p.emb.cls == HRREmbeddingLayer:
        if p.num_word_roles > 0:
          if p.emb.lazy:
            pass  # NOTE lazy mode means don't share the softmax weights directly
          else:
            # Tie softmax weights to the (reshaped) filler embeddings.
            for shard_ind in xrange(num_shards):
              f_shard = emb_weights.f[shard_ind]
              reshaped_f_shard = tf.reshape(f_shard, [-1, p.softmax.input_dim])
              theta.softmax['weight_%d' % shard_ind] = transpose_or_not(
                  reshaped_f_shard)
        else:
          # BUG FIX: was `emb.e[shard_ind]` — `emb` is undefined in this scope
          # (NameError at runtime); the lookup tables live on `emb_weights`,
          # mirroring the `emb_weights.f` branch above.
          for shard_ind in xrange(num_shards):
            theta.softmax['weight_%d' % shard_ind] = transpose_or_not(
                emb_weights.e[shard_ind])
      else:
        for shard_ind in xrange(num_shards):
          main = transpose_or_not(theta.emb.wm[shard_ind])
          theta.softmax['weight_%d' % shard_ind] = main

    return forward(activation)
class MoeLm(BaseLanguageModel):
  """Mixture of experts language modeling class.

  Runs ``1 + number_of_experts`` embedding+RNN towers: tower 0 acts as a
  domain (gating) predictor, towers 1..N are the experts. The expert outputs
  are mixed with the gating softmax and fed either to a post-gating RNN LM
  (`p.merge`) or directly to an output softmax.
  """

  @classmethod
  def Params(cls):
    p = super(MoeLm, cls).Params()
    p.Define(
        'emb',
        layers.EmbeddingLayer.Params().Set(max_num_shards=1),
        'The embedding layer params.')
    p.Define('shared_emb', True, 'If true, uses a single embedding')
    p.Define(
        'add_postgating_rnn', True, 'If true, add an RNNLM post gating. '
        'If false, add only a softmax on top.')
    p.Define('rnns', rnn_layers.StackedFRNNLayerByLayer.Params(),
             'The stacked-RNNs layer params.')
    p.Define('number_of_experts', 7, 'Number of experts.')
    p.Define('merge', RnnLmNoEmbedding.Params(),
             'The LM to use for the merged LM')
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(MoeLm, self).__init__(params)
    p = self.params
    if not isinstance(p.rnns.cell_tpl, (list, tuple)):
      p.rnns.cell_tpl = [p.rnns.cell_tpl]

    # Embedding must match the vocab and the first RNN cell's input size.
    assert p.emb.vocab_size == p.vocab_size, ('{} vs. {}'.format(
        p.emb.vocab_size, p.vocab_size))
    assert p.emb.embedding_dim == p.rnns.cell_tpl[0].num_input_nodes, (
        '{} vs. {}'.format(p.emb.embedding_dim,
                           p.rnns.cell_tpl[0].num_input_nodes))
    if p.add_postgating_rnn:
      assert p.merge.vocab_size == p.vocab_size, ('{} vs. {}'.format(
          p.merge.vocab_size, p.vocab_size))

    with tf.variable_scope(p.name):
      # Embeddings
      if p.shared_emb:
        self.CreateChild('emb', p.emb)
      else:
        # 0-th embedding is for the domain predictor.
        self.CreateChildren(
            'emb', [
                p.emb.Copy().Set(name='emb_%d' % i)
                for i in range(1 + p.number_of_experts)
            ])

      # Rnns
      # 0-th rnns is for the domain predictor.
      self.CreateChildren(
          'rnns', [p.rnns.Copy() for i in range(1 + p.number_of_experts)])

      # Softmax
      rnn_output_size = _RnnOutputSize(p.rnns)
      sm_params = layers.SimpleFullSoftmax.Params()
      sm_params.name = 'domain_predictor_softmax'
      sm_params.input_dim = rnn_output_size
      sm_params.num_classes = p.number_of_experts
      self.CreateChild('domain_predictor_softmax', sm_params)

      # Merge
      if p.add_postgating_rnn:
        self.CreateChild('merge', p.merge)
      else:
        output_sm_params = layers.SimpleFullSoftmax.Params()
        output_sm_params.name = 'output_softmax'
        output_sm_params.input_dim = rnn_output_size
        output_sm_params.num_classes = p.vocab_size
        self.CreateChild('output_softmax', output_sm_params)

  def zero_state(self, batch_size):
    # Initial recurrent state for all towers (and the merge LM if present).
    p = self.params
    if p.add_postgating_rnn:
      return py_utils.NestedMap(
          rnns=[x.zero_state(batch_size) for x in self.rnns],
          merge=self.merge.zero_state(batch_size))
    else:
      return py_utils.NestedMap(
          rnns=[x.zero_state(batch_size) for x in self.rnns])

  def FProp(self, theta, inputs, paddings, state0, labels=None):
    """Forward compute."""
    p = self.params

    ids = py_utils.HasRank(inputs, 2)
    paddings = py_utils.HasShape(paddings, tf.shape(ids))
    seqlen, batch = tf.unstack(tf.shape(inputs), num=2)
    assert state0

    paddings_3d = tf.expand_dims(paddings, axis=2)

    # RNNs
    if p.shared_emb:
      # One lookup, reused by the domain predictor and every expert.
      emb_act = [self.emb.EmbLookup(theta.emb, inputs)
                ] * (1 + p.number_of_experts)
    else:
      emb_act = [
          self.emb[i].EmbLookup(theta.emb[i], inputs)
          for i in range(1 + p.number_of_experts)
      ]
    state1 = py_utils.NestedMap(rnns=[])
    rnns_act = []
    for i, act in enumerate(emb_act):
      act, state = self.rnns[i].FProp(theta.rnns[i], act, paddings_3d,
                                      state0.rnns[i])
      act = py_utils.HasRank(act, 3)
      rnns_act += [act]
      state1.rnns += [state]

    # [time, batch, experts, dims].
    expert_stacked = tf.stack(rnns_act[1:], axis=2)

    # Compute gating softmax. The 0-th rnns is used as the expert
    # predictor. Because SoftmaxLayer.Logits takes a matrix as input,
    # we reshape rnns_act[0], the domain predictor activation, to a
    # matrix here.
    act = tf.reshape(rnns_act[0], [seqlen * batch, -1])
    logits = self.domain_predictor_softmax.Logits(
        theta.domain_predictor_softmax, act)
    # [time, batch, experts]
    gating = tf.reshape(tf.nn.softmax(logits), [seqlen, batch, -1])

    # Mix the experts.
    # [time, batch, dims]
    combined = tf.squeeze(
        tf.matmul(
            # [time, batch, 1, experts]
            tf.expand_dims(gating, axis=2),
            # [time, batch, experts, dims]
            expert_stacked),
        axis=2)

    if p.add_postgating_rnn:
      # Note that this layer includes 1 or more RNN layers followed
      # by a softmax.
      xent_loss, state1.merge = self.merge.FProp(theta.merge, combined,
                                                 paddings, state0.merge, labels)
    else:
      xent_loss = self.output_softmax.FProp(
          theta=theta.output_softmax,
          inputs=combined,
          class_weights=labels.class_weights,
          class_ids=labels.class_ids)

    return xent_loss, state1
class TransformerLmNoEmbedding(BaseLanguageModel):
"""Transformer language model."""
@classmethod
def Params(cls):
p = super(TransformerLmNoEmbedding, cls).Params()
p.Define('position_emb', layers.PositionalEmbeddingLayer.Params(),
'Position embedding | |
# Copyright (C) 2019-2021 Parrot Drones SAS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Parrot Company nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PARROT COMPANY BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import concurrent.futures
import threading
import time
from abc import ABC, abstractmethod
from aenum import Enum
from boltons.setutils import IndexedSet
from collections import OrderedDict
from concurrent.futures import TimeoutError as FutureTimeoutError
from concurrent.futures import CancelledError as FutureCancelledError
from logging import getLogger
from .utils import timestamp_now
from .utils.pomp_loop_thread import Future
from .event_marker import EventMarker
from .event import EventContext, MultipleEventContext
class ExpectPolicy(Enum):
    """Policy used to fulfill an expectation: wait for an event, check the
    current state, or check first and wait only if the check failed."""

    wait = 0
    check = 1
    check_wait = 2
class ExpectationBase(ABC):
    """Abstract base class for awaitable "expectation" objects.

    An expectation wraps a `Future` that is resolved when the expectation
    succeeds, is cancelled, or times out. Expectations are scheduled on a
    scheduler object and can be combined with the `|` (any), `&` (all) and
    `>>` (sequence) operators. Instances implement `__await__` so they can
    also be awaited from a coroutine.
    """

    # When True, the scheduler keeps monitoring this expectation for new
    # events even after it has been scheduled.
    always_monitor = False
    # NOTE(review): appears to mimic asyncio's `_asyncio_future_blocking`
    # protocol flag so that `yield self` in __await__ cooperates with the
    # event loop -- confirm against .utils.pomp_loop_thread.Future.
    _eventloop_future_blocking = False

    def __init__(self):
        self._future = Future()
        self._awaited = False  # becomes True once awaited/scheduled
        self._scheduler = None
        self._success = False
        self._timeout = None  # relative timeout in seconds, see set_timeout()
        self._deadline = None  # absolute timestamp, computed at scheduling time
        self._timedout = False
        self._scheduled_condition = threading.Condition()

    def _schedule(self, scheduler):
        # This expectation is scheduled on the `scheduler`, subclasses of ExpectationBase can
        # perform some operations on this scheduler: schedule another expectation later or
        # perform an operation on the scheduler object when this expectation is schedule (like
        # sending a message for which this expectation object expect some result).
        # IMPORTANT NOTE: this function (or its overridden versions) should be non-blocking
        self._awaited = True
        self._scheduler = scheduler
        if self._future.loop is None:
            self._future.loop = self._scheduler.expectation_loop
        if self._timeout is not None:
            # Turn the relative timeout into an absolute deadline now that the
            # expectation is actually running.
            self._deadline = timestamp_now() + self._timeout
        with self._scheduled_condition:
            # Wake any thread blocked waiting for this expectation to be
            # scheduled (see MultipleExpectationMixin.as_completed).
            self._scheduled_condition.notify_all()

    def _await(self, scheduler):
        """Mark this expectation as awaited; return False if it already was."""
        ret = not self._awaited
        self._awaited = True
        self._scheduler = scheduler
        return ret

    def success(self):
        return self._success

    def wait(self, _timeout=None):
        """Block until done, mapping future timeout/cancellation onto this
        expectation; return self for chaining."""
        if self._awaited:
            try:
                self._future.result(timeout=_timeout)
            except FutureTimeoutError:
                self.set_timedout()
            except FutureCancelledError:
                self.cancel()
        return self

    def add_done_callback(self, cb):
        # The callback receives this expectation object, not the raw future.
        self._future.add_done_callback(lambda f: cb(self))

    def set_success(self):
        """Resolve the expectation successfully.

        Returns False (and does nothing) if the future was already done.
        """
        if not self._future.done():
            self._success = True
            self._future.set_result(self.received_events())
            return True
        return False

    def set_exception(self, exception):
        if not self._future.done():
            self._future.set_exception(exception)

    def set_timeout(self, _timeout):
        self._timeout = _timeout

    def set_timedout(self):
        """Mark this expectation as timed out (cancelling it); no-op when the
        future is already done or the expectation already succeeded."""
        if self._future.done():
            return False
        if not self._success:
            self._timedout = True
            self.cancel()
            return True
        return False

    def cancel(self):
        if self._future.done():
            return False
        self._future.cancel()
        return True

    def cancelled(self):
        return self._future.cancelled()

    def remaining_time(self):
        # NOTE(review): assumes a deadline was set (set_timeout() called and
        # the expectation scheduled); otherwise `None` arithmetic raises.
        remaining = self._deadline - timestamp_now()
        return remaining if remaining > 0.0 else 0.0

    def timedout(self):
        """Return True if timed out; lazily detects deadline expiration."""
        if self._timedout:
            return True
        if self._success:
            return False
        if self._deadline is not None:
            timedout = timestamp_now() > self._deadline
            if timedout:
                self.set_timedout()
        return self._timedout

    def base_copy(self, *args, **kwds):
        """Build a fresh, un-awaited instance of this class, carrying over
        only the timeout setting."""
        other = self.__class__(*args, **kwds)
        ExpectationBase.__init__(other)
        other._timeout = self._timeout
        return other

    @abstractmethod
    def copy(self):
        """
        All expectations subclasses must implement a shallow copy.
        """
        pass

    def explain(self):
        return f"{self.__class__.__name__} is {bool(self)}"

    def done(self):
        # A never-awaited expectation can still be done (e.g.
        # SuccessExpectation); success is required in every case.
        return (self._future.done() or not self._awaited) and self._success

    def __await__(self):
        if not self._eventloop_future_blocking and not self.done():
            pass  # placeholder removed -- see below
        if not self.done():
            self._eventloop_future_blocking = True
            yield self
        if not self.done():
            raise RuntimeError("await wasn't used with future")
        return self

    def result(self):
        return self._future.result()

    def __bool__(self):
        return self.done()

    def __or__(self, other):
        """`a | b`: succeeds when either expectation succeeds."""
        return WhenAnyExpectation([self, other])

    def __and__(self, other):
        """`a & b`: succeeds when both expectations succeed."""
        return WhenAllExpectations([self, other])

    def __rshift__(self, other):
        """`a >> b`: succeeds when both succeed, in sequence."""
        return WhenSequenceExpectations([self, other])

    def __str__(self):
        return self.explain()

    __repr__ = __str__
    __nonzero__ = __bool__  # Python 2 style alias
class SuccessExpectation(ExpectationBase):
    """An expectation that is already fulfilled at construction time."""

    def __init__(self):
        super().__init__()
        self.set_success()

    def copy(self):
        """Return a fresh, equally-successful instance."""
        return super().base_copy()

    def received_events(self):
        """No event is ever matched by this expectation."""
        return None
class FailedExpectation(ExpectationBase):
    """An expectation that fails immediately with the given message."""

    def __init__(self, message):
        super().__init__()
        self._message = message
        self.set_exception(RuntimeError(message))

    def copy(self):
        """Return a fresh instance carrying the same failure message."""
        return super().base_copy(self._message)

    def explain(self):
        """The failure message itself is the best explanation."""
        return self._message
class FutureExpectation(ExpectationBase):
    """An expectation backed by an externally supplied future.

    It succeeds when the future completes without an exception and
    `status_checker(result)` is truthy.
    """

    def __init__(self, future, status_checker=lambda status: True):
        super().__init__()
        # Replace the future created by the base class with the caller's.
        self._future = future
        self._status_checker = status_checker
        self._future.add_done_callback(self._on_done)

    def _on_done(self, f):
        # Only a cleanly-completed future can count as a success.
        if f.exception() is None:
            self._success = self._status_checker(f.result())

    def check(self, *args, **kwds):
        # Nothing to check incrementally: completion is driven by the future.
        return self

    def copy(self):
        """Return a fresh instance sharing the same future and checker."""
        return super().base_copy(self._future, self._status_checker)
class Expectation(ExpectationBase):
    """Abstract base for event-driven expectations.

    Concrete subclasses report which events they expect, which they have
    received, and which have matched, and implement a non-blocking
    `check()` hook called by the scheduler.
    """

    @abstractmethod
    def check(self, *args, **kwds):
        # IMPORTANT NOTE: this function (or its overridden versions) should be non-blocking
        pass

    @abstractmethod
    def expected_events(self):
        pass

    @abstractmethod
    def received_events(self):
        """
        Returns a collection of events that have matched at least one of the
        messages ID monitored by this expectation.
        """
        pass

    @abstractmethod
    def matched_events(self):
        """
        Returns a collection of events that have matched this expectation
        (or a child expectation)
        """
        pass

    @abstractmethod
    def unmatched_events(self):
        """
        Returns a collection of events object that are still expected
        """
        pass

    def marked_events(self, default_marked_events=EventMarker.unmatched):
        """
        Returns a collection of events with matched/unmatched markers.
        """
        marker = EventMarker.matched if self._success else default_marked_events
        return self.expected_events()._set_marker(marker)

    def explain(self):
        """
        Returns a debug string that explain this expectation current state.
        """
        try:
            return str(self.marked_events())
        except Exception:
            getLogger("olympe.expectations").exception("")
            return None
class CheckWaitStateExpectationMixin:
    """Mixin combining a "check" expectation with a fallback "wait" one.

    At scheduling time the check expectation is evaluated first; if it
    already succeeds, the whole expectation succeeds immediately. Otherwise
    the wait expectation is scheduled and monitored until it succeeds or
    times out (the ExpectPolicy.check_wait behavior).
    """

    def __init__(self, check_expectation, wait_expectation):
        super().__init__()
        self._check_expectation = check_expectation
        self._wait_expectation = wait_expectation
        self._checked = False  # True when the check expectation succeeded

    def _await(self, scheduler):
        # Mark self and both children as awaited; order matters: each
        # _await() has side effects, so evaluate all of them (no
        # short-circuit) before combining the results.
        ret = all(
            list(
                map(
                    lambda e: e._await(scheduler),
                    (super(), self._check_expectation, self._wait_expectation),
                )
            )
        )
        if not ret:
            return False
        self._checked = self._check_expectation.success()
        self._success = self._checked
        if self._success:
            self.set_success()
        return ret

    def _schedule(self, scheduler):
        super()._schedule(scheduler)
        self._check_expectation._schedule(scheduler)
        self._checked = self._check_expectation.success()
        self._success = self._checked
        if not self._success:
            # The check failed: fall back to waiting for the event.
            scheduler._schedule(
                self._wait_expectation, monitor=self._wait_expectation.always_monitor
            )
        else:
            self.set_success()

    def copy(self):
        """Shallow copy carrying copies of both child expectations."""
        other = super().base_copy(
            self._check_expectation.copy(), self._wait_expectation.copy()
        )
        return other

    def check(self, *args, **kwds):
        # Only the wait expectation consumes events; when the check already
        # succeeded there is nothing left to do.
        if not self._checked and self._wait_expectation.check(*args, **kwds).success():
            self.set_success()
        return self

    def expected_events(self):
        """Expected events of whichever child is authoritative."""
        if self._checked:
            return EventContext(
                self._check_expectation.expected_events().events(),
                ExpectPolicy.check_wait,
            )
        else:
            return EventContext(
                self._wait_expectation.expected_events().events(),
                ExpectPolicy.check_wait,
            )

    def received_events(self):
        if self._checked:
            return self._check_expectation.received_events()
        else:
            return self._wait_expectation.received_events()

    def matched_events(self):
        if self._checked:
            return EventContext(self._check_expectation.matched_events().events())
        else:
            return EventContext(self._wait_expectation.matched_events().events())

    def unmatched_events(self):
        if self._checked:
            return EventContext(self._check_expectation.unmatched_events().events())
        else:
            return EventContext(self._wait_expectation.unmatched_events().events())

    def set_timeout(self, _timeout):
        # Propagate the timeout to the wait expectation, the only child that
        # can actually time out.
        super().set_timeout(_timeout)
        self._wait_expectation.set_timeout(_timeout)

    def timedout(self):
        """A successful check can never time out."""
        if self._checked:
            return False
        else:
            if self._wait_expectation.timedout():
                self.set_timedout()
        return self._wait_expectation.timedout()

    def cancelled(self):
        return self._wait_expectation.cancelled()
class CheckWaitStateExpectation(CheckWaitStateExpectationMixin, Expectation):
    """Concrete check-then-wait expectation (mixin bound to Expectation)."""
    pass
class MultipleExpectationMixin:
    """Mixin implementing an expectation composed of sub-expectations.

    Subclasses (any/all/sequence combinators) define how the children are
    combined via `_combine_method()` and which children end up in
    `matched_expectations`.
    """

    def __init__(self, expectations=None):
        super().__init__()
        if expectations is None:
            self.expectations = []
        else:
            self.expectations = expectations
        self.matched_expectations = IndexedSet()

    def _await(self, scheduler):
        # Every child's _await() has side effects, so all of them must run
        # (no short-circuiting) before combining the results.
        ret = True
        if not super()._await(scheduler):
            ret = False
        if not all(list(map(lambda e: e._await(scheduler), self.expectations))):
            ret = False
        return ret

    def copy(self):
        """Shallow copy carrying copies of every child expectation."""
        other = super().base_copy(list(map(lambda e: e.copy(), self.expectations)))
        return other

    def append(self, expectation):
        """Append a child; same-typed composites are flattened in place."""
        if not isinstance(expectation, self.__class__):
            self.expectations.append(expectation)
        else:
            self.expectations.extend(expectation.expectations)
        return self

    def expected_events(self):
        return MultipleEventContext(
            list(map(lambda e: e.expected_events(), self.expectations)),
            self._combine_method(),
        )

    def received_events(self):
        return MultipleEventContext(
            list(map(lambda e: e.received_events(), self.expectations)),
            self._combine_method(),
        )

    def matched_events(self):
        return MultipleEventContext(
            list(map(lambda e: e.matched_events(), self.matched_expectations)),
            self._combine_method(),
        )

    def unmatched_events(self):
        return MultipleEventContext(
            list(map(lambda e: e.unmatched_events(), self.unmatched_expectations())),
            self._combine_method(),
        )

    def unmatched_expectations(self):
        """Yield children that have not matched yet."""
        for expectation in self.expectations:
            if expectation not in self.matched_expectations:
                yield expectation

    def __iter__(self):
        return iter(self.expectations)

    def __len__(self):
        return len(self.expectations)

    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, repr(self.expectations))

    @abstractmethod
    def _combine_method(self):
        # Short string ("|", "&", ">>", ...) describing how children combine.
        pass

    def marked_events(self, default_marked_events=EventMarker.unmatched):
        if self._success:
            # Once the composite succeeded, children that did not match are
            # merely ignored, not failures.
            default_marked_events = EventMarker.ignored
        return MultipleEventContext(
            list(
                map(lambda e: e.marked_events(default_marked_events), self.expectations)
            ),
            self._combine_method(),
        )

    def as_completed(self, expected_count=None, timeout=None):
        """Yield child expectations as their futures complete.

        :param expected_count: stop after this many children completed
            (default: all of them).
        :param timeout: overall timeout in seconds (default: wait forever).
        :raises FutureTimeoutError: if the deadline expires first.
        """
        end_time = None
        if timeout is not None:
            end_time = timeout + time.monotonic()
        # BUGFIX: threading.Condition.wait_for() must be called with the
        # condition's lock held, otherwise it raises
        # RuntimeError("cannot wait on un-acquired lock"). Acquire the
        # condition while waiting for this expectation to be scheduled
        # (ExpectationBase._schedule notifies under the same condition).
        with self._scheduled_condition:
            if not self._scheduled_condition.wait_for(
                lambda: self._awaited, timeout=timeout
            ):
                raise FutureTimeoutError()
        done = set()
        if timeout is not None:
            timeout = end_time - time.monotonic()
        while timeout is None or timeout > 0:
            # Map each still-pending child's future back to the child.
            fs = OrderedDict(
                [(e._future, e) for e in self.expectations if e not in done]
            )
            for f in concurrent.futures.as_completed(fs.keys(), timeout=timeout):
                yield fs[f]
                if timeout is not None:
                    timeout = end_time - time.monotonic()
                done.add(fs[f])
                done_count = len(done)
                if expected_count is None:
                    if done_count == len(self.expectations):
                        return
                elif done_count == expected_count:
                    return
            if timeout is not None:
                timeout = end_time - time.monotonic()
        raise FutureTimeoutError()
class MultipleExpectation(MultipleExpectationMixin, Expectation):
    """Concrete composite expectation (mixin bound to Expectation)."""
    pass
class WhenAnyExpectationMixin:
def _schedule(self, scheduler):
super()._schedule(scheduler)
for expectation in self.expectations:
scheduler._schedule(expectation, monitor=expectation.always_monitor)
if expectation.success():
self.matched_expectations.add(expectation)
self.set_success()
break
if self.success():
return
if all(expectation.cancelled() for expectation in self.expectations):
self.cancel()
def timedout(self):
if super().timedout():
return True
elif all(map(lambda e: e.timedout(), self.expectations)):
| |
<filename>cartography/intel/aws/ecs.py<gh_stars>0
import logging
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
import boto3
import neo4j
from cartography.util import aws_handle_regions
from cartography.util import camel_to_snake
from cartography.util import dict_date_to_epoch
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
@aws_handle_regions(default_return_value=([], []))
def get_ecs_clusters(boto3_session: boto3.session.Session, region: str) -> Tuple[List[str], List[Dict[str, Any]]]:
    """Return (cluster ARNs, cluster detail dicts) for every ECS cluster in a region."""
    client = boto3_session.client('ecs', region_name=region)
    cluster_arns: List[str] = []
    for page in client.get_paginator('list_clusters').paginate():
        cluster_arns.extend(page.get('clusterArns', []))
    # TODO: also include attachment info, and make relationships between the attachements
    # and the cluster.
    includes = ['SETTINGS', 'CONFIGURATIONS']
    clusters: List[Dict[str, Any]] = []
    # describe_clusters accepts at most 100 ARNs per call, so chunk the list.
    for start in range(0, len(cluster_arns), 100):
        described = client.describe_clusters(
            clusters=cluster_arns[start:start + 100],
            include=includes,
        )
        clusters.extend(described.get('clusters', []))
    return (cluster_arns, clusters)
@timeit
@aws_handle_regions
def get_ecs_container_instances(
    cluster_arn: str,
    boto3_session: boto3.session.Session,
    region: str,
) -> List[Dict[str, Any]]:
    """Return detail dicts for every container instance of the given ECS cluster."""
    client = boto3_session.client('ecs', region_name=region)
    instance_arns: List[str] = []
    paginator = client.get_paginator('list_container_instances')
    for page in paginator.paginate(cluster=cluster_arn):
        instance_arns.extend(page.get('containerInstanceArns', []))
    details: List[Dict[str, Any]] = []
    # describe_container_instances accepts at most 100 ARNs per call.
    for start in range(0, len(instance_arns), 100):
        response = client.describe_container_instances(
            cluster=cluster_arn,
            containerInstances=instance_arns[start:start + 100],
            include=['CONTAINER_INSTANCE_HEALTH'],
        )
        details.extend(response.get('containerInstances', []))
    return details
@timeit
@aws_handle_regions
def get_ecs_services(cluster_arn: str, boto3_session: boto3.session.Session, region: str) -> List[Dict[str, Any]]:
    """Return detail dicts for every ECS service in the given cluster."""
    client = boto3_session.client('ecs', region_name=region)
    service_arns: List[str] = []
    for page in client.get_paginator('list_services').paginate(cluster=cluster_arn):
        service_arns.extend(page.get('serviceArns', []))
    details: List[Dict[str, Any]] = []
    # describe_services accepts at most 10 service ARNs per call.
    for start in range(0, len(service_arns), 10):
        response = client.describe_services(
            cluster=cluster_arn,
            services=service_arns[start:start + 10],
        )
        details.extend(response.get('services', []))
    return details
@timeit
@aws_handle_regions
def get_ecs_task_definitions(boto3_session: boto3.session.Session, region: str) -> List[Dict[str, Any]]:
    """Return the detail dict for every ECS task definition in the region."""
    client = boto3_session.client('ecs', region_name=region)
    definition_arns: List[str] = []
    for page in client.get_paginator('list_task_definitions').paginate():
        definition_arns.extend(page.get('taskDefinitionArns', []))
    # describe_task_definition only accepts one ARN at a time.
    return [
        client.describe_task_definition(taskDefinition=arn)['taskDefinition']
        for arn in definition_arns
    ]
@timeit
@aws_handle_regions
def get_ecs_tasks(cluster_arn: str, boto3_session: boto3.session.Session, region: str) -> List[Dict[str, Any]]:
    """Return detail dicts for every ECS task in the given cluster."""
    client = boto3_session.client('ecs', region_name=region)
    task_arns: List[str] = []
    for page in client.get_paginator('list_tasks').paginate(cluster=cluster_arn):
        task_arns.extend(page.get('taskArns', []))
    details: List[Dict[str, Any]] = []
    # describe_tasks accepts at most 100 task ARNs per call.
    for start in range(0, len(task_arns), 100):
        response = client.describe_tasks(
            cluster=cluster_arn,
            tasks=task_arns[start:start + 100],
        )
        details.extend(response.get('tasks', []))
    return details
@timeit
def load_ecs_clusters(
    neo4j_session: neo4j.Session,
    data: List[Dict[str, Any]],
    region: str,
    current_aws_account_id: str,
    aws_update_tag: int,
) -> None:
    """Ingest ECS clusters into neo4j and attach them to their AWSAccount.

    :param neo4j_session: neo4j write session.
    :param data: `describe_clusters` dicts from get_ecs_clusters().
    :param region: AWS region the clusters were fetched from.
    :param current_aws_account_id: AWS account that owns the clusters.
    :param aws_update_tag: update tag for this sync run (used by cleanup).
    """
    ingest_clusters = """
    UNWIND {Clusters} AS cluster
        MERGE (c:ECSCluster{id: cluster.clusterArn})
        ON CREATE SET c.firstseen = timestamp()
        SET c.name = cluster.clusterName, c.region = {Region},
            c.arn = cluster.clusterArn,
            c.ecc_kms_key_id = cluster.configuration.executeCommandConfiguration.kmsKeyId,
            c.ecc_logging = cluster.configuration.executeCommandConfiguration.logging,
            c.ecc_log_configuration_cloud_watch_log_group_name = cluster.configuration.executeCommandConfiguration.logConfiguration.cloudWatchLogGroupName,
            c.ecc_log_configuration_cloud_watch_encryption_enabled = cluster.configuration.executeCommandConfiguration.logConfiguration.cloudWatchEncryptionEnabled,
            c.ecc_log_configuration_s3_bucket_name = cluster.configuration.executeCommandConfiguration.logConfiguration.s3BucketName,
            c.ecc_log_configuration_s3_encryption_enabled = cluster.configuration.executeCommandConfiguration.logConfiguration.s3EncryptionEnabled,
            c.ecc_log_configuration_s3_key_prefix = cluster.configuration.executeCommandConfiguration.logConfiguration.s3KeyPrefix,
            c.status = cluster.status,
            c.settings_container_insights = cluster.settings_container_insights,
            c.capacity_providers = cluster.capacityProviders,
            c.attachments_status = cluster.attachmentsStatus,
            c.lastupdated = {aws_update_tag}
        WITH c
        MATCH (owner:AWSAccount{id: {AWS_ACCOUNT_ID}})
        MERGE (owner)-[r:RESOURCE]->(c)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {aws_update_tag}
    """  # noqa:E501
    clusters: List[Dict[str, Any]] = []
    for cluster in data:
        # Flatten the `settings` list of {name, value} pairs into top-level
        # `settings_<snake_name>` keys so Cypher can read them as properties.
        # NOTE: mutates the input dicts in place.
        for setting in cluster.get("settings", []):
            setting_name = camel_to_snake(setting["name"])
            cluster[f"settings_{setting_name}"] = setting["value"]
        clusters.append(cluster)
    neo4j_session.run(
        ingest_clusters,
        Clusters=clusters,
        Region=region,
        AWS_ACCOUNT_ID=current_aws_account_id,
        aws_update_tag=aws_update_tag,
    )
@timeit
def load_ecs_container_instances(
    neo4j_session: neo4j.Session,
    cluster_arn: str,
    data: List[Dict[str, Any]],
    region: str,
    current_aws_account_id: str,
    aws_update_tag: int,
) -> None:
    """Ingest ECS container instances and link them to their cluster.

    :param neo4j_session: neo4j write session.
    :param cluster_arn: ARN of the cluster the instances belong to.
    :param data: `describe_container_instances` dicts.
    :param region: AWS region the instances were fetched from.
    :param current_aws_account_id: AWS account that owns the instances.
    :param aws_update_tag: update tag for this sync run.
    """
    ingest_instances = """
    UNWIND {Instances} AS instance
        MERGE (i:ECSContainerInstance{id: instance.containerInstanceArn})
        ON CREATE SET i.firstseen = timestamp()
        SET i.ec2_instance_id = instance.ec2InstanceId, i.region = {Region},
            i.arn = instance.containerInstanceArn,
            i.capacity_provider_name = instance.capacityProviderName,
            i.version = instance.version,
            i.version_info_agent_version = instance.versionInfo.agentVersion,
            i.version_info_agent_hash = instance.versionInfo.agentHash,
            i.version_info_agent_docker_version = instance.versionInfo.dockerVersion,
            i.status = instance.status,
            i.status_reason = instance.statusReason,
            i.agent_connected = instance.agentConnected,
            i.agent_update_status = instance.agentUpdateStatus,
            i.registered_at = instance.registeredAt,
            i.lastupdated = {aws_update_tag}
        WITH i
        MATCH (c:ECSCluster{id: {ClusterARN}})
        MERGE (c)-[r:HAS_CONTAINER_INSTANCE]->(i)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {aws_update_tag}
    """
    instances: List[Dict[str, Any]] = []
    for instance in data:
        # Convert the datetime to an epoch int so neo4j can store it.
        # NOTE: mutates the input dicts in place.
        instance['registeredAt'] = dict_date_to_epoch(instance, 'registeredAt')
        instances.append(instance)
    neo4j_session.run(
        ingest_instances,
        ClusterARN=cluster_arn,
        Instances=instances,
        Region=region,
        AWS_ACCOUNT_ID=current_aws_account_id,
        aws_update_tag=aws_update_tag,
    )
@timeit
def load_ecs_services(
    neo4j_session: neo4j.Session,
    cluster_arn: str,
    data: List[Dict[str, Any]],
    region: str,
    current_aws_account_id: str,
    aws_update_tag: int,
) -> None:
    """Ingest ECS services, linking each to its cluster and (when already
    loaded) its task definition.

    :param neo4j_session: neo4j write session.
    :param cluster_arn: ARN of the cluster the services belong to.
    :param data: `describe_services` dicts.
    :param region: AWS region the services were fetched from.
    :param current_aws_account_id: AWS account that owns the services.
    :param aws_update_tag: update tag for this sync run.
    """
    ingest_services = """
    UNWIND {Services} AS service
        MERGE (s:ECSService{id: service.serviceArn})
        ON CREATE SET s.firstseen = timestamp()
        SET s.name = service.serviceName, s.region = {Region},
            s.arn = service.serviceArn,
            s.cluster_arn = service.clusterArn,
            s.status = service.status,
            s.desired_count = service.desiredCount,
            s.running_count = service.runningCount,
            s.pending_count = service.pendingCount,
            s.launch_type = service.launchType,
            s.platform_version = service.platformVersion,
            s.platform_family = service.platformFamily,
            s.task_definition = service.taskDefinition,
            s.deployment_config_circuit_breaker_enable = service.deploymentConfiguration.deploymentCircuitBreaker.enable,
            s.deployment_config_circuit_breaker_rollback = service.deploymentConfiguration.deploymentCircuitBreaker.rollback,
            s.deployment_config_maximum_percent = service.deploymentConfiguration.maximumPercent,
            s.deployment_config_minimum_healthy_percent = service.deploymentConfiguration.minimumHealthyPercent,
            s.role_arn = service.roleArn,
            s.created_at = service.createdAt,
            s.health_check_grace_period_seconds = service.healthCheckGracePeriodSeconds,
            s.created_by = service.createdBy,
            s.enable_ecs_managed_tags = service.enableECSManagedTags,
            s.propagate_tags = service.propagateTags,
            s.enable_execute_command = service.enableExecuteCommand,
            s.lastupdated = {aws_update_tag}
        WITH s
        MATCH (c:ECSCluster{id: {ClusterARN}})
        MERGE (c)-[r:HAS_SERVICE]->(s)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {aws_update_tag}
        WITH s
        MATCH (d:ECSTaskDefinition{id: s.task_definition})
        MERGE (s)-[r2:HAS_TASK_DEFINITION]->(d)
        ON CREATE SET r2.firstseen = timestamp()
        SET r2.lastupdated = {aws_update_tag}
    """  # noqa:E501
    services: List[Dict[str, Any]] = []
    for service in data:
        # Convert the datetime to an epoch int so neo4j can store it.
        # NOTE: mutates the input dicts in place.
        service['createdAt'] = dict_date_to_epoch(service, 'createdAt')
        services.append(service)
    neo4j_session.run(
        ingest_services,
        ClusterARN=cluster_arn,
        Services=services,
        Region=region,
        AWS_ACCOUNT_ID=current_aws_account_id,
        aws_update_tag=aws_update_tag,
    )
@timeit
def load_ecs_task_definitions(
    neo4j_session: neo4j.Session,
    data: List[Dict[str, Any]],
    region: str,
    current_aws_account_id: str,
    aws_update_tag: int,
) -> None:
    """Ingest ECS task definitions (attached to the AWSAccount), then load
    their container definitions.

    :param neo4j_session: neo4j write session.
    :param data: `describe_task_definition` dicts.
    :param region: AWS region the definitions were fetched from.
    :param current_aws_account_id: AWS account that owns the definitions.
    :param aws_update_tag: update tag for this sync run.
    """
    ingest_task_definitions = """
    UNWIND {Definitions} AS def
        MERGE (d:ECSTaskDefinition{id: def.taskDefinitionArn})
        ON CREATE SET d.firstseen = timestamp()
        SET d.arn = def.taskDefinitionArn, d.region = {Region},
            d.family = def.family,
            d.task_role_arn = def.taskRoleArn,
            d.execution_role_arn = def.executionRoleArn,
            d.network_mode = def.networkMode,
            d.revision = def.revision,
            d.status = def.status,
            d.compatibilities = def.compatibilities,
            d.runtime_platform_cpu_architecture = def.runtimePlatform.cpuArchitecture,
            d.runtime_platform_operating_system_family = def.runtimePlatform.operatingSystemFamily,
            d.requires_compatibilities = def.requiresCompatibilities,
            d.cpu = def.cpu,
            d.memory = def.memory,
            d.pid_mode = def.pidMode,
            d.ipc_mode = def.ipcMode,
            d.proxy_configuration_type = def.proxyConfiguration.type,
            d.proxy_configuration_container_name = def.proxyConfiguration.containerName,
            d.registered_at = def.registeredAt,
            d.deregistered_at = def.deregisteredAt,
            d.registered_by = def.registeredBy,
            d.ephemeral_storage_size_in_gib = def.ephemeralStorage.sizeInGiB,
            d.lastupdated = {aws_update_tag}
        WITH d
        MATCH (owner:AWSAccount{id: {AWS_ACCOUNT_ID}})
        MERGE (owner)-[r:RESOURCE]->(d)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {aws_update_tag}
    """
    container_definitions: List[Dict[str, Any]] = []
    task_definitions: List[Dict[str, Any]] = []
    for task_definition in data:
        # Convert datetimes to epoch ints so neo4j can store them.
        # NOTE: mutates the input dicts in place.
        task_definition['registeredAt'] = dict_date_to_epoch(task_definition, 'registeredAt')
        task_definition['deregisteredAt'] = dict_date_to_epoch(task_definition, 'deregisteredAt')
        # Collect nested container definitions, tagging each with its parent
        # task definition ARN for the relationship created later.
        for container in task_definition.get("containerDefinitions", []):
            container["_taskDefinitionArn"] = task_definition["taskDefinitionArn"]
            container_definitions.append(container)
        task_definitions.append(task_definition)
    neo4j_session.run(
        ingest_task_definitions,
        Definitions=task_definitions,
        Region=region,
        AWS_ACCOUNT_ID=current_aws_account_id,
        aws_update_tag=aws_update_tag,
    )
    # Container definitions depend on the task definition nodes created above.
    load_ecs_container_definitions(
        neo4j_session,
        container_definitions,
        region,
        current_aws_account_id,
        aws_update_tag,
    )
@timeit
def load_ecs_tasks(
    neo4j_session: neo4j.Session,
    cluster_arn: str,
    data: List[Dict[str, Any]],
    region: str,
    current_aws_account_id: str,
    aws_update_tag: int,
) -> None:
    """Ingest ECS tasks, linking each to its cluster, task definition and
    container instance (when those nodes exist), then load the tasks'
    containers.

    :param neo4j_session: neo4j write session.
    :param cluster_arn: ARN of the cluster the tasks belong to.
    :param data: `describe_tasks` dicts.
    :param region: AWS region the tasks were fetched from.
    :param current_aws_account_id: AWS account that owns the tasks.
    :param aws_update_tag: update tag for this sync run.
    """
    ingest_tasks = """
    UNWIND {Tasks} AS task
        MERGE (t:ECSTask{id: task.taskArn})
        ON CREATE SET t.firstseen = timestamp()
        SET t.arn = task.taskArn, t.region = {Region},
            t.availability_zone = task.availabilityZone,
            t.capacity_provider_name = task.capacityProviderName,
            t.cluster_arn = task.clusterArn,
            t.connectivity = task.connectivity,
            t.connectivity_at = task.connectivityAt,
            t.container_instance_arn = task.containerInstanceArn,
            t.cpu = task.cpu,
            t.created_at = task.createdAt,
            t.desired_status = task.desiredStatus,
            t.enable_execute_command = task.enableExecuteCommand,
            t.execution_stopped_at = task.executionStoppedAt,
            t.group = task.group,
            t.health_status = task.healthStatus,
            t.last_status = task.lastStatus,
            t.launch_type = task.launchType,
            t.memory = task.memory,
            t.platform_version = task.platformVersion,
            t.platform_family = task.platformFamily,
            t.pull_started_at = task.pullStartedAt,
            t.pull_stopped_at = task.pullStoppedAt,
            t.started_at = task.startedAt,
            t.started_by = task.startedBy,
            t.stop_code = task.stopCode,
            t.stopped_at = task.stoppedAt,
            t.stopped_reason = task.stoppedReason,
            t.stopping_at = task.stoppingAt,
            t.task_definition_arn = task.taskDefinitionArn,
            t.version = task.version,
            t.ephemeral_storage_size_in_gib = task.ephemeralStorage.sizeInGiB,
            t.lastupdated = {aws_update_tag}
        WITH t
        MATCH (c:ECSCluster{id: {ClusterARN}})
        MERGE (c)-[r:HAS_TASK]->(t)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {aws_update_tag}
        WITH t
        MATCH (td:ECSTaskDefinition{id: t.task_definition_arn})
        MERGE (t)-[r2:HAS_TASK_DEFINITION]->(td)
        ON CREATE SET r2.firstseen = timestamp()
        SET r2.lastupdated = {aws_update_tag}
        WITH t
        MATCH (ci:ECSContainerInstance{id: t.container_instance_arn})
        MERGE (ci)-[r3:HAS_TASK]->(t)
        ON CREATE SET r3.firstseen = timestamp()
        SET r3.lastupdated = {aws_update_tag}
    """
    containers: List[Dict[str, Any]] = []
    tasks: List[Dict[str, Any]] = []
    for task in data:
        # Convert every datetime field to an epoch int so neo4j can store it.
        # NOTE: mutates the input dicts in place.
        task['connectivityAt'] = dict_date_to_epoch(task, 'connectivityAt')
        task['createdAt'] = dict_date_to_epoch(task, 'createdAt')
        task['executionStoppedAt'] = dict_date_to_epoch(task, 'executionStoppedAt')
        task['pullStartedAt'] = dict_date_to_epoch(task, 'pullStartedAt')
        task['pullStoppedAt'] = dict_date_to_epoch(task, 'pullStoppedAt')
        task['startedAt'] = dict_date_to_epoch(task, 'startedAt')
        task['stoppedAt'] = dict_date_to_epoch(task, 'stoppedAt')
        task['stoppingAt'] = dict_date_to_epoch(task, 'stoppingAt')
        # NOTE(review): assumes every describe_tasks dict has a "containers"
        # key -- a KeyError here would abort the load; confirm with the API.
        containers.extend(task["containers"])
        tasks.append(task)
    neo4j_session.run(
        ingest_tasks,
        ClusterARN=cluster_arn,
        Tasks=tasks,
        Region=region,
        AWS_ACCOUNT_ID=current_aws_account_id,
        aws_update_tag=aws_update_tag,
    )
    # Containers reference the ECSTask nodes created above.
    load_ecs_containers(
        neo4j_session,
        containers,
        region,
        current_aws_account_id,
        aws_update_tag,
    )
@timeit
def load_ecs_container_definitions(
    neo4j_session: neo4j.Session,
    data: List[Dict[str, Any]],
    region: str,
    current_aws_account_id: str,
    aws_update_tag: int,
) -> None:
    """Ingest ECS container definitions and link them to their task definition.

    :param neo4j_session: neo4j write session.
    :param data: container definition dicts, each tagged with a
        `_taskDefinitionArn` key by load_ecs_task_definitions().
    :param region: AWS region the definitions were fetched from.
    :param current_aws_account_id: AWS account that owns the definitions.
    :param aws_update_tag: update tag for this sync run.
    """
    # BUGFIX: `stop_timeout` previously read `def.stop_timeout`, a key that
    # never exists in the AWS response -- the ECS ContainerDefinition field is
    # camelCase `stopTimeout` (cf. `def.startTimeout` on the line above), so
    # the property was always null.
    ingest_definitions = """
    UNWIND {Definitions} AS def
        MERGE (d:ECSContainerDefinition{id: def._taskDefinitionArn + "-" + def.name})
        ON CREATE SET d.firstseen = timestamp()
        SET d.task_definition_arn = def._taskDefinitionArn, d.region = {Region},
            d.name = def.name,
            d.image = def.image,
            d.cpu = def.cpu,
            d.memory = def.memory,
            d.memory_reservation = def.memoryReservation,
            d.links = def.links,
            d.essential = def.essential,
            d.entry_point = def.entryPoint,
            d.command = def.command,
            d.start_timeout = def.startTimeout,
            d.stop_timeout = def.stopTimeout,
            d.hostname = def.hostname,
            d.user = def.user,
            d.working_directory = def.workingDirectory,
            d.disable_networking = def.disableNetworking,
            d.privileged = def.privileged,
            d.readonly_root_filesystem = def.readonlyRootFilesystem,
            d.dns_servers = def.dnsServers,
            d.dns_search_domains = def.dnsSearchDomains,
            d.docker_security_options = def.dockerSecurityOptions,
            d.interactive = def.interactive,
            d.pseudo_terminal = def.pseudoTerminal,
            d.lastupdated = {aws_update_tag}
        WITH d
        MATCH (td:ECSTaskDefinition{id: d.task_definition_arn})
        MERGE (td)-[r:HAS_CONTAINER_DEFINITION]->(d)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {aws_update_tag}
    """
    neo4j_session.run(
        ingest_definitions,
        Definitions=data,
        Region=region,
        AWS_ACCOUNT_ID=current_aws_account_id,
        aws_update_tag=aws_update_tag,
    )
@timeit
def load_ecs_containers(
neo4j_session: neo4j.Session,
data: List[Dict[str, Any]],
region: str,
current_aws_account_id: str,
aws_update_tag: int,
) -> None:
ingest_containers = """
UNWIND {Containers} AS container
MERGE (c:ECSContainer{id: container.containerArn})
ON CREATE SET c.firstseen | |
B539CCE3 409D13CD 566AFBB4 8D6C0191 81E1BCFE 94B30269
EDFE72FE 9B6AA4BD 7B5A0F1C 71CFFF4C 19C418E1 F6EC0179
81BC087F 2A7065B3 84B890D3 191F2BFA
'''),
p=int_from_hex('''
AD107E1E 9123A9D0 D660FAA7 9559C51F A20D64E5 683B9FD1
B54B1597 B61D0A75 E6FA141D F95A56DB AF9A3C40 7BA1DF15
EB3D688A 309C180E 1DE6B85A 1274A0A6 6D3F8152 AD6AC212
9037C9ED EFDA4DF8 D91E8FEF 55B7394B 7AD5B7D0 B6C12207
C9F98D11 ED34DBF6 C6BA0B2C 8BBC27BE 6A00E0A0 B9C49708
B3BF8A31 70918836 81286130 BC8985DB 1602E714 415D9330
278273C7 DE31EFDC 7310F712 1FD5A074 15987D9A DC0A486D
CDF93ACC 44328387 315D75E1 98C641A4 80CD86A1 B9E587E8
BE60E69C C928B2B9 C52172E4 13042E9B 23F10B0E 16E79763
C9B53DCF 4BA80A29 E3FB73C1 6B8E75B9 7EF363E2 FFA31F71
CF9DE538 4E71B81C 0AC4DFFE 0C10E64F
'''),
length=2048,
),
Group(
name='2048-bit MODP from RFC 5114',
g=int_from_hex('''
3FB32C9B 73134D0B 2E775066 60EDBD48 4CA7B18F 21EF2054
07F4793A 1A0BA125 10DBC150 77BE463F FF4FED4A AC0BB555
BE3A6C1B 0C6B47B1 BC3773BF 7E8C6F62 901228F8 C28CBB18
A55AE313 41000A65 0196F931 C77A57F2 DDF463E5 E9EC144B
777DE62A AAB8A862 8AC376D2 82D6ED38 64E67982 428EBC83
1D14348F 6F2F9193 B5045AF2 767164E1 DFC967C1 FB3F2E55
A4BD1BFF E83B9C80 D052B985 D182EA0A DB2A3B73 13D3FE14
C8484B1E 052588B9 B7D2BBD2 DF016199 ECD06E15 57CD0915
B3353BBB 64E0EC37 7FD02837 0DF92B52 C7891428 CDC67EB6
184B523D 1DB246C3 2F630784 90F00EF8 D647D148 D4795451
5E2327CF EF98C582 664B4C0F 6CC41659
'''),
p=int_from_hex('''
87A8E61D B4B6663C FFBBD19C 65195999 8CEEF608 660DD0F2
5D2CEED4 435E3B00 E00DF8F1 D61957D4 FAF7DF45 61B2AA30
16C3D911 34096FAA 3BF4296D 830E9A7C 209E0C64 97517ABD
5A8A9D30 6BCF67ED 91F9E672 5B4758C0 22E0B1EF 4275BF7B
6C5BFC11 D45F9088 B941F54E B1E59BB8 BC39A0BF 12307F5C
4FDB70C5 81B23F76 B63ACAE1 CAA6B790 2D525267 35488A0E
F13C6D9A 51BFA4AB 3AD83477 96524D8E F6A167B5 A41825D9
67E144E5 14056425 1CCACB83 E6B486F6 B3CA3F79 71506026
C0B857F6 89962856 DED4010A BD0BE621 C3A3960A 54E710C3
75F26375 D7014103 A4B54330 C198AF12 6116D227 6E11715F
693877FA D7EF09CA DB094AE9 1E1A1597
'''),
length=2048,
),
Group(
name='2048-bit FFDHE group from RFC 7919',
g=2,
p=int_from_hex('''
FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1
D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9
7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561
2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935
984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735
30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB
B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19
0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61
9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73
3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA
886B4238 61285C97 FFFFFFFF FFFFFFFF
'''),
length=2048,
),
Group(
name='3072-bit FFDHE group from RFC 7919',
g=2,
p=int_from_hex('''
FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1
D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9
7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561
2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935
984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735
30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB
B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19
0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61
9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73
3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA
886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238
61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C
AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3
64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D
ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF
3C1B20EE 3FD59D7C 25E41D2B 66C62E37 FFFFFFFF FFFFFFFF
'''),
length=3072,
),
Group(
name='4096-bit FFDHE group from RFC 7919',
g=2,
p=int_from_hex('''
FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1
D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9
7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561
2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935
984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735
30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB
B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19
0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61
9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73
3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA
886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238
61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C
AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3
64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D
ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF
3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB
7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004
87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832
A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A
1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF
8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E655F6A
FFFFFFFF FFFFFFFF
'''),
length=4096,
),
Group(
name='6144-bit FFDHE group from RFC 7919',
g=2,
p=int_from_hex('''
FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1
D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9
7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561
2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935
984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735
30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB
B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19
0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61
9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73
3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA
886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238
61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C
AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3
64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D
ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF
3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB
7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004
87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832
A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A
1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF
8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E0DD902
0BFD64B6 45036C7A 4E677D2C 38532A3A 23BA4442 CAF53EA6
3BB45432 9B7624C8 917BDD64 B1C0FD4C B38E8C33 4C701C3A
CDAD0657 FCCFEC71 9B1F5C3E 4E46041F 388147FB 4CFDB477
A52471F7 A9A96910 B855322E DB6340D8 A00EF092 350511E3
0ABEC1FF F9E3A26E 7FB29F8C 183023C3 587E38DA 0077D9B4
763E4E4B 94B2BBC1 94C6651E 77CAF992 EEAAC023 2A281BF6
B3A739C1 22611682 0AE8DB58 47A67CBE F9C9091B 462D538C
D72B0374 6AE77F5E 62292C31 1562A846 505DC82D B854338A
E49F5235 C95B9117 8CCF2DD5 CACEF403 EC9D1810 C6272B04
5B3B71F9 DC6B80D6 3FDD4A8E 9ADB1E69 62A69526 D43161C1
A41D570D 7938DAD4 A40E329C D0E40E65 FFFFFFFF FFFFFFFF
'''),
length=6144,
),
Group(
name='8192-bit FFDHE group from RFC 7919',
g=2,
p=int_from_hex('''
FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1
D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9
7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561
2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935
984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735
30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB
B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19
0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61
9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73
3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA
886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238
61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C
AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3
64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D
ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF
3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB
7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004
87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832
A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A
1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF
8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E0DD902
0BFD64B6 45036C7A 4E677D2C 38532A3A 23BA4442 CAF53EA6
3BB45432 9B7624C8 917BDD64 B1C0FD4C B38E8C33 4C701C3A
CDAD0657 FCCFEC71 9B1F5C3E 4E46041F 388147FB 4CFDB477
A52471F7 A9A96910 B855322E DB6340D8 A00EF092 350511E3
0ABEC1FF F9E3A26E 7FB29F8C 183023C3 587E38DA 0077D9B4
763E4E4B 94B2BBC1 94C6651E 77CAF992 EEAAC023 2A281BF6
B3A739C1 22611682 0AE8DB58 47A67CBE F9C9091B 462D538C
D72B0374 6AE77F5E 62292C31 1562A846 505DC82D B854338A
E49F5235 C95B9117 8CCF2DD5 CACEF403 EC9D1810 C6272B04
5B3B71F9 DC6B80D6 3FDD4A8E 9ADB1E69 62A69526 D43161C1
A41D570D 7938DAD4 A40E329C CFF46AAA 36AD004C F600C838
1E425A31 D951AE64 FDB23FCE C9509D43 687FEB69 EDD1CC5E
0B8CC3BD F64B10EF 86B63142 A3AB8829 555B2F74 7C932665
CB2C0F1C C01BD702 29388839 D2AF05E4 54504AC7 8B758282
2846C0BA 35C35F5C 59160CC0 46FD8251 541FC68C 9C86B022
BB709987 6A460E74 51A8A931 09703FEE 1C217E6C 3826E52C
51AA691E 0E423CFC 99E9E316 50C1217B 624816CD AD9A95F9
D5B80194 88D9C0A0 A1FE3075 A577E231 83F81D4A 3F2FA457
1EFC8CE0 BA8A4FE8 B6855DFE 72B0A66E DED2FBAB FBE58A30
FAFABE1C 5D71A87E 2F741EF8 C1FE86FE A6BBFDE5 30677F0D
97D11D49 F7A8443D 0822E506 A9F4614E 011E2A94 838FF88C
D68C8BB7 C5C6424C FFFFFFFF FFFFFFFF
'''),
length=8192,
),
# from https://github.com/openssl/openssl/blob/d02b48c/apps/s_server.c
Group(
name='OpenSSL 512-bit',
g=int_from_c_array('0x02,'),
p=int_from_c_array('''
0xDA,0x58,0x3C,0x16,0xD9,0x85,0x22,0x89,0xD0,0xE4,0xAF,0x75,
0x6F,0x4C,0xCA,0x92,0xDD,0x4B,0xE5,0x33,0xB8,0x04,0xFB,0x0F,
0xED,0x94,0xEF,0x9C,0x8A,0x44,0x03,0xED,0x57,0x46,0x50,0xD3,
0x69,0x99,0xDB,0x29,0xD7,0x76,0x27,0x6B,0xA2,0xD3,0xD4,0x12,
0xE2,0x18,0xF4,0xDD,0x1E,0x08,0x4C,0xF6,0xD8,0x00,0x3E,0x7C,
0x47,0x74,0xE8,0x33,
'''),
length=512,
),
# from https://github.com/apache/httpd/blob/7e5c359/modules/ssl/ssl_engine_dh.c
Group(
name='Apache httpd 512-bit',
g=int_from_c_array('0x02,'),
p=int_from_c_array('''
0xD4, 0xBC, 0xD5, 0x24, 0x06, 0xF6, 0x9B, 0x35, 0x99, 0x4B, 0x88, 0xDE,
0x5D, 0xB8, 0x96, 0x82, 0xC8, 0x15, 0x7F, 0x62, 0xD8, 0xF3, 0x36, 0x33,
0xEE, 0x57, 0x72, 0xF1, 0x1F, 0x05, 0xAB, 0x22, 0xD6, 0xB5, 0x14, 0x5B,
0x9F, 0x24, 0x1E, 0x5A, 0xCC, 0x31, 0xFF, 0x09, 0x0A, 0x4B, 0xC7, 0x11,
0x48, 0x97, 0x6F, 0x76, 0x79, 0x50, 0x94, 0xE7, 0x1E, 0x79, 0x03, 0x52,
0x9F, 0x5A, 0x82, 0x4B,
'''),
length=512,
),
Group(
name='Apache httpd 1024-bit',
g=int_from_c_array('0x02,'),
p=int_from_c_array('''
0xE6, 0x96, 0x9D, 0x3D, 0x49, 0x5B, 0xE3, 0x2C, 0x7C, 0xF1, 0x80, 0xC3,
0xBD, 0xD4, 0x79, 0x8E, 0x91, 0xB7, 0x81, 0x82, 0x51, 0xBB, 0x05, 0x5E,
0x2A, 0x20, 0x64, 0x90, 0x4A, 0x79, 0xA7, 0x70, 0xFA, 0x15, 0xA2, 0x59,
0xCB, 0xD5, 0x23, 0xA6, 0xA6, 0xEF, 0x09, 0xC4, 0x30, 0x48, 0xD5, 0xA2,
0x2F, 0x97, 0x1F, 0x3C, 0x20, 0x12, 0x9B, 0x48, 0x00, 0x0E, 0x6E, 0xDD,
0x06, 0x1C, 0xBC, 0x05, 0x3E, 0x37, 0x1D, 0x79, 0x4E, 0x53, 0x27, 0xDF,
0x61, 0x1E, 0xBB, 0xBE, 0x1B, 0xAC, 0x9B, 0x5C, 0x60, 0x44, 0xCF, 0x02,
0x3D, 0x76, 0xE0, 0x5E, 0xEA, 0x9B, 0xAD, 0x99, 0x1B, 0x13, 0xA6, 0x3C,
0x97, 0x4E, 0x9E, 0xF1, 0x83, 0x9E, 0xB5, 0xDB, 0x12, 0x51, 0x36, 0xF7,
0x26, 0x2E, 0x56, 0xA8, 0x87, 0x15, 0x38, 0xDF, 0xD8, 0x23, 0xC6, 0x50,
0x50, 0x85, 0xE2, 0x1F, 0x0D, 0xD5, 0xC8, 0x6B,
'''),
length=1024,
),
# from https://github.com/apache/httpd/blob/aaa5749/modules/ssl/ssl_engine_dh.c
Group(
name='Apache httpd 512-bit',
g=int_from_c_array('0x02,'),
p=int_from_c_array('''
0x9F, 0xDB, 0x8B, 0x8A, 0x00, 0x45, 0x44, 0xF0, 0x04, 0x5F, 0x17, 0x37,
0xD0, 0xBA, 0x2E, 0x0B, 0x27, 0x4C, 0xDF, 0x1A, | |
path='path path/dir', port='999',
fragment='moresup', scheme='sup', host='host')
assert str(f.path) == '/path%20path/dir'
assert f.url == 'sup://host:999/path%20path/dir?a=a+a#moresup'
# Path as a list of path segments to join.
assert f is f.set(path=['d1', 'd2'])
assert f.url == 'sup://host:999/d1/d2?a=a+a#moresup'
assert f is f.add(path=['/d3/', '/d4/'])
assert f.url == 'sup://host:999/d1/d2/%2Fd3%2F/%2Fd4%2F?a=a+a#moresup'
# Set a lot of stuff (but avoid conflicts, which are tested
# below).
f.set(
query_params={'k': 'k'}, fragment_path='no scrubs', scheme='morp',
host='myhouse', port=69, path='j$j*m#n', fragment_args={'f': 'f'})
assert f.url == 'morp://myhouse:69/j$j*m%23n?k=k#no%20scrubs?f=f'
# No side effects.
oldurl = f.url
with self.assertRaises(ValueError):
f.set(args={'a': 'a a'}, path='path path/dir', port='INVALID_PORT',
fragment='moresup', scheme='sup', host='host')
assert f.url == oldurl
with warnings.catch_warnings(record=True) as w1:
self.assertRaises(
ValueError, f.set, netloc='nope.com:99', port='NOPE')
assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
assert f.url == oldurl
# Separator isn't reset with set().
f = furl.Fragment()
f.separator = False
f.set(path='flush', args={'dad': 'nope'})
assert str(f) == 'flushdad=nope'
# Test warnings for potentially overlapping parameters.
f = furl.furl('http://pumps.com')
warnings.simplefilter("always")
# Scheme, origin overlap. Scheme takes precedence.
with warnings.catch_warnings(record=True) as w1:
f.set(scheme='hi', origin='bye://sup.sup')
assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
assert f.scheme == 'hi'
# Netloc, origin, host and/or port. Host and port take precedence.
with warnings.catch_warnings(record=True) as w1:
f.set(netloc='dumps.com:99', origin='sup://pumps.com:88')
assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
with warnings.catch_warnings(record=True) as w1:
f.set(netloc='dumps.com:99', host='ohay.com')
assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
assert f.host == 'ohay.com'
assert f.port == 99
with warnings.catch_warnings(record=True) as w2:
f.set(netloc='dumps.com:99', port=88)
assert len(w2) == 1 and issubclass(w2[0].category, UserWarning)
assert f.port == 88
with warnings.catch_warnings(record=True) as w2:
f.set(origin='http://dumps.com:99', port=88)
assert len(w2) == 1 and issubclass(w2[0].category, UserWarning)
assert f.port == 88
with warnings.catch_warnings(record=True) as w3:
f.set(netloc='dumps.com:99', host='ohay.com', port=88)
assert len(w3) == 1 and issubclass(w3[0].category, UserWarning)
# Query, args, and query_params overlap - args and query_params
# take precedence.
with warnings.catch_warnings(record=True) as w4:
f.set(query='yosup', args={'a': 'a', 'b': 'b'})
assert len(w4) == 1 and issubclass(w4[0].category, UserWarning)
assert self._param(f.url, 'a', 'a')
assert self._param(f.url, 'b', 'b')
with warnings.catch_warnings(record=True) as w5:
f.set(query='yosup', query_params={'a': 'a', 'b': 'b'})
assert len(w5) == 1 and issubclass(w5[0].category, UserWarning)
assert self._param(f.url, 'a', 'a')
assert self._param(f.url, 'b', 'b')
with warnings.catch_warnings(record=True) as w6:
f.set(args={'a': 'a', 'b': 'b'}, query_params={'c': 'c', 'd': 'd'})
assert len(w6) == 1 and issubclass(w6[0].category, UserWarning)
assert self._param(f.url, 'c', 'c')
assert self._param(f.url, 'd', 'd')
# Fragment, fragment_path, fragment_args, and fragment_separator
# overlap - fragment_separator, fragment_path, and fragment_args
# take precedence.
with warnings.catch_warnings(record=True) as w7:
f.set(fragment='hi', fragment_path='!', fragment_args={'a': 'a'},
fragment_separator=False)
assert len(w7) == 1 and issubclass(w7[0].category, UserWarning)
assert str(f.fragment) == '!a=a'
with warnings.catch_warnings(record=True) as w8:
f.set(fragment='hi', fragment_path='bye')
assert len(w8) == 1 and issubclass(w8[0].category, UserWarning)
assert str(f.fragment) == 'bye'
with warnings.catch_warnings(record=True) as w9:
f.set(fragment='hi', fragment_args={'a': 'a'})
assert len(w9) == 1 and issubclass(w9[0].category, UserWarning)
assert str(f.fragment) == 'hia=a'
with warnings.catch_warnings(record=True) as w10:
f.set(fragment='!?a=a', fragment_separator=False)
assert len(w10) == 1 and issubclass(w10[0].category, UserWarning)
assert str(f.fragment) == '!a=a'
def test_remove(self):
    """furl.remove() deletes components in place and returns self.

    The same `f` instance is mutated through several remove() calls, so
    the assertions below are order-dependent.
    """
    url = ('http://u:p@host:69/a/big/path/?a=a&b=b&s=s+s#a frag?with=args'
           '&a=a')
    f = furl.furl(url)
    # Remove without parameters removes nothing.
    assert f.url == f.remove().url
    # username, password, and port are only removed when passed exactly
    # True; string values (even truthy ones) are ignored.
    assert f == f.copy().remove(
        username='nope', password='<PASSWORD>', port='nope')
    # Basics: remove() returns self, enabling chained/identity checks.
    assert f is f.remove(fragment=True, args=['a', 'b'], path='path/',
                         username=True, password=True, port=True)
    assert f.url == 'http://host/a/big/?s=s+s'
    # scheme, host, port, netloc, origin.
    f = furl.furl('https://host:999/path')
    assert f.copy().remove(scheme=True).url == '//host:999/path'
    assert f.copy().remove(host=True).url == 'https://:999/path'
    assert f.copy().remove(port=True).url == 'https://host/path'
    assert f.copy().remove(netloc=True).url == 'https:///path'
    assert f.copy().remove(origin=True).url == '/path'
    # No errors are thrown when removing URL components that don't exist.
    f = furl.furl(url)
    assert f is f.remove(fragment_path=['asdf'], fragment_args=['asdf'],
                         args=['asdf'], path=['ppp', 'ump'])
    assert self._param(f.url, 'a', 'a')
    assert self._param(f.url, 'b', 'b')
    assert self._param(f.url, 's', 's s')
    assert str(f.path) == '/a/big/path/'
    assert str(f.fragment.path) == 'a%20frag'
    assert f.fragment.args == {'a': 'a', 'with': 'args'}
    # Path as a list of paths to join before removing.
    assert f is f.remove(fragment_path='a frag', fragment_args=['a'],
                         query_params=['a', 'b'], path=['big', 'path', ''],
                         port=True)
    assert f.url == 'http://u:p@host/a/?s=s+s#with=args'
    assert f is f.remove(
        path=True, query=True, fragment=True, username=True,
        password=True)
    assert f.url == 'http://host'
def test_join(self):
    """furl.join() follows RFC 3986 relative-reference resolution.

    NOTE: `run_tests` is order-dependent — each (join, result) pair is
    applied to the same furl instance, so later expectations build on
    the state produced by earlier joins.
    """
    # Joining onto an empty furl adopts the joined URL wholesale.
    empty_tests = ['', '/meat', '/meat/pump?a=a&b=b#fragsup',
                   'sup://www.pumps.org/brg/pap/mrf?a=b&c=d#frag?sup', ]
    run_tests = [
        # Join full URLs.
        ('unknown://pepp.ru', 'unknown://pepp.ru'),
        ('unknown://pepp.ru?one=two&three=four',
         'unknown://pepp.ru?one=two&three=four'),
        ('unknown://pepp.ru/new/url/?one=two#blrp',
         'unknown://pepp.ru/new/url/?one=two#blrp'),
        # Absolute paths ('/foo').
        ('/pump', 'unknown://pepp.ru/pump'),
        ('/pump/2/dump', 'unknown://pepp.ru/pump/2/dump'),
        ('/pump/2/dump/', 'unknown://pepp.ru/pump/2/dump/'),
        # Relative paths ('../foo').
        ('./crit/', 'unknown://pepp.ru/pump/2/dump/crit/'),
        ('.././../././././srp', 'unknown://pepp.ru/pump/2/srp'),
        ('../././../nop', 'unknown://pepp.ru/nop'),
        # Query included.
        ('/erp/?one=two', 'unknown://pepp.ru/erp/?one=two'),
        ('morp?three=four', 'unknown://pepp.ru/erp/morp?three=four'),
        ('/root/pumps?five=six',
         'unknown://pepp.ru/root/pumps?five=six'),
        # Fragment included.
        ('#sup', 'unknown://pepp.ru/root/pumps?five=six#sup'),
        ('/reset?one=two#yepYEP',
         'unknown://pepp.ru/reset?one=two#yepYEP'),
        ('./slurm#uwantpump?', 'unknown://pepp.ru/slurm#uwantpump?'),
        # Unicode.
        ('/?kødpålæg=4', 'unknown://pepp.ru/?k%C3%B8dp%C3%A5l%C3%A6g=4'),
        (u'/?kødpålæg=4', 'unknown://pepp.ru/?k%C3%B8dp%C3%A5l%C3%A6g=4'),
    ]
    for test in empty_tests:
        f = furl.furl().join(test)
        assert f.url == test
    f = furl.furl('')
    for join, result in run_tests:
        assert f is f.join(join) and f.url == result
    # Join other furl object, which serialize to strings with str().
    f = furl.furl('')
    for join, result in run_tests:
        tojoin = furl.furl(join)
        assert f is f.join(tojoin) and f.url == result
    # Join multiple URLs in a single call; they are applied left to right.
    f = furl.furl('')
    f.join('path', 'tcp://blorp.biz', 'http://pepp.ru/', 'a/b/c',
           '#uwantpump?')
    assert f.url == 'http://pepp.ru/a/b/c#uwantpump?'
    # In edge cases (e.g. URLs without an authority/netloc), behave
    # identically to urllib.parse.urljoin(), which changed behavior in
    # Python 3.9.
    f = furl.furl('wss://slrp.com/').join('foo:1')
    if sys.version_info[:2] < (3, 9):
        assert f.url == 'wss://slrp.com/foo:1'
    else:
        assert f.url == 'foo:1'
    f = furl.furl('wss://slrp.com/').join('foo:1:rip')
    assert f.url == 'foo:1:rip'
    f = furl.furl('scheme:path').join('foo:blah')
    assert f.url == 'foo:blah'
def test_tostr(self):
    """Exercise tostr()'s query serialization options."""
    f = furl.furl('http://blast.off/?a+b=c+d&two%20tap=cat%20nap%24%21')
    # Default serialization round-trips to .url.
    assert f.tostr() == f.url

    with_semicolons = f.tostr(query_delimiter=';')
    assert with_semicolons == 'http://blast.off/?a+b=c+d;two+tap=cat+nap%24%21'

    percent_spaces = f.tostr(query_quote_plus=False)
    assert percent_spaces == (
        'http://blast.off/?a%20b=c%20d&two%20tap=cat%20nap%24%21')

    combined = f.tostr(query_delimiter=';', query_quote_plus=False)
    assert combined == (
        'http://blast.off/?a%20b=c%20d;two%20tap=cat%20nap%24%21')

    # query_dont_quote=True leaves even invalid query characters, like
    # '$', unencoded.
    unquoted = f.tostr(query_quote_plus=False, query_dont_quote=True)
    assert unquoted == 'http://blast.off/?a%20b=c%20d&two%20tap=cat%20nap$!'
    # A string whitelist exempts only the listed characters.
    dollar_only = f.tostr(query_quote_plus=False, query_dont_quote='$')
    assert dollar_only == (
        'http://blast.off/?a%20b=c%20d&two%20tap=cat%20nap$%21')

    url = 'https://klugg.com/?hi=*'
    url_encoded = 'https://klugg.com/?hi=%2A&url='
    f = furl.furl(url).set(args=[('hi', '*'), ('url', url)])
    assert f.tostr() == url_encoded + quote_plus(url)
    assert f.tostr(query_dont_quote=True) == url + '&url=' + url
    assert f.tostr(query_dont_quote='*') == (
        url + '&url=' + quote_plus(url, '*'))
def test_equality(self):
    """Equality is value-based: distinct furl objects with identical
    components compare equal; furls never equal plain strings."""
    left, right = furl.furl(), furl.furl()
    assert left is not right
    assert left == right
    assert furl.furl() is not None

    url = 'https://www.yahoo.co.uk/one/two/three?a=a&b=b&m=m%26m#fragment'
    # No furl to string comparisons.
    assert furl.furl(url) != url
    assert furl.furl(url) == furl.furl(url)
    # Changing any component breaks equality.
    assert furl.furl(url).remove(path=True) != furl.furl(url)
def test_urlsplit(self):
    """furl.urlsplit() mirrors urllib's urlsplit(), but also splits the
    query correctly for unrecognized schemes."""
    # Without any delimiters like '://' or '/', the input should be
    # treated as a path.
    for url in ('sup', '127.0.0.1', 'www.google.com', '192.168.1.1:8000'):
        split = furl.urlsplit(url)
        assert isinstance(split, SplitResult)
        assert split.path == urlsplit(url).path

    # No changes to existing urlsplit() behavior for known schemes.
    known_scheme_urls = (
        'http://www.pumps.com/',
        'https://www.yahoo.co.uk/one/two/three?a=a&b=b&m=m%26m#fragment',
    )
    for url in known_scheme_urls:
        split = furl.urlsplit(url)
        assert isinstance(split, SplitResult)
        assert split == urlsplit(url)

    # Properly split the query from the path for unknown schemes.
    unknown_scheme_cases = (
        ('unknown://www.yahoo.com?one=two&three=four',
         ('unknown', 'www.yahoo.com', '', 'one=two&three=four', '')),
        ('sup://192.168.1.102:8080///one//two////?s=kwl%20string#frag',
         ('sup', '192.168.1.102:8080', '///one//two////',
          's=kwl%20string', 'frag')),
        ('crazyyy://www.yahoo.co.uk/one/two/three?a=a&b=b&m=m%26m#frag',
         ('crazyyy', 'www.yahoo.co.uk', '/one/two/three',
          'a=a&b=b&m=m%26m', 'frag')),
    )
    for url, expected in unknown_scheme_cases:
        split = furl.urlsplit(url)
        assert isinstance(split, SplitResult)
        assert split == expected
def test_join_path_segments(self):
jps = furl.join_path_segments
# Empty.
assert jps() == []
assert jps([]) == []
assert jps([], [], [], []) == []
# Null strings.
# [''] means nothing, or an empty string, in the final path
# segments.
# ['', ''] is preserved as a slash in the final path segments.
assert jps(['']) == []
assert jps([''], ['']) == []
assert jps([''], [''], ['']) == []
assert jps([''], ['', '']) == ['', '']
assert jps([''], [''], [''], ['']) == []
assert jps(['', ''], ['', '']) == ['', '', '']
assert jps(['', '', ''], ['', '']) == ['', '', '', '']
assert jps(['', '', '', '', '', '']) == ['', '', '', '', '', '']
assert jps(['', '', '', ''], ['', '']) == ['', '', '', '', '']
assert jps(['', '', '', ''], ['', ''], ['']) == ['', '', '', '', '']
assert jps(['', '', '', ''], ['', '', '']) == ['', '', '', '', '', '']
# Basics.
assert jps(['a']) == ['a']
assert jps(['a', 'b']) == ['a', 'b']
assert jps(['a'], ['b']) == ['a', 'b']
assert jps(['1', '2', '3'], ['4', '5']) == ['1', '2', '3', '4', '5']
# A trailing slash is preserved if no new slash is being added.
# ex: ['a', ''] + ['b'] == ['a', 'b'], or 'a/' + 'b' == 'a/b'
assert jps(['a', ''], ['b']) == ['a', 'b']
assert jps(['a'], [''], ['b']) == ['a', 'b']
assert jps(['', 'a', ''], ['b']) == ['', 'a', 'b']
assert jps(['', | |
<reponame>kamadorueda/oblivion
# Standard imports
from typing import Tuple
# pylint: disable=too-many-lines
# Primes up to 4 ** 8 * 16 (1048576)
PRIMES: Tuple[int, ...] = (
2, 3, 5, 7, 11, 13, 17, 19,
23, 29, 31, 37, 41, 43, 47, 53,
59, 61, 67, 71, 73, 79, 83, 89,
97, 101, 103, 107, 109, 113, 127, 131,
137, 139, 149, 151, 157, 163, 167, 173,
179, 181, 191, 193, 197, 199, 211, 223,
227, 229, 233, 239, 241, 251, 257, 263,
269, 271, 277, 281, 283, 293, 307, 311,
313, 317, 331, 337, 347, 349, 353, 359,
367, 373, 379, 383, 389, 397, 401, 409,
419, 421, 431, 433, 439, 443, 449, 457,
461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569,
571, 577, 587, 593, 599, 601, 607, 613,
617, 619, 631, 641, 643, 647, 653, 659,
661, 673, 677, 683, 691, 701, 709, 719,
727, 733, 739, 743, 751, 757, 761, 769,
773, 787, 797, 809, 811, 821, 823, 827,
829, 839, 853, 857, 859, 863, 877, 881,
883, 887, 907, 911, 919, 929, 937, 941,
947, 953, 967, 971, 977, 983, 991, 997,
1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049,
1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097,
1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163,
1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,
1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283,
1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321,
1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423,
1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459,
1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511,
1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571,
1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619,
1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693,
1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747,
1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811,
1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877,
1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949,
1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003,
2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069,
2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129,
2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203,
2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267,
2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311,
2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377,
2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423,
2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503,
2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579,
2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657,
2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693,
2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741,
2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801,
2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861,
2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939,
2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011,
3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079,
3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167,
3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221,
3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301,
3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347,
3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413,
3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491,
3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541,
3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607,
3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671,
3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727,
3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797,
3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863,
3877, 3881, 3889, 3907, 3911, 3917, 3919, 3923,
3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003,
4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057,
4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129,
4133, 4139, 4153, 4157, 4159, 4177, 4201, 4211,
4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259,
4261, 4271, 4273, 4283, 4289, 4297, 4327, 4337,
4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409,
4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481,
4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547,
4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621,
4637, 4639, 4643, 4649, 4651, 4657, 4663, 4673,
4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751,
4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813,
4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909,
4919, 4931, 4933, 4937, 4943, 4951, 4957, 4967,
4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011,
5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087,
5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167,
5171, 5179, 5189, 5197, 5209, 5227, 5231, 5233,
5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309,
5323, 5333, 5347, 5351, 5381, 5387, 5393, 5399,
5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443,
5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507,
5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573,
5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653,
5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711,
5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791,
5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849,
5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897,
5903, 5923, 5927, 5939, 5953, 5981, 5987, 6007,
6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073,
6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133,
6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211,
6217, 6221, 6229, 6247, 6257, 6263, 6269, 6271,
6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329,
6337, 6343, 6353, 6359, 6361, 6367, 6373, 6379,
6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473,
6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563,
6569, 6571, 6577, 6581, 6599, 6607, 6619, 6637,
6653, 6659, 6661, 6673, 6679, 6689, 6691, 6701,
6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779,
6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833,
6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907,
6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971,
6977, 6983, 6991, 6997, 7001, 7013, 7019, 7027,
7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121,
7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207,
7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253,
7283, 7297, 7307, 7309, 7321, 7331, 7333, 7349,
7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457,
7459, 7477, 7481, 7487, 7489, 7499, 7507, 7517,
7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561,
7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621,
7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691,
7699, 7703, 7717, 7723, 7727, 7741, 7753, 7757,
7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853,
7867, 7873, 7877, 7879, 7883, 7901, 7907, 7919,
7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009,
8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087,
8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161,
8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231,
8233, 8237, 8243, 8263, 8269, 8273, 8287, 8291,
8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369,
8377, 8387, 8389, 8419, 8423, 8429, 8431, 8443,
8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537,
8539, 8543, 8563, 8573, 8581, 8597, 8599, 8609,
8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677,
8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731,
8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803,
8807, 8819, 8821, 8831, 8837, 8839, 8849, 8861,
8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941,
8951, 8963, 8969, 8971, 8999, 9001, 9007, 9011,
9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091,
9103, 9109, 9127, 9133, 9137, 9151, 9157, 9161,
9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227,
9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311,
9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377,
9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433,
9437, 9439, 9461, 9463, 9467, 9473, 9479, 9491,
9497, 9511, 9521, 9533, 9539, 9547, 9551, 9587,
9601, 9613, 9619, 9623, 9629, 9631, 9643, 9649,
9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733,
9739, 9743, 9749, 9767, 9769, 9781, 9787, 9791,
9803, 9811, 9817, 9829, 9833, 9839, 9851, 9857,
9859, 9871, 9883, 9887, 9901, 9907, 9923, 9929,
9931, 9941, 9949, 9967, 9973, 10007, 10009, 10037,
10039, 10061, 10067, 10069, 10079, 10091, 10093, 10099,
10103, 10111, 10133, 10139, 10141, 10151, 10159, 10163,
10169, 10177, 10181, 10193, 10211, 10223, 10243, 10247,
10253, 10259, 10267, | |
= models.CharField(max_length=100, blank=True, null=True)
# NOTE(review): field semantics below are inferred from field names only —
# confirm against the source data before relying on these comments.
main = models.BooleanField(default=False)  # presumably marks main-cast idols
main_unit = models.CharField(max_length=20, blank=True, null=True)
sub_unit = models.CharField(max_length=20, blank=True, null=True)
# Personal profile details; every field is optional.
age = models.PositiveIntegerField(blank=True, null=True)
school = models.CharField(max_length=100, blank=True, null=True)
birthday = models.DateField(null=True, blank=True, default=None)
astrological_sign = models.CharField(max_length=20, blank=True, null=True)
blood = models.CharField(max_length=3, blank=True, null=True)  # presumably blood type
height = models.PositiveIntegerField(blank=True, null=True)
measurements = models.CharField(max_length=20, blank=True, null=True)
favorite_food = models.CharField(max_length=100, blank=True, null=True)
least_favorite_food = models.CharField(max_length=100, blank=True, null=True)
hobbies = models.CharField(max_length=100, blank=True, null=True)
attribute = models.CharField(choices=ATTRIBUTE_CHOICES, max_length=6)
year = models.CharField(max_length=10, blank=True, null=True)
# Voice actor ("CV") details and related external links.
cv = models.CharField(max_length=100, blank=True, null=True)
cv_url = models.CharField(max_length=200, blank=True, null=True)
cv_nickname = models.CharField(max_length=20, blank=True, null=True)
cv_twitter = models.CharField(max_length=200, blank=True, null=True)
cv_instagram = models.CharField(max_length=200, blank=True, null=True)
official_url = models.CharField(max_length=200, blank=True, null=True)
summary = models.TextField(null=True, blank=True)
def __unicode__(self):
return self.name
@property
def short_name(self):
return self.name.split(' ')[-1]
admin.site.register(Idol)
# minimum, maximum, promo
SKILL_SLOTS_MINMAX = {
'N': [0, 1, 0],
'R': [1, 2, 1],
'SR': [2, 4, 1],
'SSR': [3, 6, 2],
'UR': [4, 8, 2],
}
class Card(ExportModelOperationsMixin('Card'), models.Model):
id = models.PositiveIntegerField(unique=True, help_text="Number of the card in the album", primary_key=3)
game_id = models.PositiveIntegerField(unique=True, null=True)
idol = models.ForeignKey(Idol, related_name='cards', blank=True, null=True, on_delete=models.SET_NULL)
japanese_collection = models.CharField(max_length=100, blank=True, null=True)
english_collection = models.CharField(max_length=100, blank=True, null=True)
translated_collection = models.CharField(max_length=100, blank=True, null=True)
rarity = models.CharField(choices=RARITY_CHOICES, max_length=10)
attribute = models.CharField(choices=ATTRIBUTE_CHOICES, max_length=6)
is_promo = models.BooleanField(default=False, help_text="Promo cards are already idolized. It is not possible to scout them, since they come with bought items or in the game on special occasions.")
promo_item = models.CharField(max_length=100, blank=True, null=True)
promo_link = models.CharField(max_length=300, blank=True, null=True)
release_date = models.DateField(default=datetime.date(2013, 4, 16), null=True, blank=True)
event = models.ForeignKey(Event, related_name='cards', blank=True, null=True, on_delete=models.SET_NULL)
other_event = models.ForeignKey(Event, related_name='other_cards', blank=True, null=True, on_delete=models.SET_NULL)
is_special = models.BooleanField(default=False, help_text="Special cards cannot be added in a team but they can be used in training.")
japan_only = models.BooleanField(default=True)
seal_shop = models.BooleanField(default=False)
hp = models.PositiveIntegerField(null=True, default=0, blank=True)
minimum_statistics_smile = models.PositiveIntegerField(null=True)
minimum_statistics_pure = models.PositiveIntegerField(null=True)
minimum_statistics_cool = models.PositiveIntegerField(null=True)
non_idolized_maximum_statistics_smile = models.PositiveIntegerField(null=True)
non_idolized_maximum_statistics_pure = models.PositiveIntegerField(null=True)
non_idolized_maximum_statistics_cool = models.PositiveIntegerField(null=True)
idolized_maximum_statistics_smile = models.PositiveIntegerField(null=True)
idolized_maximum_statistics_pure = models.PositiveIntegerField(null=True)
idolized_maximum_statistics_cool = models.PositiveIntegerField(null=True)
skill = models.TextField(null=True, blank=True)
japanese_skill = models.TextField(null=True, blank=True)
skill_details = models.TextField(null=True, blank=True)
japanese_skill_details = models.TextField(null=True, blank=True)
center_skill = models.TextField(null=True, blank=True)
center_skill_extra_type = models.CharField(choices=CENTER_SKILL_TYPE_CHOICES, null=True, blank=True, max_length=20)
transparent_image = models.ImageField(upload_to='cards/transparent/', null=True, blank=True)
transparent_idolized_image = models.ImageField(upload_to='cards/transparent/', null=True, blank=True)
card_image = models.ImageField(upload_to='c/', null=True, blank=True)
card_idolized_image = models.ImageField(upload_to='c/', null=True, blank=True)
english_card_image = models.ImageField(upload_to='cards/', null=True, blank=True)
english_card_idolized_image = models.ImageField(upload_to='cards/', null=True, blank=True)
round_card_image = models.ImageField(upload_to='c/', null=True, blank=True)
round_card_idolized_image = models.ImageField(upload_to='c/', null=True, blank=True)
english_round_card_image = models.ImageField(upload_to='cards/', null=True, blank=True)
english_round_card_idolized_image = models.ImageField(upload_to='cards/', null=True, blank=True)
video_story = models.CharField(max_length=300, blank=True, null=True)
japanese_video_story = models.CharField(max_length=300, blank=True, null=True)
_skill_up_cards = models.CharField(max_length=300, blank=True, null=True)
ur_pair = models.ForeignKey('self', related_name='other_ur_pair', on_delete=models.SET_NULL, null=True, blank=True)
ur_pair_reverse = models.BooleanField(default=False)
ur_pair_idolized_reverse = models.BooleanField(default=False)
clean_ur = models.ImageField(upload_to='web/static/cards/ur_pairs/', null=True, blank=True)
clean_ur_idolized = models.ImageField(upload_to='web/static/cards/ur_pairs/', null=True, blank=True)
# cache
total_owners = models.PositiveIntegerField(null=True, blank=True)
total_wishlist = models.PositiveIntegerField(null=True, blank=True)
ranking_attribute = models.PositiveIntegerField(null=True, blank=True)
ranking_rarity = models.PositiveIntegerField(null=True, blank=True)
ranking_special = models.PositiveIntegerField(null=True, blank=True)
name = models.CharField(max_length=100, blank=True)
japanese_name = models.CharField(max_length=100, blank=True, null=True)
idol_school = models.CharField(max_length=100, blank=True, null=True)
idol_year = models.CharField(max_length=10, blank=True, null=True)
idol_main_unit = models.CharField(max_length=20, blank=True, null=True)
idol_sub_unit = models.CharField(max_length=20, blank=True, null=True)
event_japanese_name = models.CharField(max_length=100, blank=True, null=True)
event_english_name = models.CharField(max_length=100, blank=True, null=True)
event_image = models.CharField(max_length=200, null=True, blank=True)
other_event_japanese_name = models.CharField(max_length=100, blank=True, null=True)
other_event_english_name = models.CharField(max_length=100, blank=True, null=True)
other_event_image = models.CharField(max_length=200, null=True, blank=True)
ur_pair_name = models.CharField(max_length=100, blank=True)
ur_pair_round_card_image = models.CharField(max_length=200, null=True, blank=True)
ur_pair_attribute = models.CharField(choices=ATTRIBUTE_CHOICES, max_length=6, blank=True, null=True)
@property
def min_skill_slot(self):
if self.is_promo and False:
return SKILL_SLOTS_MINMAX[self.rarity][2]
return SKILL_SLOTS_MINMAX[self.rarity][0]
@property
def max_skill_slot(self):
if self.is_promo and False:
return SKILL_SLOTS_MINMAX[self.rarity][2]
return SKILL_SLOTS_MINMAX[self.rarity][1]
@property
def short_name(self):
return self.name.split(' ')[-1]
def japanese_attribute(self):
return japanese_attribute(self.attribute)
def get_owned_cards_for_account(self, account):
return OwnedCard.objects.filter(owner_account=account, card=self)
def __unicode__(self):
return u'#' + unicode(self.id) + u' ' + unicode(self.name) + u' ' + unicode(self.rarity)
def get_center_skill_details(self):
try:
attribute, skill = self.center_skill.split(' ')
if skill in CENTER_SKILL_UR:
if CENTER_SKILL_UR[skill] != attribute:
return CENTER_SKILL_SENTENCES['differentUR'], [attribute, CENTER_SKILL_UR[skill]]
return CENTER_SKILL_SENTENCES['UR'], [attribute]
return CENTER_SKILL_SENTENCES[skill], [attribute]
except (ValueError, AttributeError, KeyError):
return None, None
@property
def center_skill_extra_type_sentence(self):
if self.center_skill_extra_type == 'main_unit':
return self.idol_main_unit
elif self.center_skill_extra_type == 'sub_unit':
return self.idol_sub_unit
elif self.center_skill_extra_type == 'year':
return _(u'{} years').format(self.idol_year)
@property
def center_skill_extra(self):
if not self.center_skill_extra_type: return None
return EXTRA_CENTER_SKILL_SENTENCE.format(
type=self.center_skill_extra_type_sentence,
attribute=self.attribute,
points=CENTER_EXTRA_POINTS[(self.rarity, self.center_skill_extra_type)],
)
@property
def ur_pair_japanese_attribute(self):
return japanese_attribute(self.ur_pair_attribute)
@property
def skill_up_cards(self):
if not self._skill_up_cards:
return []
return [(int(s.split('-')[0]), s.split('-')[-1]) for s in self._skill_up_cards.split(',')]
@property
def url(self):
return singlecardurl(self)
admin.site.register(Card)
class Account(ExportModelOperationsMixin('Account'), models.Model):
owner = models.ForeignKey(User, related_name='accounts_set')
nickname = models.CharField(_("Nickname"), blank=True, max_length=20)
friend_id = models.PositiveIntegerField(_("Friend ID"), blank=True, null=True, help_text=_('You can find your friend id by going to the "Friends" section from the home, then "ID Search". Players will be able to send you friend requests or messages using this number.'))
show_friend_id = models.BooleanField('', default=True, help_text=_('Should your friend ID be visible to other players?'))
accept_friend_requests = models.NullBooleanField(_('Accept friend requests'), blank=True, null=True)
transfer_code = models.CharField(_("Transfer Code"), blank=True, max_length=100, help_text=_('It\'s important to always have an active transfer code, since it will allow you to retrieve your account in case you loose your device. We can store it for you here: only you will be able to see it. To generate it, go to the settings and use the first button below the one to change your name in the first tab.'))
device = models.CharField(_('Device'), help_text=_('The modele of your device. Example: Nexus 5, iPhone 4, iPad 2, ...'), max_length=150, null=True, blank=True)
play_with = models.CharField(_('Play with'), blank=True, null=True, max_length=30, choices=PLAYWITH_CHOICES)
language = models.CharField(_("Language"), choices=LANGUAGE_CHOICES, default='JP', max_length=10, help_text=_('This is the version of the game you play.'))
os = models.CharField(_("Operating System"), choices=OS_CHOICES, default='iOs', max_length=10)
center = models.ForeignKey('OwnedCard', verbose_name=_("Center"), null=True, blank=True, help_text=_('The character that talks to you on your home screen.'), on_delete=models.SET_NULL)
rank = models.PositiveIntegerField(_("Rank"), blank=True, null=True)
verified = models.PositiveIntegerField(_("Verified"), default=0, choices=VERIFIED_CHOICES)
default_tab = models.CharField(_('Default tab'), max_length=30, choices=ACCOUNT_TAB_CHOICES, help_text=_('What people see first when they take a look at your account.'), default='deck')
starter = models.ForeignKey(Card, verbose_name=_("Starter"), null=True, blank=True, help_text=_('The character that you selected when you started playing.'), on_delete=models.SET_NULL)
creation = models.DateField(blank=True, null=True, verbose_name=_('Creation'), help_text=_('When you started playing with this account.'))
show_creation = models.BooleanField('', default=True, help_text=_('Should this date be visible to other players?'))
loveca = models.PositiveIntegerField(_('Love gems'), help_text=string_concat(_('Number of {} you currently have in your account.').format(_('Love gems')), ' ', _('This field is completely optional, it\'s here to help you manage your accounts.')), default=0)
friend_points = models.PositiveIntegerField(_('Friend Points'), help_text=string_concat(_('Number of {} you currently have in your account.').format(_('Friend Points')), ' ', _('This field is completely optional, it\'s here to help you manage your accounts.')), default=0)
g = models.PositiveIntegerField('G', help_text=string_concat(_('Number of {} you currently have in your account.').format('G'), ' ', _('This field is completely optional, it\'s here to help you manage your accounts.')), default=0)
tickets = models.PositiveIntegerField('Scouting Tickets', help_text=string_concat(_('Number of {} you currently have in your account.').format('Scouting Tickets'), ' ', _('This field is completely optional, it\'s here to help you manage your accounts.')), default=0)
vouchers = models.PositiveIntegerField('Vouchers (blue tickets)', help_text=string_concat(_('Number of {} you currently have in your account.').format('Vouchers (blue tickets)'), ' ', _('This field is completely optional, it\'s here to help you manage your accounts.')), default=0)
bought_loveca = models.PositiveIntegerField(_('Total love gems bought'), help_text=_('You can calculate that number in "Other" then "Purchase History". Leave it empty to stay F2P.'), null=True, blank=True)
show_items = models.BooleanField('', default=True, help_text=_('Should your items be visible to other players?'))
fake = models.BooleanField(_('Fake'), default=False)
# Cache
owner_username = models.CharField(max_length=32, null=True, blank=True)
center_card_transparent_image = models.CharField(max_length=200, null=True, blank=True)
center_card_round_image = models.CharField(max_length=200, null=True, blank=True)
center_card_attribute = models.CharField(choices=ATTRIBUTE_CHOICES, max_length=6, blank=True, null=True)
center_card_id = models.PositiveIntegerField(default=0)
center_alt_text = models.CharField(max_length=100, null=True, blank=True)
ranking = models.PositiveIntegerField(null=True, blank=True)
@property
def website_url(self):
return 'http://schoolido.lu/user/{}/#{}'.format(self.owner_username, self.id)
@property
def money_spent(self):
if not self.bought_loveca:
return None
return int(round(self.bought_loveca * settings.LOVECA_PRICE))
@property
def days_played(self):
if not self.creation:
return None
today = datetime.date.today()
return (today - self.creation).days
def _get_starter_idol(self):
a = (e for e in raw_information.items() if e[1]['starter'] == self.starter_id).next()
return a
@property
def starter_card_round_image(self):
if not self.starter_id:
return None
return 'c/' + str(self.starter_id) + 'Round' + self._get_starter_idol()[0].split(' ')[-1] + '.png'
@property
def starter_name(self):
if not self.starter_id:
return None
return self._get_starter_idol()[0]
@property
def starter_attribute(self):
if not self.starter_id:
return None
return 'Smile'
@property
def starter_alt_text(self):
if not self.starter_id:
return None
return "#{} {} R".format(self.starter_id, self._get_starter_idol()[0])
def __unicode__(self):
return (unicode(self.owner.username) if self.nickname == '' else unicode(self.nickname)) + u' ' + unicode(self.language)
admin.site.register(Account)
class OwnedCard(ExportModelOperationsMixin('OwnedCard'), models.Model):
owner_account = | |
spec_hash(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "spec_hash", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@pulumi.input_type
class ResourceGroupSpecArgs:
def __init__(__self__, *,
location: pulumi.Input[str]):
"""
ResourceGroupSpec defines the desired state of ResourceGroup
:param pulumi.Input[str] location: INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file
"""
pulumi.set(__self__, "location", location)
@property
@pulumi.getter
def location(self) -> pulumi.Input[str]:
"""
INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: pulumi.Input[str]):
pulumi.set(self, "location", value)
@pulumi.input_type
class ResourceGroupStatusArgs:
def __init__(__self__, *,
completed: Optional[pulumi.Input[str]] = None,
contains_update: Optional[pulumi.Input[bool]] = None,
failed_provisioning: Optional[pulumi.Input[bool]] = None,
flattened_secrets: Optional[pulumi.Input[bool]] = None,
message: Optional[pulumi.Input[str]] = None,
output: Optional[pulumi.Input[str]] = None,
polling_url: Optional[pulumi.Input[str]] = None,
provisioned: Optional[pulumi.Input[bool]] = None,
provisioning: Optional[pulumi.Input[bool]] = None,
requested: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
spec_hash: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None):
"""
ASOStatus (AzureServiceOperatorsStatus) defines the observed state of resource actions
"""
if completed is not None:
pulumi.set(__self__, "completed", completed)
if contains_update is not None:
pulumi.set(__self__, "contains_update", contains_update)
if failed_provisioning is not None:
pulumi.set(__self__, "failed_provisioning", failed_provisioning)
if flattened_secrets is not None:
pulumi.set(__self__, "flattened_secrets", flattened_secrets)
if message is not None:
pulumi.set(__self__, "message", message)
if output is not None:
pulumi.set(__self__, "output", output)
if polling_url is not None:
pulumi.set(__self__, "polling_url", polling_url)
if provisioned is not None:
pulumi.set(__self__, "provisioned", provisioned)
if provisioning is not None:
pulumi.set(__self__, "provisioning", provisioning)
if requested is not None:
pulumi.set(__self__, "requested", requested)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if spec_hash is not None:
pulumi.set(__self__, "spec_hash", spec_hash)
if state is not None:
pulumi.set(__self__, "state", state)
@property
@pulumi.getter
def completed(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "completed")
@completed.setter
def completed(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "completed", value)
@property
@pulumi.getter(name="containsUpdate")
def contains_update(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "contains_update")
@contains_update.setter
def contains_update(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "contains_update", value)
@property
@pulumi.getter(name="failedProvisioning")
def failed_provisioning(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "failed_provisioning")
@failed_provisioning.setter
def failed_provisioning(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "failed_provisioning", value)
@property
@pulumi.getter(name="flattenedSecrets")
def flattened_secrets(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "flattened_secrets")
@flattened_secrets.setter
def flattened_secrets(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "flattened_secrets", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def output(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "output")
@output.setter
def output(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "output", value)
@property
@pulumi.getter(name="pollingUrl")
def polling_url(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "polling_url")
@polling_url.setter
def polling_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "polling_url", value)
@property
@pulumi.getter
def provisioned(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "provisioned")
@provisioned.setter
def provisioned(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "provisioned", value)
@property
@pulumi.getter
def provisioning(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "provisioning")
@provisioning.setter
def provisioning(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "provisioning", value)
@property
@pulumi.getter
def requested(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "requested")
@requested.setter
def requested(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "requested", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter(name="specHash")
def spec_hash(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "spec_hash")
@spec_hash.setter
def spec_hash(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "spec_hash", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@pulumi.input_type
class StorageAccountAdditionalResourcesArgs:
def __init__(__self__, *,
secrets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
StorageAccountAdditionalResources holds the additional resources
"""
if secrets is not None:
pulumi.set(__self__, "secrets", secrets)
@property
@pulumi.getter
def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "secrets")
@secrets.setter
def secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "secrets", value)
@pulumi.input_type
class StorageAccountOutputArgs:
def __init__(__self__, *,
connection_string1: Optional[pulumi.Input[str]] = None,
connection_string2: Optional[pulumi.Input[str]] = None,
key1: Optional[pulumi.Input[str]] = None,
key2: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None):
"""
StorageAccountOutput is the object that contains the output from creating a Storage Account object
"""
if connection_string1 is not None:
pulumi.set(__self__, "connection_string1", connection_string1)
if connection_string2 is not None:
pulumi.set(__self__, "connection_string2", connection_string2)
if key1 is not None:
pulumi.set(__self__, "key1", key1)
if key2 is not None:
pulumi.set(__self__, "key2", key2)
if storage_account_name is not None:
pulumi.set(__self__, "storage_account_name", storage_account_name)
@property
@pulumi.getter(name="connectionString1")
def connection_string1(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "connection_string1")
@connection_string1.setter
def connection_string1(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_string1", value)
@property
@pulumi.getter(name="connectionString2")
def connection_string2(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "connection_string2")
@connection_string2.setter
def connection_string2(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_string2", value)
@property
@pulumi.getter
def key1(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "key1")
@key1.setter
def key1(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key1", value)
@property
@pulumi.getter
def key2(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "key2")
@key2.setter
def key2(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key2", value)
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "storage_account_name")
@storage_account_name.setter
def storage_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_account_name", value)
@pulumi.input_type
class StorageAccountSpecArgs:
def __init__(__self__, *,
resource_group: pulumi.Input[str],
access_tier: Optional[pulumi.Input[str]] = None,
data_lake_enabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
network_rule: Optional[pulumi.Input['StorageAccountSpecNetworkRuleArgs']] = None,
sku: Optional[pulumi.Input['StorageAccountSpecSkuArgs']] = None,
supports_https_traffic_only: Optional[pulumi.Input[bool]] = None):
"""
StorageAccountSpec defines the desired state of Storage
:param pulumi.Input[str] access_tier: StorageAccountAccessTier enumerates the values for access tier. Only one of the following access tiers may be specified. If none of the following access tiers is specified, the default one is Hot.
:param pulumi.Input[str] kind: StorageAccountKind enumerates the values for kind. Only one of the following kinds may be specified. If none of the following kinds is specified, the default one is StorageV2.
:param pulumi.Input['StorageAccountSpecSkuArgs'] sku: StorageAccountSku the SKU of the storage account.
"""
pulumi.set(__self__, "resource_group", resource_group)
if access_tier is not None:
pulumi.set(__self__, "access_tier", access_tier)
if data_lake_enabled is not None:
pulumi.set(__self__, "data_lake_enabled", data_lake_enabled)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if network_rule is not None:
pulumi.set(__self__, "network_rule", network_rule)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if supports_https_traffic_only is not None:
pulumi.set(__self__, "supports_https_traffic_only", supports_https_traffic_only)
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group")
@resource_group.setter
def resource_group(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group", value)
@property
@pulumi.getter(name="accessTier")
def access_tier(self) -> Optional[pulumi.Input[str]]:
"""
StorageAccountAccessTier enumerates the values for access tier. Only one of the following access tiers may be specified. If none of the following access tiers is specified, the default one is Hot.
"""
return pulumi.get(self, "access_tier")
@access_tier.setter
def access_tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_tier", value)
@property
@pulumi.getter(name="dataLakeEnabled")
def data_lake_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "data_lake_enabled")
@data_lake_enabled.setter
def data_lake_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "data_lake_enabled", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
StorageAccountKind enumerates the values for kind. Only one of the following kinds may be specified. If none of the following kinds is specified, the default one is StorageV2.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="networkRule")
def network_rule(self) -> Optional[pulumi.Input['StorageAccountSpecNetworkRuleArgs']]:
return pulumi.get(self, "network_rule")
@network_rule.setter
def network_rule(self, value: Optional[pulumi.Input['StorageAccountSpecNetworkRuleArgs']]):
pulumi.set(self, "network_rule", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['StorageAccountSpecSkuArgs']]:
"""
StorageAccountSku the SKU of the storage account.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['StorageAccountSpecSkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="supportsHttpsTrafficOnly")
def supports_https_traffic_only(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "supports_https_traffic_only")
@supports_https_traffic_only.setter
def supports_https_traffic_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "supports_https_traffic_only", value)
@pulumi.input_type
class StorageAccountSpecNetworkRuleArgs:
def __init__(__self__, *,
bypass: Optional[pulumi.Input[str]] = None,
default_action: Optional[pulumi.Input[str]] = None,
ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input['StorageAccountSpecNetworkRuleIpRulesArgs']]]] = None,
virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input['StorageAccountSpecNetworkRuleVirtualNetworkRulesArgs']]]] = None):
"""
:param pulumi.Input[str] bypass: Bypass - Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of those traffics. Possible values include: 'None', 'Logging', 'Metrics', 'AzureServices'
:param pulumi.Input[str] default_action: DefaultAction - Specifies the default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny'
:param pulumi.Input[Sequence[pulumi.Input['StorageAccountSpecNetworkRuleIpRulesArgs']]] ip_rules: IPRules - Sets the IP ACL rules
:param pulumi.Input[Sequence[pulumi.Input['StorageAccountSpecNetworkRuleVirtualNetworkRulesArgs']]] virtual_network_rules: VirtualNetworkRules - Sets the virtual network rules
"""
if bypass is not None:
pulumi.set(__self__, "bypass", bypass)
if default_action is not None:
pulumi.set(__self__, "default_action", default_action)
if ip_rules is not None:
pulumi.set(__self__, "ip_rules", ip_rules)
if virtual_network_rules is not None:
pulumi.set(__self__, "virtual_network_rules", virtual_network_rules)
@property
@pulumi.getter
def bypass(self) -> Optional[pulumi.Input[str]]:
"""
Bypass - Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of those traffics. Possible values include: 'None', 'Logging', 'Metrics', 'AzureServices'
"""
return pulumi.get(self, "bypass")
@bypass.setter
def bypass(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bypass", value)
@property
@pulumi.getter(name="defaultAction")
def default_action(self) -> Optional[pulumi.Input[str]]:
"""
DefaultAction | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AdmCredentialResponse',
'ApnsCredentialResponse',
'BaiduCredentialResponse',
'GcmCredentialResponse',
'MpnsCredentialResponse',
'SharedAccessAuthorizationRulePropertiesResponse',
'SharedAccessAuthorizationRuleResourceResponse',
'SkuResponse',
'WnsCredentialResponse',
]
@pulumi.output_type
class AdmCredentialResponse(dict):
"""
Description of a NotificationHub AdmCredential.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authTokenUrl":
suggest = "auth_token_url"
elif key == "clientId":
suggest = "client_id"
elif key == "clientSecret":
suggest = "client_secret"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AdmCredentialResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AdmCredentialResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AdmCredentialResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
auth_token_url: Optional[str] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None):
"""
Description of a NotificationHub AdmCredential.
:param str auth_token_url: The URL of the authorization token.
:param str client_id: The client identifier.
:param str client_secret: The credential secret access key.
"""
if auth_token_url is not None:
pulumi.set(__self__, "auth_token_url", auth_token_url)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
@property
@pulumi.getter(name="authTokenUrl")
def auth_token_url(self) -> Optional[str]:
"""
The URL of the authorization token.
"""
return pulumi.get(self, "auth_token_url")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[str]:
"""
The client identifier.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
"""
The credential secret access key.
"""
return pulumi.get(self, "client_secret")
@pulumi.output_type
class ApnsCredentialResponse(dict):
    """
    Description of a NotificationHub ApnsCredential.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names below.
        suggest = {
            "apnsCertificate": "apns_certificate",
            "appId": "app_id",
            "appName": "app_name",
            "certificateKey": "certificate_key",
            "keyId": "key_id",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ApnsCredentialResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ApnsCredentialResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        ApnsCredentialResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 apns_certificate: Optional[str] = None,
                 app_id: Optional[str] = None,
                 app_name: Optional[str] = None,
                 certificate_key: Optional[str] = None,
                 endpoint: Optional[str] = None,
                 key_id: Optional[str] = None,
                 thumbprint: Optional[str] = None,
                 token: Optional[str] = None):
        """
        Description of a NotificationHub ApnsCredential.
        :param str apns_certificate: The APNS certificate. Specify if using Certificate Authentication Mode.
        :param str app_id: The issuer (iss) registered claim key. The value is a 10-character TeamId, obtained from your developer account. Specify if using Token Authentication Mode.
        :param str app_name: The name of the application or BundleId. Specify if using Token Authentication Mode.
        :param str certificate_key: The APNS certificate password if it exists.
        :param str endpoint: The APNS endpoint of this credential. If using Certificate Authentication Mode and Sandbox specify 'gateway.sandbox.push.apple.com'. If using Certificate Authentication Mode and Production specify 'gateway.push.apple.com'. If using Token Authentication Mode and Sandbox specify 'https://api.development.push.apple.com:443/3/device'. If using Token Authentication Mode and Production specify 'https://api.push.apple.com:443/3/device'.
        :param str key_id: A 10-character key identifier (kid) key, obtained from your developer account. Specify if using Token Authentication Mode.
        :param str thumbprint: The APNS certificate thumbprint. Specify if using Certificate Authentication Mode.
        :param str token: Provider Authentication Token, obtained through your developer account. Specify if using Token Authentication Mode.
        """
        # Only store keys whose values were actually supplied.
        for prop, value in (("apns_certificate", apns_certificate),
                            ("app_id", app_id),
                            ("app_name", app_name),
                            ("certificate_key", certificate_key),
                            ("endpoint", endpoint),
                            ("key_id", key_id),
                            ("thumbprint", thumbprint),
                            ("token", token)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="apnsCertificate")
    def apns_certificate(self) -> Optional[str]:
        """
        The APNS certificate. Specify if using Certificate Authentication Mode.
        """
        return pulumi.get(self, "apns_certificate")

    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> Optional[str]:
        """
        The issuer (iss) registered claim key. The value is a 10-character TeamId, obtained from your developer account. Specify if using Token Authentication Mode.
        """
        return pulumi.get(self, "app_id")

    @property
    @pulumi.getter(name="appName")
    def app_name(self) -> Optional[str]:
        """
        The name of the application or BundleId. Specify if using Token Authentication Mode.
        """
        return pulumi.get(self, "app_name")

    @property
    @pulumi.getter(name="certificateKey")
    def certificate_key(self) -> Optional[str]:
        """
        The APNS certificate password if it exists.
        """
        return pulumi.get(self, "certificate_key")

    @property
    @pulumi.getter
    def endpoint(self) -> Optional[str]:
        """
        The APNS endpoint of this credential. If using Certificate Authentication Mode and Sandbox specify 'gateway.sandbox.push.apple.com'. If using Certificate Authentication Mode and Production specify 'gateway.push.apple.com'. If using Token Authentication Mode and Sandbox specify 'https://api.development.push.apple.com:443/3/device'. If using Token Authentication Mode and Production specify 'https://api.push.apple.com:443/3/device'.
        """
        return pulumi.get(self, "endpoint")

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> Optional[str]:
        """
        A 10-character key identifier (kid) key, obtained from your developer account. Specify if using Token Authentication Mode.
        """
        return pulumi.get(self, "key_id")

    @property
    @pulumi.getter
    def thumbprint(self) -> Optional[str]:
        """
        The APNS certificate thumbprint. Specify if using Certificate Authentication Mode.
        """
        return pulumi.get(self, "thumbprint")

    @property
    @pulumi.getter
    def token(self) -> Optional[str]:
        """
        Provider Authentication Token, obtained through your developer account. Specify if using Token Authentication Mode.
        """
        return pulumi.get(self, "token")
@pulumi.output_type
class BaiduCredentialResponse(dict):
    """
    Description of a NotificationHub BaiduCredential.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names below.
        suggest = {
            "baiduApiKey": "baidu_api_key",
            "baiduEndPoint": "baidu_end_point",
            "baiduSecretKey": "baidu_secret_key",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BaiduCredentialResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BaiduCredentialResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        BaiduCredentialResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 baidu_api_key: Optional[str] = None,
                 baidu_end_point: Optional[str] = None,
                 baidu_secret_key: Optional[str] = None):
        """
        Description of a NotificationHub BaiduCredential.
        :param str baidu_api_key: Baidu Api Key.
        :param str baidu_end_point: Baidu Endpoint.
        :param str baidu_secret_key: Baidu Secret Key
        """
        # Only store keys whose values were actually supplied.
        for prop, value in (("baidu_api_key", baidu_api_key),
                            ("baidu_end_point", baidu_end_point),
                            ("baidu_secret_key", baidu_secret_key)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="baiduApiKey")
    def baidu_api_key(self) -> Optional[str]:
        """
        Baidu Api Key.
        """
        return pulumi.get(self, "baidu_api_key")

    @property
    @pulumi.getter(name="baiduEndPoint")
    def baidu_end_point(self) -> Optional[str]:
        """
        Baidu Endpoint.
        """
        return pulumi.get(self, "baidu_end_point")

    @property
    @pulumi.getter(name="baiduSecretKey")
    def baidu_secret_key(self) -> Optional[str]:
        """
        Baidu Secret Key
        """
        return pulumi.get(self, "baidu_secret_key")
@pulumi.output_type
class GcmCredentialResponse(dict):
    """
    Description of a NotificationHub GcmCredential.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names below.
        suggest = {
            "gcmEndpoint": "gcm_endpoint",
            "googleApiKey": "google_api_key",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in GcmCredentialResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        GcmCredentialResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        GcmCredentialResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 gcm_endpoint: Optional[str] = None,
                 google_api_key: Optional[str] = None):
        """
        Description of a NotificationHub GcmCredential.
        :param str gcm_endpoint: The FCM legacy endpoint. Default value is 'https://fcm.googleapis.com/fcm/send'
        :param str google_api_key: The Google API key.
        """
        # Only store keys whose values were actually supplied.
        for prop, value in (("gcm_endpoint", gcm_endpoint),
                            ("google_api_key", google_api_key)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="gcmEndpoint")
    def gcm_endpoint(self) -> Optional[str]:
        """
        The FCM legacy endpoint. Default value is 'https://fcm.googleapis.com/fcm/send'
        """
        return pulumi.get(self, "gcm_endpoint")

    @property
    @pulumi.getter(name="googleApiKey")
    def google_api_key(self) -> Optional[str]:
        """
        The Google API key.
        """
        return pulumi.get(self, "google_api_key")
@pulumi.output_type
class MpnsCredentialResponse(dict):
"""
Description of a NotificationHub MpnsCredential.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "certificateKey":
suggest = "certificate_key"
elif key == "mpnsCertificate":
suggest = "mpns_certificate"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MpnsCredentialResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MpnsCredentialResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MpnsCredentialResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
certificate_key: Optional[str] = None,
mpns_certificate: Optional[str] = None,
thumbprint: Optional[str] = None):
"""
Description of a NotificationHub MpnsCredential.
:param str certificate_key: The certificate key for this credential.
:param str mpns_certificate: The MPNS certificate.
:param str thumbprint: The MPNS certificate Thumbprint
"""
if certificate_key is not None:
pulumi.set(__self__, | |
<reponame>NunaInc/sql_tools
#
# nuna_sql_tools: Copyright 2022 Nuna Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Converts Schema to Clickhouse specific SQL create table statement."""
import dataclasses
import os
from google.protobuf import descriptor
from dataschema import Schema, Schema_pb2, proto2schema, python2schema, strutil
from types import ModuleType
from typing import Dict, List, Optional
def GetIndent(indent: int) -> str:
    """Returns a string of `indent` spaces."""
    return indent * ' '
def GetTimestampStr(column: Schema.Column) -> str:
    """Returns the `(precision[, "timezone"])` suffix for a timestamp column.

    Returns the empty string when the column carries no timestamp info.
    """
    info = column.timestamp_info()
    if info is None:
        return ''
    parts = [str(info.precision)]
    if info.timezone:
        parts.append(f'"{info.timezone}"')
    return '(' + ', '.join(parts) + ')'
# Indentation width (in spaces) used when pretty-printing nested column blocks.
TAB_SIZE = 2
# Maps schema proto column types to their Clickhouse type names.
# TYPE_DECIMAL and TYPE_DATETIME_64 get their parameters (width/scale,
# precision/timezone) appended separately by TableConverter.
CLICKHOUSE_TYPE_NAME = {
    Schema_pb2.ColumnInfo.TYPE_STRING: 'String',
    Schema_pb2.ColumnInfo.TYPE_BYTES: 'String',
    Schema_pb2.ColumnInfo.TYPE_BOOLEAN: 'UInt8',
    Schema_pb2.ColumnInfo.TYPE_INT_8: 'Int8',
    Schema_pb2.ColumnInfo.TYPE_INT_16: 'Int16',
    Schema_pb2.ColumnInfo.TYPE_INT_32: 'Int32',
    Schema_pb2.ColumnInfo.TYPE_INT_64: 'Int64',
    Schema_pb2.ColumnInfo.TYPE_UINT_8: 'UInt8',
    Schema_pb2.ColumnInfo.TYPE_UINT_16: 'UInt16',
    Schema_pb2.ColumnInfo.TYPE_UINT_32: 'UInt32',
    Schema_pb2.ColumnInfo.TYPE_UINT_64: 'UInt64',
    Schema_pb2.ColumnInfo.TYPE_DECIMAL: 'Decimal',
    Schema_pb2.ColumnInfo.TYPE_FLOAT_32: 'Float32',
    Schema_pb2.ColumnInfo.TYPE_FLOAT_64: 'Float64',
    Schema_pb2.ColumnInfo.TYPE_DATE: 'Date',
    Schema_pb2.ColumnInfo.TYPE_DATETIME_64: 'DateTime64',
    Schema_pb2.ColumnInfo.TYPE_NESTED: 'Nested',
    Schema_pb2.ColumnInfo.TYPE_ARRAY: 'Array',
    Schema_pb2.ColumnInfo.TYPE_SET: 'Set',
}
class TableConverter:
    """Converts a schema Table to a SQL create table statement."""

    def __init__(self, table: Schema.Table):
        self.table = table

    def _get_decimal_str(self, column: Schema.Column) -> str:
        """Returns the narrowest Clickhouse `DecimalN(scale)` type for `column`.

        Raises:
            ValueError: if the column has no decimal info or its precision
                exceeds 76 digits (the Decimal256 maximum).
        """
        info = column.decimal_info()
        if info is None:
            raise ValueError(
                f'No decimal info for decimal column `{column.name()}`.')
        # Decimal32/64/128/256 hold up to 9/18/38/76 decimal digits.
        if info.precision <= 9:
            size = 32
        elif info.precision <= 18:
            size = 64
        elif info.precision <= 38:
            size = 128
        elif info.precision <= 76:
            size = 256
        else:
            raise ValueError('Decimal precision out of range for '
                             f'`{column.name()}`: {info.precision}')
        return f'Decimal{size}({info.scale})'

    def _get_timestamp_str(self, column: Schema.Column) -> str:
        """Returns the `(precision[, "timezone"])` suffix for DateTime64."""
        return GetTimestampStr(column)

    def _get_compression_name(self, value: int) -> Optional[str]:
        """Maps a compression enum value to its Clickhouse codec name (or None)."""
        if value == Schema_pb2.ColumnClickhouseAnnotation.COMPRESS_LZ4:
            return 'LZ4'
        if value == Schema_pb2.ColumnClickhouseAnnotation.COMPRESS_LZ4HC:
            return 'LZ4HC'
        elif value == Schema_pb2.ColumnClickhouseAnnotation.COMPRESS_ZSTD:
            return 'ZSTD'
        elif value == Schema_pb2.ColumnClickhouseAnnotation.COMPRESS_UNCOMPRESSED:
            return 'UNCOMPRESSED'
        return None

    def _get_codec(self, column: Schema.Column,
                   is_nested: bool) -> Optional[str]:
        """Extracts the Clickhouse encoding string for `column`.

        Returns the comma-joined codec list for a CODEC(...) clause, or None
        when no codec applies (low-cardinality or nested-descendant columns).
        """
        if column.is_low_cardinality():
            # No compression for low cardinality allowed in clickhouse.
            return None
        if is_nested:
            # No compression for descendants of nested columns.
            return None
        # TODO: Support compression for de-sugared nested columns, e.g.:
        # `field.sub_field` String CODEC(ZSTD)
        codecs = []
        delta = column.clickhouse_annotation.delta_compression_width
        if delta:
            codecs.append(f'Delta({delta})')
        compression = self._get_compression_name(
            column.clickhouse_annotation.compression_type)
        if compression is None:
            # TODO: Support different default compression for nested tables,
            # currently uses default compression from parent table.
            compression = self._get_compression_name(
                self.table.clickhouse_annotation.default_compression)
        if compression is not None and compression != 'UNCOMPRESSED':
            level = column.clickhouse_annotation.compression_level
            if level:
                compression += f'({level})'
            codecs.append(compression)
        if codecs:
            return ', '.join(codecs)
        return None

    def _column_to_sql(self, column: Schema.Column, indent: int,
                       type_only: bool, is_nested: bool, is_wrapped: bool=False
                      ) -> str:
        """Returns a Clickhouse SQL column specification for `column`.

        Parameters:
          column: Column specification.
          indent: Number of indentations at previous level.
          type_only: Whether or not to return only the column type.
          is_nested: Whether or not the column is a descendant of a nested column.
          is_wrapped: Whether or not the column's parent is a wrapper, such as
            Array(...). Used to indent nested columns inside wrappers.
        Returns:
          str: Clickhouse SQL column specification for `column`.
        """
        s = ''
        if not type_only:
            s += f'{GetIndent(indent)}{column.sql_name()} '
        # `end` collects the closing parens of any type wrappers opened below.
        end = ''
        column_type = column.info.column_type
        if (column.info.label == Schema_pb2.ColumnInfo.LABEL_REPEATED and
                column_type != Schema_pb2.ColumnInfo.TYPE_MAP and
                column_type != Schema_pb2.ColumnInfo.TYPE_NESTED):
            s += 'Array('
            end += ')'
        if column.is_low_cardinality():
            s += 'LowCardinality('
            end += ')'
        # ClickHouse nested types (Nested, Tuple) cannot be inside a Nullable.
        if (column.info.label == Schema_pb2.ColumnInfo.LABEL_OPTIONAL and
                column_type != Schema_pb2.ColumnInfo.TYPE_NESTED):
            s += 'Nullable('
            end += ')'
        if column_type == Schema_pb2.ColumnInfo.TYPE_MAP:
            ktype = self._column_to_sql(
                column.fields[0], 0, type_only=True, is_nested=is_nested)
            vtype = self._column_to_sql(
                column.fields[1], 0, type_only=True, is_nested=is_nested)
            s += f'Map({ktype}, {vtype})'
        elif column_type in [
                Schema_pb2.ColumnInfo.TYPE_ARRAY, Schema_pb2.ColumnInfo.TYPE_SET
        ]:
            s += self._column_to_sql(
                column.fields[0], 0, type_only=True, is_nested=is_nested,
                is_wrapped=True)
        elif column.clickhouse_annotation.type_name:
            # An explicit type override from the annotation wins.
            s += column.clickhouse_annotation.type_name
        elif column_type == Schema_pb2.ColumnInfo.TYPE_DECIMAL:
            s += self._get_decimal_str(column)
        else:
            if column_type not in CLICKHOUSE_TYPE_NAME:
                raise KeyError(
                    f'Unknown type to convert to clickhouse: {column_type}')
            if (column_type == Schema_pb2.ColumnInfo.TYPE_NESTED and
                    column.clickhouse_annotation.nested_type_name):
                s += column.clickhouse_annotation.nested_type_name
            else:
                s += CLICKHOUSE_TYPE_NAME[column_type]
            if column_type == Schema_pb2.ColumnInfo.TYPE_DECIMAL:
                s += self._get_decimal_str(column)
            elif column_type == Schema_pb2.ColumnInfo.TYPE_DATETIME_64:
                s += self._get_timestamp_str(column)
            elif column_type == Schema_pb2.ColumnInfo.TYPE_NESTED:
                # If the nested type is within a wrapper, increase indentation.
                nested_indent = (indent + (2 * TAB_SIZE) if is_wrapped else
                                 indent + TAB_SIZE)
                wrapper_indent = indent + TAB_SIZE if is_wrapped else indent
                sub_columns = []
                for sub_column in column.fields:
                    sub_columns.append(
                        self._column_to_sql(sub_column,
                                            nested_indent,
                                            type_only=False,
                                            is_nested=True))
                sub_columns_str = ',\n'.join(sub_columns)
                s += f'(\n{sub_columns_str}\n{GetIndent(wrapper_indent)})'
        s += end
        if not type_only:
            codec = self._get_codec(column, is_nested=is_nested)
            if codec is not None:
                s += f' CODEC({codec})'
        return s

    def columns_sql(self, indent: int) -> List[str]:
        """Returns a list of Clickhouse SQL column specifications."""
        columns = []
        for column in self.table.columns:
            columns.append(self._column_to_sql(
                column, indent, type_only=False, is_nested=False))
        return columns

    def table_options(self, replication_params: str) -> List[str]:
        """Extracts Clickhouse CREATE TABLE options for this message.

        Returns the list of option clauses (ENGINE, ORDER BY, ...) to append
        after the column list. (Return annotation fixed: this has always
        returned a list, which to_sql() joins with newlines.)
        """
        copt = []
        force_order_by = False
        if self.table.clickhouse_annotation.HasField('engine'):
            if (self.table.clickhouse_annotation.engine ==
                    Schema_pb2.TableClickhouseAnnotation.ENGINE_MERGE_TREE):
                force_order_by = True
                copt.append('ENGINE = MergeTree()')
            elif (self.table.clickhouse_annotation.engine ==
                  Schema_pb2.TableClickhouseAnnotation.ENGINE_LOG):
                copt.append('ENGINE = Log()')
            elif (self.table.clickhouse_annotation.engine ==
                  Schema_pb2.TableClickhouseAnnotation.ENGINE_TINY_LOG):
                copt.append('ENGINE = TinyLog()')
            elif (self.table.clickhouse_annotation.engine == Schema_pb2.
                  TableClickhouseAnnotation.ENGINE_REPLICATED_MERGE_TREE):
                force_order_by = True
                copt.append(
                    f'ENGINE = ReplicatedMergeTree({replication_params})')
        if self.table.clickhouse_annotation.order_by_fields:
            order_by = ', '.join(
                self.table.clickhouse_annotation.order_by_fields)
            copt.append(f'ORDER BY ({order_by})')
        elif force_order_by:
            # MergeTree engines require an ORDER BY clause; use the empty tuple.
            copt.append('ORDER BY tuple()')
        if self.table.clickhouse_annotation.partition_by_sql_expression:
            partition_by = ', '.join(
                self.table.clickhouse_annotation.partition_by_sql_expression)
            copt.append(f'PARTITION BY ({partition_by})')
        if self.table.clickhouse_annotation.sample_by_sql_expression:
            sample_by = ', '.join(
                self.table.clickhouse_annotation.sample_by_sql_expression)
            copt.append(f'SAMPLE BY ({sample_by})')
        if self.table.clickhouse_annotation.index_granularity > 0:
            ig = self.table.clickhouse_annotation.index_granularity
            copt.append(f'SETTINGS index_granularity = {ig}')
        if self.table.data_annotation.comment:
            # Prepending a double quote forces repr() to emit a single-quoted,
            # escaped literal; [2:] strips the leading `'"` pair, yielding a
            # SQL-safe single-quoted string.
            comment = "'" + repr('"' + self.table.data_annotation.comment)[2:]
            copt.append(f'COMMENT {comment}')
        return copt

    def to_sql(self,
               table_name: Optional[str] = '${database}.${table}',
               replication_params: str = '${replicationParams}',
               if_not_exists: Optional[bool] = False) -> str:
        """Returns a CREATE TABLE SQL statement for this message.

        Parameters:
          table_name: Name to use in the statement; falls back to the
            schema table name when falsy.
          replication_params: Arguments substituted into ReplicatedMergeTree.
          if_not_exists: When true, emits `CREATE TABLE IF NOT EXISTS`.
        """
        s = 'CREATE TABLE '
        if if_not_exists:
            s += 'IF NOT EXISTS '
        tname_str = table_name if table_name else self.table.name()
        columns_str = ',\n'.join(self.columns_sql(2))
        s += f'{tname_str} (\n{columns_str}\n)\n'
        copts = self.table_options(replication_params)
        if copts:
            copts_str = '\n'.join(copts)
            s += f'\n{copts_str}'
        return s

    def validate(self) -> bool:
        """Validates the message as a SQL table. Raises exceptions on errors."""
        return self.table.validate()
class FileConverter:
    """Converts a proto FileDescriptor to corresponding SQL table statement."""

    def __init__(self):
        self.name = None        # Source proto file name or python module name.
        self.basename = None    # Name without its .proto / .py extension.
        self.package = None     # Proto package or python module name.
        self.converters = None  # One TableConverter per message/dataclass.
        # Fix: previously only set inside from_proto_file(), so instances
        # populated via from_module() raised AttributeError on access.
        self.java_package = None

    def from_proto_file(
            self,
            file_descriptor: descriptor.FileDescriptor) -> 'FileConverter':
        """Populates this converter from a proto file descriptor; returns self."""
        self.name = file_descriptor.name
        self.basename = strutil.StripSuffix(
            os.path.basename(file_descriptor.name), '.proto')
        self.package = file_descriptor.package
        self.java_package = file_descriptor.GetOptions().java_package
        self.converters = [
            TableConverter(proto2schema.ConvertMessage(msg))
            for msg in file_descriptor.message_types_by_name.values()
        ]
        return self

    def from_module(self, py_module: ModuleType) -> 'FileConverter':
        """Populates this converter from a python module's dataclasses; returns self."""
        self.name = py_module.__name__
        self.basename = strutil.StripSuffix(
            os.path.basename(py_module.__file__), '.py')
        self.package = py_module.__name__
        self.converters = [
            TableConverter(python2schema.ConvertDataclass(datacls))
            for datacls in py_module.__dict__.values()
            if dataclasses.is_dataclass(datacls)
        ]
        return self

    def get_path(self, dir_map, basename) -> str:
        """Returns directory path for saving SQL file `basename`.

        The package dots become directory separators; the first prefix of
        `self.name` found in `dir_map` selects the destination root.
        """
        end_path = os.path.join('/'.join(self.package.split('.')), basename)
        for k, v in dir_map.items():
            if self.name.startswith(k):
                return os.path.join(v, end_path)
        return end_path

    def to_sql(self,
               table_name: str = '${database}.${table}',
               replication_params: str = '${replicationParams}',
               if_not_exists: Optional[bool] = False):
        """Converts the messages in this file to several SQL CREATE TABLE.

        Returns a dict mapping table name to its CREATE TABLE statement.
        """
        result = {}
        for conv in self.converters:
            result[conv.table.name()] = conv.to_sql(table_name,
                                                    replication_params,
                                                    if_not_exists)
        return result

    def validate(self) -> bool:
        """Validates the messages and fields in this file for SQL correctness."""
        for conv in self.converters:
            conv.validate()
        return True
def ConvertTable(table: Schema.Table,
                 table_name: str = '${database}.${table}',
                 replication_params: str = '${replicationParams}',
                 if_not_exists: Optional[bool] = False) -> str:
    """Converts a single schema table to a CREATE TABLE SQL statement."""
    converter = TableConverter(table)
    return converter.to_sql(table_name, replication_params, if_not_exists)
class SchemaConverter:
"""Converts a list of file descriptors to SQL create statements."""
    def __init__(self):
        # FileConverters registered via add_descriptors() / add_modules().
        self.file_converters = []
    def add_descriptors(self,
                        file_descriptors: List[descriptor.FileDescriptor],
                        export_only: Optional[bool] = False):
        """Registers proto file descriptors for SQL conversion.

        Each descriptor is converted immediately; with `export_only` the
        resulting converter is discarded (presumably only to surface
        conversion errors -- TODO confirm intent).

        Raises:
            ValueError: chained with the proto file name on conversion error.
        """
        for fd in file_descriptors:
            try:
                fc = FileConverter().from_proto_file(fd)
                if not export_only:
                    self.file_converters.append(fc)
            except ValueError as e:
                raise ValueError(f'Processing proto file: {fd.name}') from e
def add_modules(self,
py_modules: List[ModuleType],
export_only: Optional[bool] = False):
for pym in py_modules:
try:
fc = FileConverter().from_module(pym)
if not export_only:
self.file_converters.append(fc)
except ValueError as e:
raise ValueError(
f'Processing input pyton module: {pym.__name__}'
f' / {pym.__file__}') from e
def to_sql_files(self,
dir_map: Dict[str, str],
table_name: Optional[str] = '${database}.${table}',
replication_params: str = '${replicationParams}',
if_not_exists: Optional[bool] = False) -> Dict[str, str]:
files = {}
for fc in | |
<filename>vhdl_navigation.py
from __future__ import absolute_import
import sublime, sublime_plugin
import re, string, os, sys, functools, mmap, pprint, imp, threading
from collections import Counter
from plistlib import readPlistFromBytes
try:
from . import vhdl_module
from .util import vhdl_util
from .util import sublime_util
from .color_scheme_util import st_color_scheme_matcher
from .color_scheme_util import rgba
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), "util"))
import vhdl_util
import sublime_util
sys.path.append(os.path.join(os.path.dirname(__file__), "color_scheme_util"))
import st_color_scheme_matcher
import rgba
############################################################################
# Init
# VHDL built-in / standard-package type names: identifiers of these types
# do not need a user-defined type lookup.
default_type = [
    'bit', 'bit_vector', 'boolean', 'character', 'integer', 'natural', 'positive', 'real', 'string',
    'std_logic', 'std_ulogic', 'std_logic_vector', 'std_ulogic_vector', 'signed', 'unsigned'
]
# CSS injected into the hover popup; rebuilt by init_css().
tooltip_css = ''
# sublime.show_popup flags; set from settings in plugin_loaded().
tooltip_flag = 0
# Whether to append a reference list to popups (needs ST build >= 3145).
show_ref = True
# Colors extracted from the active color scheme (filled by init_css()).
colors = {}
def plugin_loaded():
    """Reloads helper modules and (re)reads plugin settings on plugin load."""
    imp.reload(vhdl_util)
    imp.reload(sublime_util)
    imp.reload(st_color_scheme_matcher)
    # Re-run this function whenever the global preferences change.
    global pref_settings
    pref_settings = sublime.load_settings('Preferences.sublime-settings')
    pref_settings.clear_on_change('reload')
    pref_settings.add_on_change('reload', plugin_loaded)
    # Re-run this function whenever the VHDL settings change.
    global vhdl_settings
    vhdl_settings = sublime.load_settings('VHDL.sublime-settings')
    vhdl_settings.clear_on_change('reload')
    vhdl_settings.add_on_change('reload', plugin_loaded)
    global tooltip_flag
    tooltip_flag = (sublime.HIDE_ON_MOUSE_MOVE_AWAY
                    if vhdl_settings.get('vhdl.tooltip_hide_on_move', True)
                    else 0)
    global show_ref
    show_ref = (int(sublime.version()) >= 3145 and
                vhdl_settings.get('vhdl.tooltip_show_refs', True))
    init_css()
def init_css():
    """Builds the tooltip CSS from the active color scheme."""
    global tooltip_css
    scheme = st_color_scheme_matcher.ColorSchemeMatcher(pref_settings.get('color_scheme'))
    bg = scheme.get_special_color('background')
    fg = scheme.get_special_color('foreground')
    # Derive popup colors by nudging each channel of the scheme background:
    # darken bright channels, lighten dark ones (0x33 for the frame, 0x20
    # for the body).
    bg_rgb = rgba.RGBA(bg)
    bgHtml = 0
    bgBody = 0
    for channel, shift in ((bg_rgb.b, 0), (bg_rgb.g, 8), (bg_rgb.r, 16)):
        sign = -1 if channel > 128 else 1
        bgHtml += (channel + sign * 0x33) << shift
        bgBody += (channel + sign * 0x20) << shift
    rules = [
        'html {{ background-color: #{bg:06x}; color: {fg}; }}\n'.format(bg=bgHtml, fg=fg),
        'body {{ background-color: #{bg:06x}; margin: 1px; font-size: 1em; }}\n'.format(bg=bgBody),
        'p {padding-left: 0.6em;}\n',
        '.content {margin: 0.8em;}\n',
        'h1 {font-size: 1.0rem;font-weight: bold; margin: 0 0 0.25em 0;}\n',
        'a {{color: {c};}}\n'.format(c=fg),
        '.keyword {{color: {c};}}\n'.format(c=scheme.get_color('keyword')),
        '.support {{color: {c};}}\n'.format(c=scheme.get_color('support')),
        '.storage {{color: {c};}}\n'.format(c=scheme.get_color('storage')),
        '.function {{color: {c};}}\n'.format(c=scheme.get_color('support.function')),
        '.entity {{color: {c};}}\n'.format(c=scheme.get_color('entity')),
        '.operator {{color: {c};}}\n'.format(c=scheme.get_color('keyword.operator')),
        '.numeric {{color: {c};}}\n'.format(c=scheme.get_color('constant.numeric')),
        '.string {{color: {c};}}\n'.format(c=scheme.get_color('string')),
        '.extra-info {font-size: 0.9em; }\n',
        '.ref_links {font-size: 0.9em; color: #0080D0; padding-left: 0.6em}\n',
    ]
    tooltip_css = ''.join(rules)
    global colors
    colors['operator'] = scheme.get_color('keyword.operator')
############################################################################
# Help function to retrieve type
def type_info(view, t, region):
    """Resolves type information for the type named `t`.

    First searches `view` (up to the end of the line containing `region`,
    or the whole view when `region` is falsy); when not found, falls back
    to the window's symbol index and scans matching VHDL files.

    Parameters:
        view: Sublime Text view to search first.
        t: Name of the type to resolve.
        region: Region bounding the in-view search, or a falsy value.

    Returns:
        The type-info dict from vhdl_util (with an added 'fname' tuple
        `(path, row, col)` when resolved from another file); may be
        None/empty-typed when the type cannot be resolved.
    """
    if region:
        pos = view.line(region).b
    else:
        # Bugfix: this previously read `self.view.size()`, which raises
        # NameError in a module-level function; use the `view` argument.
        pos = view.size()
    txt = view.substr(sublime.Region(0, pos))
    tti = vhdl_util.get_type_info(txt, t, 4)
    if not tti or not tti['type']:
        filelist = view.window().lookup_symbol_in_index(t)
        if filelist:
            file_ext = ('vhd', 'vhdl')
            # file_ext = tuple(self.settings.get('vhdl.ext',['vhd','vhdl']))
            file_checked = []
            for f in filelist:
                fname = sublime_util.normalize_fname(f[0])
                # Each candidate file is scanned at most once.
                if fname in file_checked:
                    continue
                file_checked.append(fname)
                if fname.lower().endswith(file_ext):
                    tti = vhdl_util.get_type_info_file(fname, t, 4)
                    if tti['type']:
                        tti['fname'] = (f[0], f[2][0], f[2][1])
                        break
    return tti
def type_info_on_hier(view, varname, txt=None, region=None):
    """Resolves the type of a dotted hierarchical name (e.g. record fields).

    Walks each dot-separated level of `varname`, resolving the first level
    in the current file text and deeper levels through their parent's type.
    """
    parts = varname.split('.')
    ti = None
    if not txt and region:
        txt = view.substr(sublime.Region(0, view.line(region).b))
    for depth, part in enumerate(parts):
        v = part.split('[')[0]  # strip any array-indexing suffix
        # Get type definition: first level is resolved inside the current
        # file; deeper levels resolve the previous level's type name.
        if depth == 0:
            ti = vhdl_util.get_type_info(txt, v, 4)
        elif ti and ti['type']:
            ti = type_info(view, ti['type'], region)
        if ti and ti['type'] == 'record':
            # Descend into the record: find the field matching this level.
            fti = vhdl_util.get_all_type_info_from_record(ti['decl'])
            line = 0 if 'fname' not in ti else ti['fname'][1] + 1
            for f in fti:
                if f['name'].lower() == v.lower():
                    if 'fname' in ti:
                        f['fname'] = (ti['fname'][0], line, ti['fname'][2])
                    ti = f
                    break
                line += 1
    return ti
############################################################################
# One-shot callbacks keyed by file name, run once the file finishes loading.
callbacks_on_load = {}

class VhdlOnLoadEventListener(sublime_plugin.EventListener):
    """Runs and unregisters a pending callback when its file has loaded."""
    def on_load_async(self, view):
        global callbacks_on_load
        fname = view.file_name()
        if fname in callbacks_on_load:
            # Invoke first, then drop the registration: callbacks are one-shot.
            callbacks_on_load[fname]()
            del callbacks_on_load[fname]
############################################################################
# Display type of the signal/variable under the cursor into the status bar #
# Event onHover to display the popup
class VhdlShowTypeHover(sublime_plugin.EventListener):
    """Shows a type-information popup when hovering a VHDL identifier."""
    def on_hover(self, view, point, hover_zone):
        # Only react to hovering over text.
        if hover_zone != sublime.HOVER_TEXT:
            return
        # Optionally disable the feature on big files, where resolving the
        # type information can be quite slow.
        threshold = view.settings().get('vhdl.hover_max_size', -1)
        if threshold != -1 and view.size() > threshold:
            return
        # Only trigger inside VHDL source, outside comments/strings/keywords.
        scope = view.scope_name(point)
        if 'source.vhdl' not in scope:
            return
        if any(w in scope for w in ['comment', 'string', 'keyword']):
            return
        popup = VhdlTypePopup(view)
        sublime.set_timeout_async(lambda r=view.word(point), p=point: popup.show(r, p))
class VhdlTypePopup :
    def __init__(self,view):
        """Binds this popup helper to the view it inspects."""
        self.view = view
    def show(self,region,location):
        """Builds and displays the type popup for the word at `region`.

        Expands `region` to the full (possibly dotted hierarchical)
        identifier under the cursor, resolves its type via get_type(),
        renders it as colorized HTML and shows the popup at `location`.
        """
        # If nothing is selected expand selection to word
        if region.empty() :
            region = self.view.word(region)
        # Make sure a whole word is selected
        elif (self.view.classify(region.a) & sublime.CLASS_WORD_START)==0 or (self.view.classify(region.b) & sublime.CLASS_WORD_END)==0:
            if (self.view.classify(region.a) & sublime.CLASS_WORD_START)==0:
                region.a = self.view.find_by_class(region.a,False,sublime.CLASS_WORD_START)
            if (self.view.classify(region.b) & sublime.CLASS_WORD_END)==0:
                region.b = self.view.find_by_class(region.b,True,sublime.CLASS_WORD_END)
        # Extends to parent if previous character is a dot
        while region.a>1 and self.view.substr(sublime.Region(region.a-1,region.a))=='.' :
            c = self.view.substr(sublime.Region(region.a-2,region.a-1))
            # Array selection -> extend to start of array
            if c == ')':
                region.a = self.view.find_by_class(region.a-3,False,sublime.CLASS_WORD_START)
                if self.view.classify(region.a-2) & sublime.CLASS_WORD_START:
                    region.a = region.a-2
            else :
                region.a = self.view.find_by_class(region.a-2,False,sublime.CLASS_WORD_START)
        v = self.view.substr(region)
        # print('[VhdlTypePopup] Var = {}'.format(v))
        # trigger on valid word only
        # if not re.match(r'^[A-Za-z_]\w*$',v):
        #     return
        #
        s,ti = self.get_type(v,region)
        if not s:
            sublime.status_message('No definition found for ' + v)
        else :
            ref_name = ''
            s = self.color_str(s,True,ti)
            if ti and ti['type'] in ['entity', 'component']:
                ref_name = ti['name']
            # Records: add field definition
            # NOTE(review): unlike the check above, `ti` is not None-guarded
            # here -- confirm get_type always returns a dict when s is set.
            if ti['type'] and ti['tag']:
                type_base= ti['type'].split('(')[0].lower()
                if ti['tag'] in ['signal','port'] and type_base not in default_type:
                    tti = type_info(self.view,ti['type'],region)
                    if tti and tti['type'] == 'record' :
                        # Append one indented line per record field.
                        fti = vhdl_util.get_all_type_info_from_record(tti['decl'])
                        template='<br><span class="extra-info">{0}{1}</span>'
                        for f in fti:
                            x = self.color_str(f['decl'])
                            s += template.format(' '*4,x)
            # Add reference list
            if show_ref and ref_name :
                refs = self.view.window().lookup_references_in_index(ref_name)
                if refs:
                    ref_links = []
                    for l in refs :
                        # Each link encodes path:row:col for on_navigate.
                        l_href = '{}:{}:{}'.format(l[0],l[2][0],l[2][1])
                        l_name = os.path.basename(l[0])
                        ref_links.append('<a href="LINK@{}" class="ref_links">{}</a>'.format(l_href,l_name))
                    s += '<h1><br>Reference:</h1><span>{}</span>'.format('<br>'.join(ref_links))
            # Create popup
            s = '<style>{css}</style><div class="content">{txt}</div>'.format(css=tooltip_css, txt=s)
            self.view.show_popup(s,location=location, flags=tooltip_flag, max_width=500, on_navigate=self.on_navigate)
def get_type(self,var_name,region):
scope = self.view.scope_name(region.a)
ti = None
txt = ''
# print('[VhdlTypePopup:get_type] Var={}, region={}, scope={}'.format(var_name,region,scope))
if 'variable.parameter.port' in scope:
if 'meta.block.entity_instantiation' in scope:
r_inst = sublime_util.expand_to_scope(self.view,'meta.block.entity_instantiation',region)
elif 'meta.block.component_instantiation' in scope:
r_inst = sublime_util.expand_to_scope(self.view,'meta.block.component_instantiation',region)
inst_txt = self.view.substr(r_inst)
m = re.search(r'(?si)(?:(?P<scope>\w+)\.)?(?P<mname>\w+)\s+(?:port|generic)',inst_txt)
if m:
re_str = r'(?si)(?P<type>component|entity)\s+(?P<name>'+m.group('mname')+r')\s+is\s+(?P<content>.*?)\bend\s+((?P=type)|(?P=name))'
info = sublime_util.lookup_symbol(self.view,m.group('mname'),re_str)
# print('Port {} in module {} defined in {}'.format(var_name,m.group('mname'),info))
# TODO: handle component
if info['match']:
ti = vhdl_util.get_type_info(info['match'].group('content'),var_name,4)
if ti:
txt = ti['decl']
elif 'entity.name.type.entity' in scope or 'entity.name.type.component' in scope:
t = 'component' if 'component' in scope else 'entity'
ti = {'decl': '{} {}'.format(t,var_name), 'type':t, 'name':var_name, 'tag':'decl', 'value':None}
txt = ti['decl']
elif 'storage.type.entity.reference' in scope or 'storage.type.component.reference' in scope:
t = 'component' if 'component' in scope else 'entity'
ti = {'decl': '{} {}'.format(t,var_name), 'type':t, 'name':var_name, 'tag':'reference', 'value':None}
txt = ti['decl']
elif 'storage.type.userdefined' in scope :
ti = type_info(self.view,var_name,region)
if ti:
txt = ti['decl']
if ti['type'] == 'record' :
txt = re.sub(r'(\brecord\b|;)',r'\1<br>',txt)
elif '.' in var_name:
ti = type_info_on_hier(self.view, var_name, region=region)
if ti:
txt = ti['decl']
else :
# lookup for a signal/variable declaration in current file
lines = self.view.substr(sublime.Region(0, self.view.line(region).b))
ti = vhdl_util.get_type_info(lines,var_name,4)
if ti:
txt = ti['decl']
return txt,ti
def color_str(self,s, addLink=False, ti_var=None):
# Split all text in word, special character, space and line return
words = re.findall(r"\w+|<<|>>|[^\w\s]|\s+", s)
# print('[color_str] String = "{}" \n Split => {}\n ti = {}'.format(s,words,ti_var))
# print(ti_var)
sh = ''
idx_type = -1
link = ''
if words[0].lower() in ['signal','variable','constant','alias']:
idx_type = 6
link = 'LOCAL@{}:{}'.format(words[0],words[2])
elif words[0] in ['port']:
idx_type = 8
link = 'LOCAL@{}:{}'.format(words[0],words[2])
elif ti_var :
if ti_var['tag']=='reference' :
re_str = r'(?si)(?P<type>entity)\s+(?P<name>'+ti_var['name']+r')\s+is'
info = sublime_util.lookup_symbol(self.view, ti_var['name'], re_str)
link = 'LINK@{}:{}:{}'.format(info['fname'],info['row'],info['col'])
elif ti_var['tag']=='generic':
idx_type = 4
sh+='<span class="keyword">generic</span> '
link = 'LOCAL@{}:{}'.format(words[0],words[2])
elif 'fname' in ti_var:
link = 'LINK@{}:{}:{}'.format(ti_var['fname'][0],ti_var['fname'][1],ti_var['fname'][2])
for i,w in enumerate(words):
# Check for keyword
if w.lower() in ['signal','variable','constant','port', 'type', 'is','end', 'record','array','downto','to','of','in','out','inout','entity','component','alias']:
sh+='<span class="keyword">{0}</span>'.format(w)
elif w in [':','-','+','=']:
sh+='<span class="operator">{0}</span>'.format(w)
elif w in ['<<','>>']:
wt = '<<' if w=='<<' else '>>'
sh+='<span class="operator">{0}</span>'.format(wt)
elif re.match(r'\d+',w):
sh+='<span class="numeric">{0}</span>'.format(w)
# Type
elif i==idx_type or w.lower() in default_type:
sh+='<span class="storage">{0}</span>'.format(w)
# Variable name
elif addLink and ti_var and link and w==ti_var['name']:
sh+='<a href="{}">{}</a>'.format(link,w)
# Unknown words/characters => copy as-is
elif not w.strip() :
sh += ' '
# Reduce multiple spaces to | |
# -*- coding: utf-8 -*-
"""
PET-MR image preprocessing nipype workflows.
"""
import os
import nipype.pipeline.engine as pe
from nipype.algorithms.misc import Gunzip
from nipype.interfaces import spm
from nipype.interfaces.utility import Merge, IdentityInterface, Function
from neuro_pypes._utils import format_pair_list, flatten_list, concat_to_pair_list
from neuro_pypes.config import (
setup_node,
check_atlas_file,
get_config_setting
)
from neuro_pypes.pet.pvc import petpvc_workflow
from neuro_pypes.preproc import (
spm_normalize,
spm_coregister,
spm_apply_deformations,
get_bounding_box
)
from neuro_pypes.utils import (
get_datasink,
spm_tpm_priors_path,
extend_trait_list,
get_input_node,
get_interface_node,
remove_ext,
get_input_file_name,
extension_duplicates
)
# TODO: merge the two workflows below, maybe splitting them in
# two wf steps: pre-processing then registration.
def spm_mrpet_preprocessing(wf_name="spm_mrpet_preproc"):
    """ Run the PET pre-processing workflow against the
    gunzip_pet.in_file files.
    It depends on the anat_preproc_workflow, so if this
    has not been run, this function will run it too.

    # TODO: organize the anat2pet hack/condition somehow:
    If anat2pet:
    - SPM12 Coregister T1 and tissues to PET
    - PETPVC the PET image in PET space
    - SPM12 Warp PET to MNI
    else:
    - SPM12 Coregister PET to T1
    - PETPVC the PET image in anatomical space
    - SPM12 Warp PET in anatomical space to MNI through the
    `anat_to_mni_warp`.

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pet_input.in_file: traits.File
        The raw NIFTI_GZ PET image file

    pet_input.anat: traits.File
        Path to the high-contrast anatomical image.
        Reference file of the warp_field, i.e., the
        anatomical image in its native space.

    pet_input.anat_to_mni_warp: traits.File
        The warp field from the transformation of the
        anatomical image to the standard MNI space.

    pet_input.atlas_anat: traits.File
        The atlas file in anatomical space.

    pet_input.tissues: list of traits.File
        List of tissues files from the New Segment process.
        At least the first 3 tissues must be present.

    Nipype outputs
    --------------
    pet_output.pvc_out: existing file
        The results of the PVC process

    pet_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pet_output.coreg_ref: existing file
        The coregistered reference image to PET space.

    pet_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files

    pet_output.pvc_warped: existing file
        Results from PETPVC normalized to MNI.
        The result of every internal pre-processing step
        is normalized to MNI here.

    pet_output.warp_field: existing files
        Spatial normalization parameters .mat files

    pet_output.gm_norm: existing file
        The output of the grey matter intensity
        normalization process.
        This is the last step in the PET signal correction,
        before registration.

    pet_output.atlas_pet: existing file
        Atlas image warped to PET space.
        If the `atlas_file` option is an existing file and
        `normalize_atlas` is True.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = [
        "in_file",
        "anat",
        "anat_to_mni_warp",
        "tissues"
    ]

    out_fields = [
        "brain_mask",
        "coreg_others",
        "coreg_ref",
        "pvc_warped",
        "pet_warped",  # 'pet_warped' is a dummy entry to keep the fields pattern.
        "warp_field",
        "pvc_out",
        "pvc_mask",
        "gm_norm"
    ]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields += ["atlas_anat"]
        out_fields += ["atlas_pet"]

    # input node
    pet_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                           name="pet_input")

    # workflow to perform partial volume correction
    petpvc = petpvc_workflow(wf_name="petpvc")

    merge_list = setup_node(Merge(4), name='merge_for_unzip')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    warp_pet = setup_node(spm_normalize(), name="warp_pet")

    # bounding box of the SPM TPM priors, applied to all normalized outputs
    tpm_bbox = setup_node(Function(
        function=get_bounding_box,
        input_names=["in_file"],
        output_names=["bbox"]),
        name="tpm_bbox")
    tpm_bbox.inputs.in_file = spm_tpm_priors_path()

    # output node
    pet_output = setup_node(IdentityInterface(fields=out_fields), name="pet_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # check how to perform the registration, to decide how to build the pipeline
    anat2pet = get_config_setting('registration.anat2pet', False)
    if anat2pet:
        wf.connect([
            # inputs
            (pet_input, petpvc, [("in_file", "pvc_input.in_file"),
                                 ("anat", "pvc_input.reference_file"),
                                 ("tissues", "pvc_input.tissues")]),

            # gunzip some files for SPM Normalize
            (petpvc, merge_list, [("pvc_output.pvc_out", "in1"),
                                  ("pvc_output.brain_mask", "in2"),
                                  ("pvc_output.gm_norm", "in3")]),
            (pet_input, merge_list, [("in_file", "in4")]),

            (merge_list, gunzipper, [("out", "in_file")]),

            # warp the PET PVCed to MNI
            (petpvc, warp_pet, [("pvc_output.coreg_ref", "image_to_align")]),
            (gunzipper, warp_pet, [("out_file", "apply_to_files")]),
            (tpm_bbox, warp_pet, [("bbox", "write_bounding_box")]),

            # output: PVC results in native PET space
            (petpvc, pet_output, [("pvc_output.pvc_out", "pvc_out"),
                                  ("pvc_output.brain_mask", "brain_mask"),
                                  ("pvc_output.coreg_ref", "coreg_ref"),
                                  ("pvc_output.coreg_others", "coreg_others"),
                                  ("pvc_output.gm_norm", "gm_norm")]),

            # output: MNI-normalized results
            (warp_pet, pet_output, [("normalized_files", "pvc_warped"),
                                    ("deformation_field", "warp_field")]),
        ])
    else:  # PET 2 ANAT
        collector = setup_node(Merge(2), name='merge_for_warp')
        apply_warp = setup_node(spm_apply_deformations(), name="warp_pet")

        wf.connect([
            # inputs
            (pet_input, petpvc, [("in_file", "pvc_input.in_file"),
                                 ("anat", "pvc_input.reference_file"),
                                 ("tissues", "pvc_input.tissues")]),

            # gunzip some files for SPM Normalize
            (petpvc, merge_list, [("pvc_output.pvc_out", "in1"),
                                  ("pvc_output.brain_mask", "in2"),
                                  ("pvc_output.gm_norm", "in3")]),
            (pet_input, merge_list, [("in_file", "in4")]),

            (merge_list, gunzipper, [("out", "in_file")]),

            # warp the PET PVCed to MNI, reusing the anatomical warp field
            (gunzipper, collector, [("out_file", "in1")]),
            (petpvc, collector, [("pvc_output.coreg_ref", "in2")]),

            (pet_input, apply_warp, [("anat_to_mni_warp", "deformation_file")]),
            (collector, apply_warp, [("out", "apply_to_files")]),
            (tpm_bbox, apply_warp, [("bbox", "write_bounding_box")]),

            # output: PVC results in anatomical space
            (petpvc, pet_output, [("pvc_output.pvc_out", "pvc_out"),
                                  ("pvc_output.brain_mask", "brain_mask"),
                                  # BUG FIX: the destination field was
                                  # 'petpvc_mask', which is not declared in
                                  # out_fields ('pvc_mask' is); nipype rejects
                                  # connections to undeclared fields.
                                  ("pvc_output.petpvc_mask", "pvc_mask"),
                                  ("pvc_output.coreg_ref", "coreg_ref"),
                                  ("pvc_output.coreg_others", "coreg_others"),
                                  ("pvc_output.gm_norm", "gm_norm")]),

            # output: MNI-normalized results
            # NOTE(review): the anat2pet branch does not fill 'pvc_mask';
            # the two branches are asymmetric -- confirm this is intended.
            (apply_warp, pet_output, [("normalized_files", "pvc_warped"),
                                      ("deformation_field", "warp_field")]),
        ])

    if do_atlas:
        # Bring the atlas into PET space with nearest-neighbour resampling
        # so that label values are preserved.
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"), name="coreg_atlas")
        coreg_atlas.inputs.write_interp = 0

        wf.connect([
            (pet_input, coreg_atlas, [("anat", "source")]),
            (petpvc, coreg_atlas, [("pvc_output.coreg_ref", "target")]),
            (pet_input, coreg_atlas, [("atlas_anat", "apply_to_files")]),
            (coreg_atlas, pet_output, [("coregistered_files", "atlas_pet")]),
        ])

    return wf
def spm_mrpet_grouptemplate_preprocessing(wf_name="spm_mrpet_grouptemplate_preproc"):
""" Run the PET pre-processing workflow against the gunzip_pet.in_file files.
It depends on the anat_preproc_workflow, so if this has not been run, this function
will run it too.
This is identical to the workflow defined in `spm_mrpet_preprocessing`,
    with the only difference that we now normalize all subjects against a custom
template using the spm Old Normalize interface.
It does:
- SPM12 Coregister T1 and tissues to PET
- PVC the PET image in PET space
- SPM12 Warp PET to the given template
Parameters
----------
wf_name: str
Name of the workflow.
Nipype Inputs
-------------
pet_input.in_file: traits.File
The raw NIFTI_GZ PET image file.
pet_input.atlas_anat: traits.File
The atlas file in anatomical space.
pet_input.anat: traits.File
Path to the high-contrast anatomical image.
Reference file of the warp_field, i.e., the anatomical image in its native space.
pet_input.tissues: list of traits.File
List of tissues files from the New Segment process. At least the first
3 tissues must be present.
pet_input.pet_template: traits.File
The template file for inter-subject registration reference.
Nipype outputs
--------------
pet_output.pvc_out: existing file
The results of the PVC process.
pet_output.brain_mask: existing file
A brain mask calculated with the tissues file.
pet_output.coreg_ref: existing file
The coregistered reference image to PET space.
pet_output.coreg_others: list of existing files
List of coregistered files from coreg_pet.apply_to_files.
pet_output.pet_warped: existing file
PET image normalized to the group template.
pet_output.pvc_warped: existing file
The outputs of the PETPVC workflow normalized to the group template.
The result of every internal pre-processing step is normalized to the
group template here.
pet_output.warp_field: existing files
Spatial normalization parameters .mat files.
pet_output.gm_norm: existing file
The output of the grey matter intensity normalization process.
This is the last step in the PET signal correction, before registration.
pet_output.atlas_pet: existing file
Atlas image warped to PET space.
If the `atlas_file` option is an existing file and `normalize_atlas` is True.
Returns
-------
wf: nipype Workflow
"""
# specify input and output fields
in_fields = [
"in_file",
"anat",
"tissues",
"pet_template"
]
out_fields = [
"brain_mask",
"coreg_others",
"coreg_ref",
"pvc_warped",
"pet_warped",
"warp_field",
"pvc_out",
"pvc_mask",
"gm_norm"
]
do_atlas, _ = check_atlas_file()
if do_atlas:
in_fields += ["atlas_anat"]
out_fields += ["atlas_pet"]
# input
pet_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True), name="pet_input")
# workflow to perform partial volume correction
petpvc = petpvc_workflow(wf_name="petpvc")
unzip_mrg = setup_node(Merge(4), name='merge_for_unzip')
gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])
# warp each subject to the group template
gunzip_template = setup_node(Gunzip(), name="gunzip_template")
gunzip_pet = setup_node(Gunzip(), name="gunzip_pet")
warp_mrg = setup_node(Merge(2), name='merge_for_warp')
warp2template = setup_node(spm.Normalize(jobtype="estwrite", out_prefix="wgrptemplate_"), name="warp2template")
get_bbox = setup_node(Function(
function=get_bounding_box,
input_names=["in_file"],
output_names=["bbox"]),
name="get_bbox"
)
# output
pet_output = setup_node(IdentityInterface(fields=out_fields), name="pet_output")
# Create the workflow object
wf = pe.Workflow(name=wf_name)
wf.connect([
# inputs
(pet_input, petpvc, [
("in_file", "pvc_input.in_file"),
("anat", "pvc_input.reference_file"),
("tissues", "pvc_input.tissues")
]),
# get template bounding box to apply to results
(pet_input, get_bbox, [("pet_template", "in_file")]),
# gunzip some inputs
(pet_input, gunzip_pet, [("in_file", "in_file")]),
(pet_input, gunzip_template, [("pet_template", "in_file")]),
# gunzip some files for SPM Normalize
(petpvc, unzip_mrg, [
("pvc_output.pvc_out", "in1"),
("pvc_output.brain_mask", "in2"),
("pvc_output.gm_norm", "in3")
]),
(pet_input, unzip_mrg, [("in_file", "in4")]),
(unzip_mrg, gunzipper, [("out", "in_file")]),
(gunzipper, warp_mrg, [("out_file", "in1")]),
(warp_mrg, warp2template, [(("out", flatten_list), "apply_to_files")]),
# prepare the target parameters of the warp to template
(gunzip_pet, warp2template, [("out_file", "source")]),
(gunzip_template, warp2template, [("out_file", "template")]),
(get_bbox, warp2template, [("bbox", "write_bounding_box")]),
# output
(warp2template, pet_output, [
("normalization_parameters", "warp_field"),
("normalized_files", "pvc_warped"),
("normalized_source", "pet_warped"),
]),
# output
(petpvc, pet_output, [
("pvc_output.pvc_out", "pvc_out"),
("pvc_output.brain_mask", "brain_mask"),
("pvc_output.coreg_ref", "coreg_ref"),
("pvc_output.coreg_others", "coreg_others"),
("pvc_output.gm_norm", "gm_norm")
]),
])
if do_atlas:
coreg_atlas = setup_node(spm_coregister(cost_function="mi"), name="coreg_atlas")
# set the registration interpolation to nearest neighbour.
coreg_atlas.inputs.write_interp = 0
wf.connect([
(pet_input, coreg_atlas, [("anat", "source")]),
(petpvc, coreg_atlas, [("pvc_output.coreg_ref", "target")]),
(pet_input, coreg_atlas, [("atlas_anat", "apply_to_files")]),
(coreg_atlas, pet_output, [("coregistered_files", "atlas_pet")]),
# | |
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""VT Scope is a debugging aid for developers of terminal emulators.
VT Scope provides an interactive shell which can load a pre-recorded terminal
session and play it back to one or more clients in a controlled manner.
It is possible to play through to a particular offset in the input or play until
a given number of escape sequences have been encountered.
The next escape sequence is displayed in VT Scope before it is sent to the
target terminal(s), so that you know what's going to be printed before it
happens.
You can connect multiple destination terminals to the scope, in order to A/B
test a known-good terminal with one under development. Clients connect over a
TCP socket to port 8383. VT Scope only listens on the local 127.0.0.1
interface.
Canned VT sessions can be created by enabling logging in xterm.
Sample usage looks like this:
# Open a can of data...
vtscope> open ../test_data/vttest-01.log
Read 16723 bytes from ../test_data/vttest-01.log.
# When the next chunk of data is plain text, the offset, byte count,
# and first 15 bytes are displayed...
Next up: offset 0, 19 chars: "# 20120103.1540..."
# Wait for two clients...
vtscope> accept 2
Listening on 127.0.0.1:8383
Waiting for client 1/2...
# At this point, open an xterm and type 'nc 127.0.0.1 8383', then open
# hterm and do the same.
Remote connected by ('127.0.0.1', 49464)
Waiting for client 2/2...
Remote connected by ('127.0.0.1', 49465)
# Single step through the data...
vtscope> step
# When the next chunk of data is an escape sequence, it is displayed
# with added spaces to make it easier to read.
Next up: offset 19, ESC [ 0 c
# Press ENTER to repeat the previous command.
vtscope>
Next up: offset 23, ESC [ ? 1 l
# Step through multiple escape sequences at a time
vtscope> step 10
Next up: offset 28, ESC [ ? 3 l
...
Next up: offset 73, ESC [ 0 m
# Start from the beginning of the data...
vtscope> reset
Next up: offset 0, 19 chars: "# 20120103.1540..."
# Seek directly to an offset, reset first if necessary...
vtscope> seek 73
Next up: offset 19, ESC [ 0 c
...
Next up: offset 73, ESC [ 0 m
# Exit vtscope. Pressing Ctrl+D on a blank line works too.
vtscope> exit
Check the comments in the "cmd_*" methods below for details about specific
commands.
"""
from __future__ import print_function
import argparse
import atexit
import traceback
import json
import os
import re
import readline
import socket
import sys
import time
HISTFILE = os.path.expanduser('~/.vtscope_history')
LISTEN_HOST = '127.0.0.1'
LISTEN_PORT = 8383
PROMPT = 'vtscope> '
MAX_TEXT = 15
class VTScope:
    """The VTScope tool.

    Holds the canned terminal-session data, the playback cursor, and the
    sockets of the connected client terminals.
    """
    # The list of connected terminals (socket objects).
    # NOTE(review): this is a mutable class attribute shared by every
    # instance of VTScope -- fine for the intended single-instance use,
    # but worth knowing before creating more than one scope.
    clients = []
    # True if we're running the REPL.
    running = False
    # The canned data.
    # NOTE(review): initialized as str; presumably filled by an 'open'
    # command defined elsewhere -- confirm whether it holds str or bytes.
    data = ''
    # The amount of sleep time between each character, in ms.
    delay_ms = 0
    # The list of header-defined OFFSETs where we might want to stop and view
    # the current state.
    stops = []
    # The current start/end position in the data.  The characters between these
    # two positions are next up to be sent to the clients.
    start_position = 0
    end_position = 0
    # Patterns for escape sequences we expect to see in the data.  Each entry
    # is (name, compiled-regex); patterns are tried in order against the text
    # immediately following an ESC (0x1b) byte.
    re_escapes = (
        # Control Sequence Introducers.
        ('CSI', re.compile(r'\[.*?[@-~]')),
        # Operating System Commands.
        ('OSC', re.compile(r'\].*?(\x1b\\|\x07)')),
        # Privacy Messages.
        # NOTE(review): the '^' anchor looks out of place compared to the
        # other string-type patterns above/below -- confirm it is intended.
        ('PM', re.compile(r'^.*?(\x1b\\|\x07)')),
        # Device Control Strings.
        ('DCS', re.compile(r'P.*?(\x1b\\|\x07)')),
        # Application Program Control.
        ('APC', re.compile(r'_.*?(\x1b\\|\x07)')),
        # DEC private sequences.
        ('DEC', re.compile(r'#[^\x1b]')),
        # Character set control.
        ('CHR', re.compile(r'%[^\x1b]')),
        # Graphic character sets.
        ('SCS', re.compile(r'[()*+-./][^\x1b]')),
        # Other escape sequences.
        ('ESC', re.compile(r'[^\x1b]')),
    )
def run(self):
"""Start the VTScope REPL."""
# Pressing ENTER on a blank line re-executes the previous command.
last_command_line = ''
self.running = True
while self.running:
try:
command_line = input(PROMPT)
except KeyboardInterrupt:
print('^C')
continue
except EOFError:
self.running = False
print('exit')
return
if not command_line:
command_line = last_command_line
else:
command_line = command_line.strip()
self.dispatch_command(command_line)
last_command_line = command_line
def scan_header(self, header):
"""Scan the header for OFFSET blocks where we might want to stop and
view the current state.
"""
offset_re = re.compile(
r'^@@\s+OFFSET:(\d+)\s+LINES:(\d+)\s+CURSOR:(\d+),(\d+)\s*$',
re.MULTILINE)
self.stops = []
m = offset_re.search(header)
while m:
self.stops.append({
'offset': int(m.group(1)),
'lines': int(m.group(2)),
'row': int(m.group(3)),
'column': int(m.group(4)),
})
m = offset_re.search(header, m.end())
def find_next_chunk(self):
"""Advance start_position and end_position to the next chunk in the
canned data.
"""
self.start_position = self.end_position
if self.start_position >= len(self.data):
return ''
if self.data[self.start_position] == '\x1b':
m = None
for (esc_name, pattern) in self.re_escapes:
m = pattern.match(self.data, self.start_position + 1)
if m:
break
if m:
self.end_position = m.end()
else:
self.end_position = self.start_position + MAX_TEXT
print('Unable to find end of escape sequence.')
sequence = self.data[self.start_position + 1 : self.end_position]
return json.dumps(esc_name + ' ' + ' '.join(sequence))[1:-1]
else:
self.end_position = self.data.find('\x1b', self.start_position)
if self.end_position == -1:
self.end_position = len(self.data)
plaintext = self.data[self.start_position : self.end_position]
if len(plaintext) > MAX_TEXT:
plaintext = plaintext[0:MAX_TEXT] + '...'
return ('%s chars: %s' %
(self.end_position - self.start_position,
json.dumps(plaintext)))
def show_next_chunk(self):
"""Find the next chunk of data, and display it to the user."""
snippet = self.find_next_chunk()
if snippet:
print('Next up: offset %s, %s' % (self.start_position, snippet))
else:
print('End of data.')
def send(self, data):
"""Broadcast a string to all clients.
This automatically removes any clients that appear to have disconnected.
"""
for i in range(len(self.clients), 0, -1):
fd = self.clients[i - 1]
try:
fd.send(data)
except IOError:
print('Client #%s disconnected.' % i)
del self.clients[i - 1]
def broadcast_chunk(self):
"""Broadcast the current chunk of data to the connected clients."""
if not self.delay_ms:
self.send(self.data[self.start_position : self.end_position])
else:
# If we have a delay, send a character at a time.
for ch in self.data[self.start_position : self.end_position]:
self.send(ch)
time.sleep(self.delay_ms / 1000.0)
def dispatch_command(self, command_line):
"""Dispatch a command line to an appropriate cmd_* method."""
command_args = command_line.split(' ')
command_name = command_args[0]
command_args = command_args[1:]
if not command_name:
return
command_function = getattr(self, 'cmd_' + command_name, None)
if not command_function:
print('Unknown command: "%s"' % command_name)
return
try:
command_function(command_args)
except Exception: # pylint: disable=broad-except
traceback.print_exc()
print('Internal error executing "%s"' % (command_name,))
# Commands start here, in alphabetical order.
def cmd_accept(self, args):
"""Wait for one or more clients to connect.
Usage: accept <client-count>
If <client-count> starts with a '+' as in 'accept +1', then this will
allow additional clients to connect. Otherwise all existing connections
are reset before accepting.
Clients can connect using the the 'nc' (aka netcat) command, with...
$ nc 127.0.0.1 8383
"""
if not args:
print('Missing argument.')
return
if args[0][0] == '+':
count = len(self.clients) + int(args[0][1:])
else:
count = int(args[0])
self.clients = []
print('Listening on %s:%s' % (LISTEN_HOST, LISTEN_PORT))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((LISTEN_HOST, LISTEN_PORT))
sock.listen(1)
while len(self.clients) < count:
print('Waiting for client %s/%s...' %
(len(self.clients) + 1, count))
(fd, addr) = sock.accept()
self.clients.append(fd)
print('Remote connected by', addr)
sock.close()
def cmd_bstep(self, args):
"""Step a given number of bytes."""
if args:
count = int(args[0])
else:
count = 1
self.end_position = self.start_position + count
if self.end_position == len(self.data):
self.end_position = len(self.data)
self.cmd_step([])
def cmd_delay(self, args):
"""Set a delay between each character, in milliseconds."""
if args:
self.delay_ms = int(args[0])
print('Delay is now: %s' % self.delay_ms)
def cmd_exit(self, args):
"""Exit vtscope.
Usage: exit
"""
if args:
print('Command takes no arguments')
return
self.running = False
def cmd_help(self, args):
"""Display help information to the user."""
if args:
print('Command takes no arguments')
return
first = True
for method in dir(self):
if method.startswith('cmd_'):
if not first:
print()
else:
first = False
print('%s:' % (method[4:],))
for line in getattr(self, method).__doc__.strip().splitlines():
if line.startswith(' '):
line = line[8:]
print(' %s' % (line,))
def cmd_send(self, args):
r"""Send a string to all clients.
You can use ESC to signify the ASCII code for escape, and JSON notation
to specify any other non-printables.
JSON notation uses '\uXXXX' to specify arbitrary codepoints, as well as
common | |
np.size(masses)+1))
# Qs = np.ones((chain_length, np.size(mass_r))) * kT * mass_r * tau **2
# Qs[0, :] *= dim
# Qs[0, -1] *= dim
# return cls(kT, Qs, nc)
#
# def initialize(self, kT=None, Qs=None):
# """
# Create initial positions and velocities of auxiliary degrees of freedom,
# positions are set to zero and velocities are boltzmann distributed
#
# Args:
# kT: Temperature in energy units
# Qs: np.array (chain_length, natom) of masses of NHCs
#
# Returns:
# aux_q, aux_v: auxiliary variables for position and velocity
# """
# if kT is None:
# kT = self.params['kT']
# if Qs is None:
# Qs = self.params['Qs']
#
# aux_q = np.zeros_like(Qs)
# factor = np.sqrt(kT/Qs)
# aux_v = np.random.normal(scale=factor)
# return aux_q, aux_v
#
# def compute_nose_kinetic_energy(self, velocities, masses):
# return 0.5 * np.sum(velocities ** 2 * masses)
#
# def compute_nose_potential_energy(self, coordinates, gkt, gnkt):
# return np.sum(coordinates[0] * gnkt) + np.sum(coordinates[1:] * gkt)
#
# def update(self, step_length, state):
# self.aux_q = np.copy(state['aux_position_NH'])
# self.aux_v = np.copy(state['aux_velocity_NH'])
# # Atomwise KE (note the factor of two)
# akin = np.sum(state['V']**2, axis=1) * np.reshape(state['masses'], (-1, ))
# vkin = vmass * vlogv**2
# kin = np.concatenate([akin, vkin])
# scale = np.ones_like(kin)
# self.aux_a = np.zeros_like(self.aux_q)
# self.gnkt = np.shape(state['V'])[-1] * self.params['kT']
# self.gkt = self.params['kT']
# self.aux_a[0] = (kin - self.gnkt) / self.params['Qs'][0]
# self.aux_a[1:] = (self.params['Qs'][:-1] * self.aux_v[:-1]**2 - self.gkt) / self.params['Qs'][1:]
# self.aux_a_V = 3.0 * (self.Pint - self.params['pressure']) / vmass #TODO
#
# for k in range(self.params['nc']): # loop of integrations substeps
# for w in self.ws: # loop of steps in Yoshida Suzuki integrator
# # This is sort of hacky due to translation from TeraChem, which
# # was itself translated from DOI: 10.1080/00268979600100761
# # appendix A
# wdts2 = w * step_length / self.params['nc']
# wdts4 = wdts2 * 0.5
# wdts8 = wdts4 * 0.5
#
# self.aux_v[self.M-1] += self.aux_a[self.M-1] * wdts4
# # Intra chain coupling M to 0
# for Mi in range(self.M-1):
# aa = np.exp(-wdts8 * self.aux_v[self.M-(Mi+1)])
# self.aux_v[self.M-1-(Mi+1)] = self.aux_v[self.M-1-(Mi+1)] * aa**2 + wdts4 * aa * self.aux_a[self.M-1-(Mi+1)]
#
# # Update kinetic energy
# aa = np.exp(-wdts2 * self.aux_v[0])
# scale *= aa
# self.aux_a[0] = (akin * scale**2 - self.gnkt) / self.params['Qs'][0]
#
# # Update positions
# self.aux_q += wdts2 * self.aux_v
#
# # Intra chain coupling 0 to M
# for Mi in range(self.M-1):
# aa = np.exp(-wdts8 * self.aux_v[Mi+1])
# self.aux_v[Mi] = self.aux_v[Mi] * aa**2 + wdts4 * aa * self.aux_a[Mi]
# self.aux_a[Mi+1] = (self.params['Qs'][Mi] * self.aux_v[Mi]**2 - self.gkt) / self.params['Qs'][Mi+1]
#
# self.aux_v[self.M-1] += self.aux_a[self.M-1] * wdts4
#
# # All this work to rescale velocities
# self.V = state['V'] * np.reshape(scale, (-1, 1))
# self.energy = self.compute_nose_kinetic_energy(self.aux_v, self.params['Qs'])
# self.energy += self.compute_nose_potential_energy(self.aux_q, self.gkt, self.gnkt)
# self.state_update = {
# 'V' : self.V,
# 'aux_position_NH': self.aux_q,
# 'aux_velocity_NH': self.aux_v,
# 'NHC_energy': self.energy,
# }
# return self.state_update
#class NoseHooverNPTPositionUpdate(PositionUpdate):
# coeffs = np.array([1.0/6.0, 1.0/120.0, 1.0/5040.0, 1.0/362880.0])
#
# def update(self, step_length, state):
# vlogv =
# aa = np.exp(0.5 * step_length * vlogv)
# aa2 = aa * aa
# arg2 = (0.5 * vlogv * step_length) ** 2
# poly = (((self.coeffs[3] * arg2 + self.coeffs[2]) * arg2 + self.coeffs[1]) * arg2 + coeffs[0]) * arg2 + 1.0
# bb = aa * poly * step_length
# self.X = state['X'] * aa2 + state['V'] * bb
# self.aux_q = state['aux_position_NH'] + vlogv * step_length
# self.state_update = {
# 'X' : self.X,
# }
# return self.state_update
class DistanceAnchor(Update):
"""
Move atoms by mass weighted coordinates to given distance. Without being
wrapped by TimeDependent Update, the positions are held constant at
dist_stop. With it, they can be interpolated from their initial distance to
the final distance. The rate is determined by linearly going from the
interatomic distance at the time_start to the dist_stop at the time_stop.
Velocities of the selected atoms are also set to zero.
This update should be placed immediately before or after the position update.
Params:
mass1 (float): mass of first atom
mass2 (float): mass of second atom
atom_ind1 (int): first atom index to pull toward one another
atom_ind2 (int): second atom index to pull toward one another
dist_stop (float): distance at which to stop pulling the atoms together
interpolate (bool): True to linearly move the wells based on time_frac if Update is TimeDependent
"""
h5_keys = ['X']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self,
mass1,
mass2,
atom_ind1,
atom_ind2,
dist_stop,
interpolate=False,
name='distance_anchor',
):
self.params = {
'mass1': mass1,
'mass2': mass2,
'atom_ind1': atom_ind1,
'atom_ind2': atom_ind2,
'dist_stop': dist_stop,
'interpolate': interpolate,
'name' : name,
}
self.requirements = set(['X', 'V'])
self.time_frac = 1.0 # Use the time_frac to determine when to restart
self.X1_start = None
self.X2_start = None
self.X1_move = None
self.X2_move = None
self.X = None
self.V = None
self.state_update = {}
def reset(self, state):
# Compute vector between atoms and initial distance
self.X1_start = state['X'][self.params['atom_ind1'], :]
self.X2_start = state['X'][self.params['atom_ind2'], :]
vec_start = self.X2_start - self.X1_start
dist_start = np.linalg.norm(vec_start)
# Compute mass weighted distances that each atom should move
dist1 = (dist_start - self.params['dist_stop']) * self.params['mass2'] / (self.params['mass1'] + self.params['mass2']) / dist_start
dist2 = (dist_start - self.params['dist_stop']) * self.params['mass1'] / (self.params['mass1'] + self.params['mass2']) / dist_start
# Compute vector that atoms will travel along
self.X1_move = vec_start * dist1
self.X2_move = -vec_start * dist2
    def update(self, step_length, state):
        """Return positions that pull the two atoms toward each other.

        Places each anchored atom a fraction ``time_frac`` of the way along
        its displacement vector (computed by reset()) and zeroes its
        velocity. Works on copies, so the caller's state arrays are not
        mutated in place.
        """
        self.X = np.copy(state['X'])
        self.V = np.copy(state['V'])
        if self.params['interpolate']:
            # Restart movement cycle: time_frac wrapping back to (or below)
            # the stored value marks the start of a new interpolation cycle.
            if state['time_frac'] <= self.time_frac:
                self.reset(state)
            self.time_frac = state['time_frac']
        else:
            # Without interpolation, recompute the anchor every step and pin
            # time_frac at 1.0 — i.e. snap directly to the target distance.
            self.reset(state)
            self.time_frac = 1.0
        # Linearly interpolate along vector as time goes by
        self.X[self.params['atom_ind1'], :] = self.X1_start + self.time_frac * self.X1_move
        self.X[self.params['atom_ind2'], :] = self.X2_start + self.time_frac * self.X2_move
        # Remove velocities
        self.V[self.params['atom_ind1'], :] = 0.0
        self.V[self.params['atom_ind2'], :] = 0.0
        self.state_update = {
            'X' : self.X,
            'V' : self.V,
        }
        return self.state_update
class Recenter(Update):
    """
    Shift the center of mass to the origin and strip center-of-mass
    translational/rotational velocity.

    Useful in combination with forces that do not preserve such quantities,
    such as stochastic thermostats. Should probably be placed prior to a
    position update.

    Params:
        masses ({nparticle,} ndarray): masses of particles
    """
    # HDF5 logging metadata: positions are the only quantity reported.
    h5_keys = ['X']
    h5_shapes = [('natom', 3)]
    h5_types = ['f']

    def __init__(self,
                 masses,
                 name='recenter',
                 ):
        self.params = dict(masses=masses, name=name)
        # This update reads positions and velocities from the state.
        self.requirements = {'X', 'V'}
        self.X = None
        self.V = None
        self.state_update = {}

    def update(self, step_length, state):
        """Return recentered positions and velocities for the current state."""
        recentered = init.initialize_centered(state['X'], state['V'],
                                              self.params['masses'])
        self.X, self.V = recentered
        self.state_update = {'X': self.X, 'V': self.V}
        return self.state_update
class MetropolisHastings(Update):
    """
    Metropolis-Hastings acceptance test (hybrid Monte Carlo style).

    This update proceeds in two steps: the first call simply records the
    position, momentum, and energies of the state; the second checks whether
    the new state is probable under exp(-dE / kT), and if not the state is
    reset to the recorded one with flipped momentum.

    The order of integration should generally be (according to Free Energy
    Computations):
    [Thermostat, MetropolisHastings, Velocity, Position, Velocity,
     MetropolisHastings, Thermostat]

    Params:
        masses ({nparticle,} ndarray): masses for particles
        kT (float): temperature in energy
        potential_key (str): state key that corresponds to desired potential energy to check
    """
    # Nothing is logged to HDF5 by this update.
    h5_keys = []
    h5_shapes = []
    h5_types = []

    def __init__(self,
                 masses,
                 kT,
                 potential_key='potential_energy',
                 name='hmc',
                 ):
        self.requirements = set(['X', 'V', potential_key])
        self.params = {
            'masses': masses,
            'kT': kT,
            'potential_key': potential_key,
            'name': name,  # stored for consistency with the other Update classes
        }
        self.potential_key = potential_key
        # Parity counter: odd calls record the state, even calls test it.
        self.counter = 0
        self.X_init = None
        self.V_init = None
        self.PE_init = None
        self.KE_init = None
        self.PE_final = None
        self.KE_final = None
        self.state_update = {}

    def update(self, step_length, state):
        """Record the state (odd calls) or apply the acceptance test (even calls)."""
        self.counter += 1
        if self.counter % 2 == 1:  # First call in integration loop, just tabulate current state
            # NOTE(review): references are held, not copies — this assumes the
            # integrator *replaces* state['X']/state['V'] rather than mutating
            # them in place. Confirm against the position/velocity updates.
            self.X_init = state['X']
            self.V_init = state['V']
            self.PE_init = state[self.potential_key]
            self.KE_init = utils.compute_kinetic_energy(state['V'], self.params['masses'])
            self.state_update = {}
        else:  # Second call in integration loop
            self.PE_final = state[self.potential_key]
            self.KE_final = utils.compute_kinetic_energy(state['V'], self.params['masses'])
            diff = self.PE_final + self.KE_final - (self.PE_init + self.KE_init)
            # BUG FIX: np.min(a, b) interprets the second argument as an
            # *axis*, so np.min(1.0, p) raised/misbehaved. The Metropolis
            # criterion needs the scalar minimum min(1, exp(-dE/kT)).
            if np.random.uniform() < min(1.0, np.exp(-diff / self.params['kT'])):
                self.state_update = {}  # Accept: keep current trajectory
            else:
                self.state_update = {  # Reject: revert to before, flip momentum
                    'X': self.X_init,
                    'V': -self.V_init,
                }
        return self.state_update
class BXDE(Update):
"""
This update proceeds in two steps, the first step simply holds the position
/momentum/gradient of the state, the second checks whether the new state has
crossed an energy barrier, if so the velocities are reflected away from the
barrier.
Different from the paper, the user may give a delta_PE which defines | |
<filename>fanslicer/pycuda_simulation/intensity_volume.py
# coding=utf-8
# pylint:disable=too-many-locals,unsupported-assignment-operation,too-many-instance-attributes
"""
Module with intensity volume class, to be used
for simulation of 2D intensity maps from a 3D
intensity volume
"""
import json
import os
import numpy as np
import matplotlib.pyplot as plt
import pydicom as dicom
import nibabel as nib
import pycuda.driver as drv
import pycuda.gpuarray as gpua
from pycuda.compiler import SourceModule
import fanslicer.pycuda_simulation.cuda_reslicing as cres
class IntensityVolume:
"""
Class that holds a 3D intensity volume image
and tools for reslicing it
"""
    def __init__(self,
                 config_dir,
                 vol_dir,
                 image_num=1,
                 downsampling=1,
                 file_type='npy',
                 npy_config=None):
        """
        Create intensity volume object

        :param config_dir: json file with reslicing parameters
        :param vol_dir: file with 3D volume
        :param file_type: type of 3D volume to be loaded,
            currently 'npy' (default), 'nii' or 'dicom'
        :param image_num: number of images to consider for preallocation
        :param downsampling: downsampling factor on image dimensions
        :param npy_config: json file with volume resolution, used only
            when file_type is 'npy'
        :raises ValueError: if config_dir is not an existing file
        """
        # Volume geometry, filled in by the load_volume_from_* helpers below.
        self.planar_resolution = None
        self.ct_volume = None
        self.voxel_size = None
        self.bound_box = None
        self.xdim = None
        self.ydim = None
        self.zdim = None
        if os.path.isfile(config_dir):
            # NOTE(review): this handle is never closed; a `with` block
            # would be safer.
            config_file = open(config_dir)
            self.config = json.load(config_file)
        else:
            raise ValueError("No valid config file!")
        # Check whether a nii or dicom is to be
        # loaded
        if file_type == 'dicom':
            self.load_volume_from_dicom(vol_dir)
        if file_type == 'nii':
            self.load_volume_from_nii(vol_dir)
        if file_type == 'npy':
            self.load_volume_from_npy(vol_dir, npy_config)
        # In order to speed up slicing, preallocate variables
        # Call function to preallocate relevant variables
        # to an existing list, first the GPU ones
        self.g_variables = []
        # Image dimensioning parameters
        self.image_variables = []
        # Kernel dimensioning
        self.blockdim = np.array([1, 1])
        # Initialise image num and downsample
        self.image_num = None
        self.downsampling = None
        # Now run allocation to set these vars
        self.preallocate_gpu_var(image_num=image_num,
                                 downsampling=downsampling)
        # Read kernel source code in C++
        self.kernel_code = cres.RESLICING_KERNELS
def load_volume_from_dicom(self, dicom_dir):
"""
Loads volume from Dicom
:param dicom_dir: dicom file
"""
if not os.path.isdir(dicom_dir):
raise ValueError("No valid file directory for dicom!")
image_list = os.listdir(dicom_dir)
image_list.sort()
# Get the parameters of the volume by checking the first image
first_image = dicom.dcmread(dicom_dir + image_list[0])
# Get planar resolution
self.planar_resolution = first_image.PixelSpacing
# Get z stepping
z_step = first_image.SpacingBetweenSlices
# Define voxel size
self.voxel_size = np.hstack((self.planar_resolution,
abs(z_step)))
# Get x y z dimensions
self.xdim = first_image.pixel_array.shape[0]
self.ydim = first_image.pixel_array.shape[1]
self.zdim = len(image_list)
self.ct_volume = np.zeros([self.xdim, self.ydim, self.zdim])
# Get intensity scales
for dicom_key in first_image.keys():
if first_image[dicom_key].keyword == 'RescaleIntercept':
intensity_bias = first_image[dicom_key].value
if first_image[dicom_key].keyword == 'RescaleSlope':
intensity_slope = first_image[dicom_key].value
# Go through every image
for i in range(self.zdim):
# Get image
current_image = dicom.dcmread(dicom_dir + image_list[i]).pixel_array
# Add to volume, taking into account z direction
if z_step > 0:
self.ct_volume[:, :, i] = current_image \
* intensity_slope + intensity_bias
else:
self.ct_volume[:, :, self.zdim - i - 1] \
= current_image * intensity_slope \
+ intensity_bias
# Define bounding box
min_x = first_image.ImagePositionPatient[0]
max_x = min_x + self.planar_resolution[0] * (self.xdim - 1)
min_y = first_image.ImagePositionPatient[1]
max_y = min_y + self.planar_resolution[1] * (self.xdim - 1)
if z_step < 0:
max_z = first_image.ImagePositionPatient[2]
min_z = max_z + z_step * (self.zdim - 1)
else:
min_z = first_image.ImagePositionPatient[2]
max_z = min_z + z_step * (self.zdim - 1)
self.bound_box = np.array([[min_x, min_y, min_z],
[max_x, max_y, max_z]])
return 0
def load_volume_from_nii(self, nii_dir):
"""
Loads volume from nii
:param nii_dir: nii file
"""
nii_file = nib.load(nii_dir)
volume = nii_file.get_fdata()
volume = np.flip(volume, axis=0)
volume = np.flip(volume, axis=1)
self.ct_volume = np.asarray(volume)
self.xdim = volume.shape[0]
self.ydim = volume.shape[1]
self.zdim = volume.shape[2]
# Get resolution parameters
affine = nii_file.affine
self.planar_resolution = abs(np.array([affine[0, 0],
affine[1, 1]]))
self.voxel_size = abs(np.array([affine[0, 0],
affine[1, 1],
affine[2, 2]]))
# Get bounding box, checking orientations
if affine[2, 2] > 0:
max_z = affine[2, 3] + affine[2, 2] * (self.zdim-1)
min_z = affine[2, 3]
else:
min_z = affine[2, 3] + affine[2, 2] * (self.zdim-1)
max_z = affine[2, 3]
if affine[1, 1] > 0:
max_y = affine[1, 3] + affine[1, 1] * (self.ydim-1)
min_y = affine[1, 3]
else:
min_y = affine[1, 3] + affine[1, 1] * (self.ydim-1)
max_y = affine[1, 3]
if affine[0, 0] > 0:
max_x = affine[0, 3] + affine[0, 0] * (self.xdim-1)
min_x = affine[0, 3]
else:
min_x = affine[0, 3] + affine[0, 0] * (self.xdim-1)
max_x = affine[0, 3]
self.bound_box = np.array([[min_x, min_y, min_z],
[max_x, max_y, max_z]])
def load_volume_from_npy(self, npy_dir, npy_config):
"""
Loads volume from npy file
:param npy_dir: nii file
:param npy_config: volume resolution for the npy volume
"""
# Add volume data
self.ct_volume = np.load(npy_dir)
# Add resolution parameters, first get config
if os.path.isfile(npy_config):
npy_config_file = open(npy_config)
npy_config = json.load(npy_config_file)
else:
raise ValueError("No valid config for npy file!")
# Now load the parameters
self.planar_resolution = np.array(npy_config["planar resolution"])
self.voxel_size = np.array(npy_config["voxel size"])
self.bound_box = np.array(npy_config["bounding box"])
return 0
    def scroll_volume(self):
        """
        Shows volume stored in intensity volume object

        Displays each z slice in turn with matplotlib, pausing briefly
        between slices. Interactive/blocking; intended for manual
        inspection only.
        """
        for z_ind in range(self.zdim):
            plt.cla()
            plt.imshow(self.ct_volume[:, :, z_ind], cmap='gray')
            plt.pause(0.01)
def preallocate_gpu_var(self,
image_num,
downsampling):
"""
Function to generate local gpu variables that will
be used for simulation. Variable sizes depend on the
config parameters. g_ prefix indicates gpu variables
:param image_num: maximum number of images to be simulated
:param downsampling: downsampling value on image dimensions
per call
"""
# First check if current image variables are empty or not,
# (if they have been set before). If they are not, reset
if self.g_variables:
self.g_variables = []
if self.image_variables:
self.image_variables = []
# Check if downsampling is at least 1
if downsampling < 1:
raise ValueError("Downsampling must be greater than 1")
# Check if maximum number of images is valid
if not isinstance(image_num, int) or image_num <= 0:
raise ValueError('image_num must be positive integer')
self.image_num = image_num
self.downsampling = downsampling
# Now, choose between curvilinear and linear array
transducer_type = self.config["simulation"]["transducer"]
if transducer_type == "curvilinear":
# For the curvilinear case, get
# geometrical parameters of fan shape as a float:
# 0-Angular ray resolution, 1-ray depth resolution, 2-angle aperture
# 3-ray depth, 4-ray offset to origin, 5-ray offset to image top
fan_parameters = np.array(self.config["simulation"]["fan_geometry"])
fan_parameters[0] = np.deg2rad(fan_parameters[0])
fan_parameters[2] = np.deg2rad(fan_parameters[2])
fan_parameters[3:6] = fan_parameters[3:6] * fan_parameters[1]
fan_parameters = fan_parameters.astype(np.float32)
# Append them to image variables (becomes index 0)
self.image_variables.append(fan_parameters)
# Get point cloud dimensions from fan parameters, necessary to
# know how many points will be sampled and used for intersection
coord_w = len(np.arange((-fan_parameters[2] / 2).astype(np.float32),
(fan_parameters[2] / 2).astype(np.float32),
fan_parameters[0]))
coord_h = len(np.arange(fan_parameters[4],
fan_parameters[4] + fan_parameters[3],
fan_parameters[1]))
# Append to image variables (becomes index 1)
slice_dim = np.array([coord_w, coord_h, image_num]).astype(np.int32)
self.image_variables.append(slice_dim)
# Through downsampling, obtain the output image dimensions
# and append (becomes index 2)
image_dim_2d = np.array(self.config["simulation"]
["image_dimensions"])
image_dim = np.append(image_dim_2d / downsampling, image_num) \
.astype(np.int32)
self.image_variables.append(image_dim)
# Do the same for the image pixel size (becomes index 3)
pixel_size = np.array(self.config["simulation"]["pixel_size"])
pixel_size = (downsampling * pixel_size).astype(np.float32)
self.image_variables.append(pixel_size)
# Knowing these dimensions, now append preallocate all
# GPU variables. First, 2D and 3D positions of the fans
# (become index 0 and 1, respectively)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(slice_dim) * 3),
dtype=np.float32))
# The 3D positions, with the same size (becomes index 1)
self.g_variables.\
append(gpua.GPUArray((1, np.prod(slice_dim) * 3),
dtype=np.float32))
# The fan intersection with the volume (becomes index 2)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(slice_dim)),
dtype=np.float32))
# The volume to be slice, in a 1D array. The only non-empty
# array (becomes index 3)
volume = self.ct_volume.copy()
volume = volume.reshape([1, np.prod(volume.shape)], order="F")
self.g_variables.append(gpua.to_gpu(volume.astype(np.float32)))
# Now, the outputs, with image_dim as dimension, both images
# and fan shape outline used for interpolation (become
# index 4 and 5, respectively)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.float32))
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.int32))
# Determine optimal blocksize for kernels
blockdim_x, blockdim_y = cres.get_block_size(coord_w, coord_h)
self.blockdim = np.array([blockdim_x, blockdim_y])
elif transducer_type == "linear":
# For the linear case, variable definition is simpler
# Get rectangular plane dimensions first, and append
# to image variables (becomes index 0)
image_dim_2d = np.array(self.config["simulation"]
["image_dimensions"])
image_dim = np.append(image_dim_2d / downsampling, image_num) \
.astype(np.int32)
self.image_variables.append(image_dim)
# Do the same for the image pixel size (becomes index 1)
pixel_size = np.array(self.config["simulation"]["pixel_size"])
pixel_size = (downsampling * pixel_size).astype(np.float32)
self.image_variables.append(pixel_size)
# Now preallocate gpu variables, first the positions
# (becomes index 0)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim) * 3),
dtype=np.float32))
# Secondly, | |
"Mashumairesh!!",
["<NAME>", "<NAME>", "<NAME>", "A<NAME>"],
],
["team Umifure", ["Aya Uchida", "Maaya Uchida", "Ayane Sakura"]],
[
"Black Raison d'être",
["Maaya Uchida", "<NAME>", "<NAME>", "Sumire Uesaka"],
],
[
"Teik<NAME>ki-dan・Hana-gumi",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Maaya Uchida",
],
],
["Choujougenshou-bu", ["<NAME>", "Eri Kitamura", "Maaya Uchida"]],
[
"CINDERELLA PROJECT",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Aya Suzaki",
"<NAME>",
"<NAME>",
],
],
[
"Acchi\u21d4Kocchi",
[
"<NAME>",
"<NAME>",
"<NAME>",
"Nobuhiko Okamoto",
"<NAME>ubo",
],
],
[
"N's",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"SMILE♥X",
[
"<NAME>",
"<NAME>",
"Lynn",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["H☆E☆S", ["<NAME>", "<NAME>", "<NAME>"]],
["Trident", ["<NAME>", "<NAME>", "<NAME>"]],
[
"3-nen E-gumi Utatan",
[
"<NAME>",
"<NAME>",
"Nobuhiko Okamoto",
"<NAME>",
"<NAME>",
],
],
[
"3-nen E-gumi Shuugakuryokou 4-han",
[
"<NAME>",
"<NAME>",
"Nobuhiko Okamoto",
"Yoshitaka Yamaya",
"<NAME>",
"<NAME>",
],
],
[
"3-nen E-gumi",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"fourfolium",
["<NAME>", "Megumi Yamaguchi", "Megumi Toda", "<NAME>"],
],
[
"SPR5",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>ai",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"Hekiyou Gakuen Seitokai Lv.2",
["<NAME>", "<NAME>", "Mina", "<NAME>"],
],
[
"PPP",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["Glitter*Green", ["Suzuko Mimori"]],
["Milky Holmes Feathers", ["Ayasa Ito", "Aimi"]],
[
"Prism☆Mates",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME> (Prizzmy)",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Prism☆Box", ["Prism☆Mates", "Prizmmy☆"]],
["Naive", ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]],
[
"PROJECT YAMATO 2199",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"ZAQ",
"ChouCho",
"<NAME>",
"yozuca*",
"CooRie",
"<NAME>",
"<NAME>",
"Faylan",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["<NAME>uku-bu", ["<NAME>", "<NAME>", "Takuma Terashima"]],
[
"Chikyuu Seifuku-bu ES",
["<NAME>", "<NAME>", "<NAME>"],
],
[
"Chikyuu Bouei-bu",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Chikyuu Bouei-bu HK",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Chikyuu Bouei-tai", ["<NAME>", "<NAME>", "<NAME>"]],
[
"Chikyuu Bouei-gumi Gasshou-tai",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Hound Dog", ["<NAME>"]],
[
"Nanamori Chuu☆Goraku-bu",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"Nanamori Chu☆Seitokai",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"Love Planet Five ~I've special unit~",
["KOTOKO", "<NAME>", "MELL", "<NAME>", "<NAME>"],
],
["Etsuko Yakushimaru Metro Orchestra", ["Etsuko Yakushimaru"]],
[
"EXILE TRIBE",
["GENERATIONS", "THE RAMPAGE", "FANTASTICS", "Sandaime J SOUL BROTHERS"],
],
["GOING UNDER GROUND", ["Sou Matsumoto (GOING UNDER GROUND)"]],
["SOUL'd OUT", ["Diggy-MO'"]],
[
"fragments",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["tiaraway", ["<NAME>", "<NAME>"]],
["THE BAND HAS NO NAME", ["<NAME>", "<NAME>"]],
[
"<NAME>",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["Black Biscuits", ["<NAME>", "<NAME>"]],
["SIAM SHADE", ["<NAME>", "<NAME>"]],
["ACID", ["<NAME>", "<NAME>"]],
["Vivian or Kazuma", ["<NAME>", "<NAME>"]],
["livetune", ["kz (livetune)"]],
[
"Friends",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"MAKO",
"<NAME>",
],
],
[
"Magical Sweets",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["<NAME>-tai", ["<NAME>", "<NAME>", "<NAME>"]],
[
"<NAME>-tai",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Uta Kano♪",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Chiwa Saito",
"Asuka Oogame",
"<NAME>",
],
],
["nana×nana", ["<NAME>", "<NAME>"]],
["<NAME>", ["<NAME>", "<NAME>", "Ai Nonaka", "<NAME>"]],
[
"S<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Sister Princess +1",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["PoppinS", ["<NAME>akahara", "Ai Shimizu"]],
["SmileY inc.", ["<NAME>otsubo"]],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"Y<NAME>",
"<NAME>",
],
],
[
"Ame no <NAME>-dan",
["<NAME>", "<NAME>", "<NAME>"],
],
["Colors", ["<NAME>", "Aoi Yuuki"]],
["Crush Tears", ["Yu Kobayashi"]],
["MilkyWay", ["Say<NAME>", "You Kikkawa", "Koharu Kusumi"]],
["Kira☆Pika", ["Koharu Kusumi", "Mai Hagiwara"]],
["Maids", ["<NAME>", "Aoi Yuuki", "<NAME>"]],
[
"Jashin\u2605Girls",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"XX:me",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["NonSugar", ["<NAME>aka", "<NAME>", "<NAME>"]],
[
"All\u2606Jewel Idols",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["Run Girls, Run!", ["<NAME>", "<NAME>", "<NAME>"]],
[
"Wake Up, Girls!",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Wake Up, May'n!", ["May'n", "Wake Up, Girls!"]],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Y<NAME>",
],
],
[
"D-selections",
[
"<NAME>",
"<NAME>",
"Yu<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Dressing Flower",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"FriendAll",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Y<NAME>",
"<NAME>",
],
],
["MUG-MO", ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]],
[
"Miracle\u2606Kiratts",
["<NAME>", "<NAME>", "<NAME>"],
],
["TRiANGLE", ["<NAME>aka"]],
[
"Gaarmageddon",
["<NAME>", "<NAME>", "<NAME>"],
],
[
"<NAME>",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["team Sazanami", ["<NAME>", "<NAME>", "<NAME>"]],
[
"QUELL",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"SolidS",
["Takuya Eguchi", "Soma Saito", "Natsuki Hanae", "Yuuichirou Umehara"],
],
["AKATSUKI", ["<NAME>", "<NAME>", "Yuuichirou Umehara"]],
[
"ArtiSTARs",
[
"Yuuichirou Umehara",
"KENN",
"Yuki Ono",
"Wataru Hatano",
"Shouta Aoi",
"Takuya Eguchi",
],
],
["Beit", ["Yuuichirou Umehara", "<NAME>", "<NAME>"]],
[
"apple-polisher",
["Shouta Aoi", "Yuuichirou Umehara", "<NAME>", "<NAME>"],
],
["<NAME>", ["<NAME>", "Yuuichirou Umehara"]],
[
"Kabukibu Rocks",
[
"Yuuichirou Umehara",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Yumenosaki Dream Stars",
[
"Trickstar",
"fine",
"UNDEAD",
"Knights",
"Ryuusei-tai",
"Ra*bits",
"2wink",
"Valkyrie",
"Switch",
"MaM",
],
],
[
"Trickstar",
["Yuki Kaji", "Tetsuya Kakihara", "<NAME>", "<NAME>"],
],
[
"fine",
["Hikaru Midorikawa", "Takuya Eguchi", "<NAME>", "<NAME>"],
],
["UNDEAD", ["Toshiki Masuda", "Kei Hosogai", "Y<NAME>", "<NAME>"]],
[
"Knights",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Ryuusei-tai",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Ra*bits", ["Y<NAME>ai", "<NAME>", "<NAME>", "Jun<NAME>"]],
["2wink", ["Soma Saito"]],
["Valkyrie", ["<NAME>", "Jun Oosuka"]],
["Switch", ["<NAME>", "<NAME>", "<NAME>"]],
["MaM", ["<NAME>"]],
["Eden", ["Adam", "Eve"]],
["THRIVE", ["<NAME>", "<NAME>", "<NAME>"]],
[
"KiLLER KiNG",
["<NAME>", "T<NAME>", "<NAME>", "<NAME>"],
],
[
"MooNs",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Kitakore", ["D<NAME>", "<NAME>"]],
["B-PROJECT", ["Kitakore", "MooNs", "KiLLER KiNG", "THRIVE"]],
["Uchouten BOYS", ["<NAME>", "Taku Yashiro", "Kazutomi Yamamoto"]],
[
"Seidou Koukou Yakyuu-bu",
[
"<NAME>",
"<NAME>",
"<NAME>",
"Yoshitsugu Matsuoka",
"Sh<NAME>",
],
],
[
"<NAME>lywood",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"QUARTET NIGHT",
["<NAME>", "<NAME>", "<NAME>", "Sh<NAME>"],
],
[
"ST☆RISH",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Shining Stars", ["ST☆RISH", "QUARTET NIGHT"]],
[
"Procellarum",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Six Gravity",
[
"<NAME>",
"<NAME>",
"<NAME>",
"Tomoaki Maeno",
"<NAME>",
"KENN",
],
],
["Love Desire", ["Toshiyan", "Tora*", "Sakuya", "kenty"]],
["Toy\u2606Gungun", ["<NAME>", "Tomoaki Maeno", "Yoshitsugu Matsuoka"]],
["Jupiter", ["<NAME>ima", "Yoshitsugu Matsuoka", "Daichi Kanbara"]],
[
"High\u00d7Joker",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["DRAMATIC STARS", ["<NAME>", "<NAME>", "Taku Yashiro"]],
["W", ["<NAME>", "<NAME>"]],
["S.E.M", ["<NAME>", "<NAME>", "<NAME>"]],
[
"315 STARS",
[
"S.E.M",
"W",
"DRAMATIC STARS",
"High\u00d7Joker",
"Jupiter",
"Beit",
"Caf\u00e9 Parade!",
"Mofumofuen",
],
],
[
"From4to7",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"devils and realist",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"THE HIGH CADENCE",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"SKET ROCK",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["Cluster'S", ["<NAME>", "<NAME>", "<NAME>"]],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Team <NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Atsushi Abe",
],
],
[
"ROUTE85",
[
"<NAME>",
"Atsushi Abe",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["CooRie", ["rino"]],
['MEZZO"', ["Atsushi Abe", "KENN", "Takuya Satou"]],
["X.I.P.", ["<NAME>", "<NAME>", "<NAME>"]],
["3 Majesty", ["<NAME>", "<NAME>", "<NAME>"]],
[
"Jigoku no Sata All Stars",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Liar-S",
["<NAME>o", "Tetsuya Kakihara", "<NAME>", "Takuma Terashima"],
],
[
"Kaidou 4 Kyoudai",
[
"<NAME>",
"<NAME>",
"<NAME>",
"Takuma Terashima",
],
],
[
"Pentacle\u2605",
[
"Soma Saito",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"STYLE FIVE",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Buster Bros!!!", ["<NAME>", "<NAME>", "<NAME>"]],
[
"The Dirty Dawg ",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Fling Posse", ["<NAME>", "<NAME>", "<NAME>"]],
[
"DOKONJOFINGER",
["<NAME>", "<NAME>"],
],
["Matenro", ["<NAME>", "<NAME>", "<NAME>"]],
[
"SOARA",
[
"<NAME>",
"Yuki Ono",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>-<NAME>\u2661Danshis",
[
"<NAME>",
"<NAME>",
"<NAME>",
| |
<filename>algorithm_library.py
"""
Algorithm Library (Python v3.9.6+)
Implemented by <NAME>
For personal educational review.
"""
import sys
import heapq
class TarjanSCC():
    """
    Compute all strongly-connected components in a directed graph G.
    Utilizes Tarjan's strongly-connected components recursion DFS algorithm.
    Returns a list of strongly-connected components.
    """
    def __init__(self, graph):
        """
        Instantiate graph information for strongly-connected component searching of G.
        :param graph <list<list>>: Adjacency matrix for the graph G. Nodes are indexed by
                                   non-negative integers, i.e. 0, 1, 2, ...
        """
        self.G = graph          # Adjacency Matrix for Graph
        self.dfs_stack = []     # DFS Stack
        self.index = 0          # Exploration Index
        self.components = []    # Accumulator for every completed SCC.
        self.D = {
            k: {
                'index': None,      # Track exploration index.
                'minlink': None,    # Track minimal sub-tree / reachable index.
                'instack': False    # Track DFS stack presence (for efficient lookup).
            }
            for k in range(len(graph))
        }

    def tarjan_dfs(self, reverse=False):
        """
        Execute Tarjan's strongly-connected components algorithm. Sorted in topological
        order from source to sink.
        :param reverse <bool>: Topological sort on list of SCC from sinks to sources
                               instead of sources to sinks.
        :return: list of SCCs, each a list of node indices.
        """
        # Reset the accumulator so repeated calls do not duplicate output.
        self.components = []
        # Search for strongly-connected components for all unexplored nodes.
        for v in range(len(self.G)):
            if self.D[v]['index'] is None:
                self.scc(v)
        SCC = list(self.components)
        # Topological Sort: Tarjan emits components sinks-first; reverse the
        # discovered list to sort from sources to sinks in the graph G.
        if not reverse:
            SCC.reverse()
        return SCC

    def scc(self, v):
        """
        DFS from node v, collecting strongly-connected components.

        BUG FIX: the original discarded the return values of the recursive
        scc(w) calls, so every component completed deeper in the recursion
        was silently dropped from the output. Completed components (including
        those from the sub-tree of v) are now appended to self.components;
        the component rooted at v (empty when v is not a root) is still
        returned for backward compatibility.
        """
        # Process the node v. Set the exploration index,
        # initialize the minlink index, and push into stack.
        self.D[v]['index'] = self.index
        self.D[v]['minlink'] = self.index
        self.index += 1
        self.dfs_stack.append(v)
        self.D[v]['instack'] = True
        # Explore adjacent nodes.
        for w in range(len(self.G)):
            if self.G[v][w] != 0:
                if self.D[w]['index'] is None:
                    # Unexplored: recurse, then propagate the minimum
                    # exploration index reachable from w.
                    self.scc(w)
                    self.D[v]['minlink'] = min(
                        self.D[v]['minlink'],
                        self.D[w]['minlink']
                    )
                elif self.D[w]['instack']:
                    # Back-edge: use the target's exploration index, NOT its
                    # minlink, which reflects the target's own sub-tree.
                    self.D[v]['minlink'] = min(
                        self.D[v]['minlink'],
                        self.D[w]['index']
                    )
                # Explored nodes not on the stack belong to already-emitted
                # SCCs and are ignored.
        # Emit the SCC if v is the minimal reachable node (root) of its SCC.
        scc_detect = []
        if self.D[v]['minlink'] == self.D[v]['index']:
            # The stack above v (inclusive) is exactly v's component: stack
            # order follows increasing exploration index.
            while self.dfs_stack and self.D[self.dfs_stack[-1]]['index'] >= self.D[v]['index']:
                w = self.dfs_stack.pop()
                scc_detect.append(w)
                self.D[w]['instack'] = False
            self.components.append(scc_detect)
        return scc_detect
class DijkstraBFS():
    """
    Shortest (or heuristically longest) path search via Dijkstra's algorithm.

    NOTE: Dijkstra's algorithm is only correct for non-negative edge weights,
    and the maximal mode is a heuristic — longest simple path is NP-hard in
    general graphs.
    """
    def __init__(self, graph, maximal=False):
        """
        Instantiate graph information for minimal breadth-first searching in Dijkstra's Algorithm.
        :param graph <list<list>>: Adjacency matrix (with optional weights) for the graph G.
                                   Nodes are indexed by non-negative integers, i.e. 0, 1, 2, ...
        :param maximal <bool>: Return maximal path(s) / distance(s) instead.
        """
        self.G = graph
        extrema = float('inf') if not maximal else -float('inf')
        # dist[v][y]: best known distance from v to y; path[v][y]: node list.
        self.dist = {
            x: {
                y: extrema if x != y else 0
                for y in range(len(graph))
            } for x in range(len(graph))
        }
        self.path = {
            x: {
                y: [] if x != y else [x]
                for y in range(len(graph))
            } for x in range(len(graph))
        }
        self.maximal = maximal

    def bfs(self, initial_node=None):
        """
        Perform a minimal (or maximal) priority-first search of the graph G.
        :param initial_node <int>: Initial node specification instead of processing entire graph.
        :return: (dist, path) dictionaries keyed by [source][target].
        """
        # Search from all initial nodes in case of directed or disconnected components.
        task = list(range(len(self.G)))
        if initial_node is not None and initial_node in task:
            task = [initial_node]
        for v in task:
            # Reset queue and processed set per source.
            heap = []
            heapq.heappush(heap, (0, v))
            processed = set()
            while heap:
                _, a = heapq.heappop(heap)
                # Skip stale queue entries: a node may have been pushed
                # several times before it was finalized.
                if a in processed:
                    continue
                processed.add(a)
                # Relax all edges out of a.
                for b in range(len(self.G)):
                    if b != a and self.G[a][b] != 0:
                        candidate = self.dist[v][a] + self.G[a][b]
                        if any([
                            not self.maximal and self.dist[v][b] > candidate,
                            self.maximal and self.dist[v][b] < candidate
                        ]):
                            self.dist[v][b] = candidate
                            self.path[v][b] = self.path[v][a] + [b]
                            if b not in processed:
                                # BUG FIX: the priority must be the cumulative
                                # distance from the source, not the single edge
                                # weight G[a][b] — otherwise nodes can be
                                # finalized before their distance settles.
                                heapq.heappush(
                                    heap,
                                    (candidate, b) if not self.maximal else (-candidate, b)
                                )
        # Output distance(s) and path(s) in the graph G.
        return self.dist, self.path
class KruscalMST():
    """
    Kruskal's Minimal (or Maximal) Spanning Tree algorithm.
    """
    def __init__(self, graph, maximal=False):
        """
        Instantiate graph information for Kruskal's Minimal Spanning Tree algorithm.
        :param graph <list<list>>: Adjacency matrix (with optional weights) for the graph G.
                                   Nodes are indexed by non-negative integers, i.e. 0, 1, 2, ...
        :param maximal <bool>: Return a maximal spanning tree instead.
        """
        # Instantiate graph and sort edge weights via a priority heap.
        self.G = graph
        self.E = []
        for i in range(len(graph)):
            for j in range(len(graph)):
                # Insert weighted edge into priority heap / queue.
                if graph[i][j] != 0:  # Zero means non-existent edge.
                    heapq.heappush(
                        self.E,
                        (graph[i][j], (i, j)) if not maximal else (-graph[i][j], (i, j))
                    )
        # Union-find surrogate: each node maps to the set of nodes in its
        # current connected component.
        self.setcache = {
            x: set([x]) for x in range(len(graph))
        }
        self.maximal = maximal

    def mst(self):
        """
        Compute a list of edges that constitutes the minimal spanning tree of the graph G.
        :return: (tree, score) — the list of edges of the spanning tree (a
            spanning forest for disconnected graphs) and the cumulative edge weight.
        """
        tree = []
        score = 0
        # BUG FIX: a spanning tree over n nodes has n - 1 edges; the original
        # looped until len(tree) == n, which always drained the heap and then
        # raised IndexError on heappop. Also guard on heap exhaustion so
        # disconnected graphs yield a forest instead of crashing.
        while len(tree) < len(self.G) - 1 and self.E:
            # Pop the minimal (or maximal) edge.
            w, e = heapq.heappop(self.E)
            # Accept the edge only if it joins two distinct components.
            if self.setcache[e[0]] != self.setcache[e[1]]:
                # Union.
                u = self.setcache[e[0]] | self.setcache[e[1]]
                # BUG FIX: every member of the merged component must point at
                # the union — updating only the two endpoints left stale sets
                # behind and allowed cycle edges into the tree.
                for node in u:
                    self.setcache[node] = u
                # Append edge to MST.
                tree.append(e)
                if not self.maximal:
                    score += w
                else:
                    score -= w
        return tree, score
class KnapSack():
def __init__(self, value, cost, weight=None, repetition=False):
    """
    Instantiate dynamic memory for the KnapSack Problem.

    :param value <list<float>>: List of values / gains / profits for items in the knapsack.
    :param cost <list<int>>: List of (positive integer) weights / losses / costs for items in the knapsack.
    :param weight <int|None>: Maximum weight of knapsack. If not set, default to sum of all costs.
    :param repetition <bool>: Repeat items in knapsack.
    """
    # Reject mismatched value/cost lists, non-positive or non-integral costs,
    # and a non-integral weight limit before building any state.
    mismatched_lists = len(value) != len(cost)
    invalid_costs = any(not isinstance(c, int) or c <= 0 for c in cost)
    invalid_weight = weight is not None and not isinstance(weight, int)
    if mismatched_lists or invalid_costs or invalid_weight:
        print(
            f"""[KnapSackError] Cannot solve knapsack problem with non-integral or non-positive weight(s) / cost(s).
            For non-integral cost(s), either approximate costs to nearest integer or utilize linear programming (LP)
            optimization algorithms instead.""",
            file=sys.stderr,
            flush=True
        )
        sys.exit(1)
    # Instantiate dynamic memory.
    self.value = value
    self.cost = cost
    # Knapsack capacity defaults to the total cost of all items, which is
    # large enough to admit every item at once; a caller-supplied weight
    # shrinks the DP table for efficiency.
    self.limit = sum(cost) if weight is None else int(weight)
    # Reward matrix Q[w][k] -> (best value, chosen item indices) considering
    # items 0..k at capacity w. Column k == -1 is the "no items" base case;
    # row w == 0 is the "no capacity" base case.
    self.Q = {
        **{w: {-1: (0, [])} for w in range(self.limit + 1)},
        **{0: {k: (0, []) for k in range(-1, len(value))}}
    }
    self.rep = repetition
def compute_knapsack(self):
"""
Compute the optimal knapsack via dynamic programming.
"""
Q_opt = (-float('inf'), [])
for w in range(self.limit+1):
for k in range(len(self.value)):
if self.cost[k] > w:
# Cannot add item into knapsack without overflowing the limit.
# Set to knapsack not including item k.
self.Q[w][k if not self.rep else -1] = self.Q[w][k-1 if not self.rep else -1]
else:
test_val = self.Q[w-self.cost[k]][k-1 if not self.rep else -1][0] + self.value[k]
if test_val > self.Q[w][k-1 if not self.rep else -1][0]:
# Include new item. Update knapsack.
self.Q[w][k if not self.rep else -1] = (
test_val,
self.Q[w-self.cost[k]][k-1 if not self.rep else -1][1] + [k]
)
else:
# Exclude new item.
self.Q[w][k if not self.rep | |
<gh_stars>0
import os
import time
import copy
import socket
from datetime import datetime, timezone, timedelta
import json as oldjson
import zmq
import zmq.utils.jsonapi as json
from transitions import Machine, MachineError, State
import argparse
import requests
from requests.auth import HTTPBasicAuth
import logging
from psalg.utils.syslog import SysLog
import string
from p4p.client.thread import Context
from threading import Thread, Event
PORT_BASE = 29980
POSIX_TIME_AT_EPICS_EPOCH = 631152000
report_keys = ['error', 'fileReport']
class DaqControl:
    """Base class for controlling data acquisition.

    Wraps a pair of zmq sockets to the control level: a SUB socket for
    asynchronous status/error messages and a REQ socket for synchronous
    request/reply commands.
    """

    # transitionId is a subset of the TransitionId.hh enum
    transitionId = {
        'ClearReadout'      : 0,
        'Reset'             : 1,
        'Configure'         : 2,
        'Unconfigure'       : 3,
        'BeginRun'          : 4,
        'EndRun'            : 5,
        'BeginStep'         : 6,
        'EndStep'           : 7,
        'Enable'            : 8,
        'Disable'           : 9,
        'SlowUpdate'        : 10,
        'L1Accept'          : 12,
    }

    # names of the transitions between states
    transitions = ['rollcall', 'alloc', 'dealloc',
                   'connect', 'disconnect',
                   'configure', 'unconfigure',
                   'beginrun', 'endrun',
                   'beginstep', 'endstep',
                   'enable', 'disable',
                   'slowupdate', 'reset']

    # state machine states, ordered from least to most initialized
    states = [
        'reset',
        'unallocated',
        'allocated',
        'connected',
        'configured',
        'starting',
        'paused',
        'running'
    ]

    # default readout group is self.platform

    def __init__(self, *, host, platform, timeout):
        """
        :param host: host where the control level runs
        :param platform: platform number (selects the zmq port pair)
        :param timeout: zmq receive timeout in milliseconds
        """
        self.host = host
        self.platform = platform
        self.timeout = timeout

        # initialize zmq sockets: SUB for status broadcasts, REQ for commands
        self.context = zmq.Context(1)
        self.front_sub = self.context.socket(zmq.SUB)
        self.front_sub.connect('tcp://%s:%d' % (host, front_pub_port(platform)))
        self.front_sub.setsockopt(zmq.SUBSCRIBE, b'')
        self.front_req = None
        self.front_req_endpoint = 'tcp://%s:%d' % (host, front_rep_port(platform))
        self.front_req_init()

    #
    # DaqControl.getState - get current state
    #
    def getState(self):
        """Return the current state name, or 'error' on failure."""
        retval = 'error'
        try:
            msg = create_msg('getstate')
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except zmq.Again:
            # a REQ socket that timed out must be recreated before reuse
            logging.error('getState() timeout (%.1f sec)' % (self.timeout / 1000.))
            logging.info('getState() reinitializing zmq socket')
            self.front_req_init()
        except Exception as ex:
            logging.error('getState() Exception: %s' % ex)
        except KeyboardInterrupt:
            print('KeyboardInterrupt')
        else:
            try:
                retval = reply['header']['key']
            except KeyError:
                pass

        return retval

    #
    # DaqControl.getPlatform - get platform
    #
    def getPlatform(self):
        """Return the platform description dict, or {} on failure."""
        retval = {}
        try:
            msg = create_msg('getstate')
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except zmq.Again:
            logging.error('getPlatform() timeout (%.1f sec)' % (self.timeout / 1000.))
            logging.info('getPlatform() reinitializing zmq socket')
            self.front_req_init()
        except Exception as ex:
            logging.error('getPlatform() Exception: %s' % ex)
        except KeyboardInterrupt:
            print('KeyboardInterrupt')
        else:
            try:
                retval = reply['body']
            except KeyError:
                pass

        return retval

    #
    # DaqControl.getJsonConfig - get json configuration
    #
    def getJsonConfig(self):
        """Return the active-detector configuration as pretty-printed JSON text.

        Builds an {"activedet": {level: {alias: {...}}}} structure from the
        platform dict, skipping the "control" level.
        """
        src = self.getPlatform()
        dst = {"activedet": {}}
        for level, item1 in src.items():
            if level == "control":
                continue    # skip
            if level not in dst["activedet"]:
                dst["activedet"][level] = {}
            for _, item2 in item1.items():
                alias = item2["proc_info"]["alias"]
                dst["activedet"][level][alias] = {}
                if "det_info" in item2:
                    dst["activedet"][level][alias]["det_info"] = item2["det_info"].copy()
                dst["activedet"][level][alias]["active"] = item2["active"]
        return oldjson.dumps(dst, sort_keys=True, indent=4)

    #
    # DaqControl.storeJsonConfig - store json configuration
    #
    def storeJsonConfig(self, json_data):
        """Send json_data to the control level for storage; return reply body."""
        retval = {}
        body = {"json_data": json_data}
        try:
            msg = create_msg('storejsonconfig', body=body)
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except zmq.Again:
            logging.error('storeJsonConfig() timeout (%.1f sec)' % (self.timeout / 1000.))
            logging.info('storeJsonConfig() reinitializing zmq socket')
            self.front_req_init()
        except Exception as ex:
            logging.error('storeJsonConfig() Exception: %s' % ex)
        except KeyboardInterrupt:
            print('KeyboardInterrupt')
        else:
            try:
                retval = reply['body']
            except KeyError:
                pass

        return retval

    #
    # DaqControl.selectPlatform - select platform
    #
    def selectPlatform(self, body):
        """Send a 'selectplatform' request with the given body; return reply body."""
        retval = {}
        try:
            msg = create_msg('selectplatform', body=body)
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except zmq.Again:
            logging.error('selectPlatform() timeout (%.1f sec)' % (self.timeout / 1000.))
            logging.info('selectPlatform() reinitializing zmq socket')
            self.front_req_init()
        except Exception as ex:
            logging.error('selectPlatform() Exception: %s' % ex)
        except KeyboardInterrupt:
            print('KeyboardInterrupt')
        else:
            try:
                retval = reply['body']
            except KeyError:
                pass

        return retval

    #
    # DaqControl.getInstrument - get instrument name
    #
    def getInstrument(self):
        """Return the instrument name, or None on failure."""
        r1 = None
        try:
            msg = create_msg('getinstrument')
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except Exception as ex:
            print('getInstrument() Exception: %s' % ex)
        else:
            try:
                r1 = reply['body']['instrument']
            except Exception as ex:
                print('getInstrument() Exception: %s' % ex)

        return r1

    #
    # DaqControl.getStatus - get status
    #
    def getStatus(self):
        """Return (transition, state, config_alias, recording, platform).

        The first four elements default to 'error' and platform to {} when the
        request fails or the reply is malformed.
        """
        r1 = r2 = r3 = r4 = 'error'
        r5 = {}
        try:
            msg = create_msg('getstatus')
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except Exception as ex:
            print('getStatus() Exception: %s' % ex)
        except KeyboardInterrupt:
            print('KeyboardInterrupt')
        else:
            try:
                r1 = reply['body']['transition']
                r2 = reply['body']['state']
                r3 = reply['body']['config_alias']
                r4 = reply['body']['recording']
                r5 = reply['body']['platform']
            except KeyError:
                pass

        return (r1, r2, r3, r4, r5)

    #
    # DaqControl.monitorStatus - monitor the status
    #
    def monitorStatus(self):
        """Block on the SUB socket until a recognized message arrives.

        Returns a 4-tuple whose meaning depends on the message key:
          'status'     -> (transition, state, config_alias, recording)
          'error'      -> ('error', error message, 'error', 'error')
          'fileReport' -> ('fileReport', path, 'error', 'error')
          'progress'   -> ('progress', transition, elapsed, total)
        Returns (None, None, None, None) on interrupt or malformed message.
        """
        # process messages
        while True:
            try:
                msg = self.front_sub.recv_json()

                if msg['header']['key'] == 'status':
                    # return transition, state, config_alias, recording
                    return msg['body']['transition'], msg['body']['state'], msg['body']['config_alias'], msg['body']['recording']

                elif msg['header']['key'] == 'error':
                    # return 'error', error message, 'error', 'error'
                    return 'error', msg['body']['err_info'], 'error', 'error'

                elif msg['header']['key'] == 'fileReport':
                    # return 'fileReport', path, 'error', 'error'
                    return 'fileReport', msg['body']['path'], 'error', 'error'

                elif msg['header']['key'] == 'progress':
                    # return 'progress', transition, elapsed, total
                    return 'progress', msg['body']['transition'], msg['body']['elapsed'], msg['body']['total']

            except KeyboardInterrupt:
                break
            except KeyError as ex:
                logging.error('KeyError: %s' % ex)
                break

        return None, None, None, None

    #
    # DaqControl.setState - change the state
    # The optional second argument is a dictionary containing
    # one entry per transition that contains information that
    # will be put into the phase1-json of the transition. An example:
    # {'beginstep': {'myvalue1':3 , 'myvalue2': {'myvalue3':72}},
    #  'enable': {'myvalue5':37, 'myvalue6': 'hello'}}
    #
    def setState(self, state, phase1Info=None):
        """Request a state change; return an error message or None on success."""
        # avoid the shared-mutable-default pitfall: create a fresh dict per call
        if phase1Info is None:
            phase1Info = {}
        errorMessage = None
        try:
            msg = create_msg('setstate.' + state, body=phase1Info)
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except zmq.Again:
            errorMessage = 'setState() timeout (%.1f sec)' % (self.timeout / 1000.)
            logging.info('setState() reinitializing zmq socket')
            self.front_req_init()
        except Exception as ex:
            errorMessage = 'setState() Exception: %s' % ex
        else:
            try:
                errorMessage = reply['body']['err_info']
            except KeyError:
                pass

        return errorMessage

    #
    # DaqControl.setConfig - set BEAM/NOBEAM
    #
    def setConfig(self, config):
        """Set the configuration alias; return an error message or None."""
        errorMessage = None
        try:
            msg = create_msg('setconfig.' + config)
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except Exception as ex:
            errorMessage = 'setConfig() Exception: %s' % ex
        else:
            try:
                errorMessage = reply['body']['err_info']
            except KeyError:
                pass

        return errorMessage

    #
    # DaqControl.setRecord - set record flag
    #   True or False
    #
    def setRecord(self, recordIn):
        """Set the recording flag; return an error message or None."""
        errorMessage = None
        if isinstance(recordIn, bool):
            if recordIn:
                record = '1'
            else:
                record = '0'

            try:
                msg = create_msg('setrecord.' + record)
                self.front_req.send_json(msg)
                reply = self.front_req.recv_json()
            except Exception as ex:
                errorMessage = 'setRecord() Exception: %s' % ex
            else:
                try:
                    errorMessage = reply['body']['err_info']
                except KeyError:
                    pass
        else:
            errorMessage = 'setRecord() requires True or False'

        return errorMessage

    #
    # DaqControl.setBypass - set bypass_activedet flag
    #   True or False
    #
    def setBypass(self, bypassIn):
        """Set the bypass_activedet flag; return an error message or None."""
        errorMessage = None
        if isinstance(bypassIn, bool):
            if bypassIn:
                bypass = '1'
            else:
                bypass = '0'

            try:
                msg = create_msg('setbypass.' + bypass)
                self.front_req.send_json(msg)
                reply = self.front_req.recv_json()
            except Exception as ex:
                errorMessage = 'setBypass() Exception: %s' % ex
            else:
                try:
                    errorMessage = reply['body']['err_info']
                except KeyError:
                    pass
        else:
            errorMessage = 'setBypass() requires True or False'

        return errorMessage

    #
    # DaqControl.setTransition - trigger a transition
    # The optional second argument is a dictionary containing
    # information that will be put into the phase1-json of the transition.
    # An example:
    # {'myvalue1':3 , 'myvalue2': {'myvalue3':72}}
    #
    def setTransition(self, transition, phase1Info=None):
        """Trigger a transition; return an error message or None on success."""
        # avoid the shared-mutable-default pitfall: create a fresh dict per call
        if phase1Info is None:
            phase1Info = {}
        errorMessage = None
        try:
            msg = create_msg(transition, body=phase1Info)
            self.front_req.send_json(msg)
            reply = self.front_req.recv_json()
        except Exception as ex:
            errorMessage = 'setTransition() Exception: %s' % ex
        else:
            try:
                errorMessage = reply['body']['err_info']
            except KeyError:
                pass

        return errorMessage

    #
    # DaqControl.front_req_init - (re)initialize the front_req zmq socket
    #
    def front_req_init(self):
        """(Re)create the REQ socket; required after a request times out."""
        # if socket previously created, close it
        if self.front_req is not None:
            self.front_req.close()
        # create new socket
        self.front_req = self.context.socket(zmq.REQ)
        self.front_req.linger = 0
        self.front_req.RCVTIMEO = self.timeout
        self.front_req.connect(self.front_req_endpoint)
next_dict = {
'reset' : { 'unallocated' : 'rollcall',
'allocated' : 'rollcall',
'connected' : 'rollcall',
'configured' : 'rollcall',
'starting' : 'rollcall',
'paused' : 'rollcall',
'running' : 'rollcall' },
'unallocated' : { 'reset' : 'reset',
'allocated' : 'alloc',
'connected' : 'alloc',
'configured' : 'alloc',
'starting' : 'alloc',
'paused' : 'alloc',
'running' : 'alloc' },
'allocated' : { 'reset' : 'reset',
'unallocated' : 'dealloc',
'connected' : 'connect',
'configured' : 'connect',
'starting' : 'connect',
'paused' : 'connect',
'running' : 'connect' },
'connected' : { 'reset' : 'reset',
'unallocated' : 'disconnect',
'allocated' : 'disconnect',
'configured' : 'configure',
'starting' : 'configure',
'paused' : 'configure',
'running' : 'configure' },
'configured' : { 'reset' : 'reset',
'unallocated' : 'unconfigure',
'allocated' : 'unconfigure',
'connected' : 'unconfigure',
'starting' : 'beginrun',
'paused' : 'beginrun',
'running' : 'beginrun' },
'starting' : { 'reset' : 'reset',
'unallocated' : 'endrun',
'allocated' : 'endrun',
'connected' : 'endrun',
'configured' : 'endrun',
'paused' : 'beginstep',
'running' : 'beginstep' },
'paused' : { 'reset' : 'reset',
'unallocated' : 'endstep',
'allocated' : 'endstep',
'connected' : 'endstep',
'configured' : 'endstep',
'starting' : 'endstep',
'running' : 'enable' },
'running' : { 'reset' : 'reset',
'unallocated' : 'disable',
'allocated' : 'disable',
'connected' | |
fa
fa : fraction of interstitial liquid that returns to the magma.f = fa would
be an example where there is no interstital liquid in the crystallization
front
Returns
-------
Cl_new : array like
concentration of extracted liquid from crystallization front
"""
E = 1.0 / (D * (1.0 - f) + f)
Cl_new = Cl * (F ** ((fa * (E - 1)) / (fa - 1)))
return Cl_new
def fraclin_xtl(Cl, a, b, F):
    """
    Composition of the liquid remaining after fractional crystallization in
    which the bulk distribution coefficient varies linearly with melt fraction
    (D = a + b*F). Originally described by Greenland 1970.

    Parameters
    ----------
    Cl : array-like
        concentration of the trace element in the original liquid
    a : array-like
        intercept of the relationship describing the linear change in D with
        melt fraction
    b : array-like
        slope of the relationship describing the linear change in D with
        melt fraction
    F : array-like
        fraction of melt remaining (between 0 and 1).

    Returns
    -------
    Cl_new : array-like
        concentration of the trace element in the remaining liquid
    """
    # Rayleigh-style law with linearly varying D, split into its two terms:
    # F**(a-1) contribution (via log) plus the exp(b*(F-1)) contribution.
    log_term = (a - 1) * np.log(F)
    linear_term = b * (F - 1)
    return Cl * np.exp(log_term + linear_term)
#%% General mineral recalculation.
def mineral_formula_calc(df, n_oxygens, mineral, normalized,index):
"""
mineral_formula_calc is a function that calculates the stoichiometry for a mineral based on a set of major
element oxide analyses as described by Deer et al., 1966 Appendix 1
Inputs:
df : pandas dataframe object of major element analyses. Column headers must have the the element somewhere in the name
** if a column containing 'Total' in the name exists, it will be removed so that only the individual analyses are
present
** your dataframe should have a column that pertains to sample, analysis number, etc. This will be set as the index
of the dataframe so that chemical formulas can be accessed easily upon calculation
EXAMPLE OF INPUT DATAFRAME:
|sample|SiO2|TiO2|Al2O3|Cr2O3|FeO|BaO|SrO|MnO|CaO|Na2O|K2O|NiO|Total| <---- currently supported elements
n_oxygens : number of ideal oxygens in the chemical formula (e.g., for feldspars this would be 8)
mineral : 'feldspar','olivine','pyroxene'
if 'pyroxene' is chosen, the function will calculate the proportions of Fe2+ and Fe3+ based off stoichiometry and charge
balance as described by Droop 1987. If 'feldspar', all Fe is assumed to be Fe3+. If 'olivine', all Fe is assumed to be 2+
normalized: boolean
if True, will normalize your geochemical analyses. If false, mineral formulas will be calculated using
raw geochemical data
index: string
column denoting which column to be used as the index for the dataframe. Suggested that this is a column that
denotes sample name or spot name or something similar
Returns:
norm_cations: pandas dataframe object that contains the calculated number of cations in the chemical formula
normalized to the amount of ideal oxygens specified by 'n_oxygens'.
"""
data = df.copy()
data.set_index(index,inplace = True)
data.fillna(0, inplace=True)
# if index is not None:
# data.set_index(index,inplace = True)
# else:
# data.index = np
# Removes the 'total column' from the list
columns = list(data.columns)
elements = []
for column in columns:
if "Total" in column:
columns.remove(column)
# can make this a delimeter variable for the user to choose from
# dropping anything after the underscore
for column in columns:
if "Si" in column:
elements.append(column.split("_")[0])
if "Ti" in column:
elements.append(column.split("_")[0])
if "Al" in column:
elements.append(column.split("_")[0])
if "Cr" in column:
elements.append(column.split("_")[0])
if "Fe" in column:
elements.append(column.split("_")[0])
if "Ba" in column:
elements.append(column.split("_")[0])
if "Sr" in column:
elements.append(column.split("_")[0])
if "Mn" in column:
elements.append(column.split("_")[0])
if "Mg" in column:
elements.append(column.split("_")[0])
if "Na" in column:
elements.append(column.split("_")[0])
if "K" in column:
elements.append(column.split("_")[0])
if "Ca" in column:
elements.append(column.split("_")[0])
if "Ni" in column:
elements.append(column.split("_")[0])
if "Cl" in column:
elements.append(column.split("_")[0])
if "P2O5" in column:
elements.append(column.split("_")[0])
# create new dataframe that is just the analyses without the total
oxides = data.loc[:, columns]
oxides.columns = elements
if normalized == True:
# normalize the wt%
oxides_normalized = 100 * (oxides.div(oxides.sum(axis="columns"), axis="rows"))
elif normalized == False:
oxides_normalized = oxides.copy()
# create an array filled with zeros such that it is the same shape of our input
# data
mol_cations = np.zeros(oxides_normalized.shape)
# these loops are saying that: for each element in my list of elements (e.g., columns)
# check to see if the given string (e.g., Si) is in it. If it is, then populate that column
# of the array with the appropriate math
# Here we call on the mendeleev package module 'element' to get the mass from a given element
# e.g.(el(element).mass)
for i, element in zip(range(len(elements)), elements):
if "Si" in element:
mol_cations[:, i] = oxides_normalized[element] / (28.09 + (16 * 2))
elif "Ti" in element:
mol_cations[:, i] = oxides_normalized[element] / (47.87 + (16 * 2))
elif "Al" in element:
mol_cations[:, i] = (2 * oxides_normalized[element]) / (
(26.98 * 2) + (16 * 3)
)
elif "Cr" in element:
mol_cations[:, i] = (2 * oxides_normalized[element]) / ((52 * 2) + (16 * 3))
elif "Fe" in element:
mol_cations[:, i] = oxides_normalized[element] / (55.85 + 16)
elif "Ba" in element:
mol_cations[:, i] = oxides_normalized[element] / (137.33 + 16)
elif "Sr" in element:
mol_cations[:, i] = oxides_normalized[element] / (87.62 + 16)
elif "Mn" in element:
mol_cations[:, i] = oxides_normalized[element] / (54.94 + 16)
elif "Mg" in element:
mol_cations[:, i] = oxides_normalized[element] / (24.31 + 16)
elif "Ca" in element:
mol_cations[:, i] = oxides_normalized[element] / (40.08 + 16)
elif "Na" in element:
mol_cations[:, i] = (2 * oxides_normalized[element]) / ((23 * 2) + 16)
elif "K" in element:
mol_cations[:, i] = (2 * oxides_normalized[element]) / ((39.1 * 2) + 16)
elif "Ni" in element:
mol_cations[:, i] = oxides_normalized[element] / (58.69 + 16)
mol_cations = pd.DataFrame(mol_cations, columns=elements)
# Calculating the number of oxygens per cation in the formula
mol_oxygens = np.zeros(mol_cations.shape)
for i, element in zip(range(len(elements)), elements):
if "Si" in element:
mol_oxygens[:, i] = mol_cations[element] * 2
elif "Ti" in element:
mol_oxygens[:, i] = mol_cations[element] * 2
elif "Al" in element:
mol_oxygens[:, i] = mol_cations[element] * (3 / 2)
elif "Cr" in element:
mol_oxygens[:, i] = mol_cations[element] * (3 / 2)
elif "Fe" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Ba" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Sr" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Mn" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Mg" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Ca" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Na" in element:
mol_oxygens[:, i] = mol_cations[element] * (1 / 2)
elif "K" in element:
mol_oxygens[:, i] = mol_cations[element] * (1 / 2)
elif "Ni" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
mol_oxygens = pd.DataFrame(mol_oxygens, columns=elements)
# number of oxygens per cation, normalized to the ideal number of oxygens specified above
norm_oxygens = (mol_oxygens * n_oxygens).div(
mol_oxygens.sum(axis="columns"), axis="rows"
)
# calculate the mole cations of each oxide normalized to the number of ideal oxygens
norm_cations = np.zeros(norm_oxygens.shape)
for i, element in zip(range(len(elements)), elements):
if "Si" in element:
norm_cations[:, i] = norm_oxygens[element] / 2
elif "Ti" in element:
norm_cations[:, i] = norm_oxygens[element] / 2
elif "Al" in element:
norm_cations[:, i] = norm_oxygens[element] / (3 / 2)
elif "Cr" in element:
norm_cations[:, i] = norm_oxygens[element] / (3 / 2)
elif "Fe" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Ba" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Sr" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Mn" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Mg" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Ca" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Na" in element:
norm_cations[:, i] = norm_oxygens[element] / (1 / 2)
elif "K" in element:
norm_cations[:, i] = norm_oxygens[element] / (1 / 2)
elif "Ni" in element:
norm_cations[:, i] = norm_oxygens[element]
cations = []
# Get the cations by taking the first two | |
data['images'] = hdf5_file.create_dataset("images", list((size,size)) + [num_slices], dtype=np.float32)
data['labels'] = hdf5_file.create_dataset("labels", list((size,size)) + [num_slices], dtype=np.uint8)
#data = {}
#num_slices = count_slices(folder_list, idx_start, idx_end)
#data['images'] = hdf5_file.create_dataset("images", list((size,size)) + [num_slices], dtype=np.float32)
#data['labels'] = hdf5_file.create_dataset("labels", list((size,size)) + [num_slices], dtype=np.uint8)
# ===============================
# initialize lists
# ===============================
label_list = []
image_list = []
nx_list = []
ny_list = []
nz_list = []
px_list = []
py_list = []
pz_list = []
pat_names_list = []
# ===============================
# ===============================
logging.info('Parsing image files')
patient_counter = 0
write_buffer = 0
counter_from = 0
for folder in folder_list:
patient_counter += 1
logging.info('================================')
logging.info('Doing: %s' % folder)
patname = folder.split('/')[-1]
pat_names_list.append(patname)
image_t1, _, image_t1_hdr = utils.load_nii(folder + f'/{patname}_t1.nii.gz')
image_t1ce, _, image_t1ce_hdr = utils.load_nii(folder + f'/{patname}_t1ce.nii.gz')
image_t2, _, image_t2_hdr = utils.load_nii(folder + f'/{patname}_t2.nii.gz')
image_flair, _, image_flair_hdr = utils.load_nii(folder + f'/{patname}_flair.nii.gz')
px_list.append(float(image_t1_hdr.get_zooms()[0]))
py_list.append(float(image_t1_hdr.get_zooms()[1]))
pz_list.append(float(image_t1_hdr.get_zooms()[2]))
nifti_img_path = preprocessing_folder + '/Individual_NIFTI/' + patname + '/'
if not os.path.exists(nifti_img_path):
utils.makefolder(nifti_img_path)
#utils.save_nii(img_path = nifti_img_path + '_img_t1.nii.gz', data = image_t1, affine = np.eye(4))
#utils.save_nii(img_path = nifti_img_path + '_img_t1ce.nii.gz', data = image_t1ce, affine = np.eye(4))
#utils.save_nii(img_path = nifti_img_path + '_img_t2.nii.gz', data = image_t2, affine = np.eye(4))
#utils.save_nii(img_path = nifti_img_path + '_img_flair.nii.gz', data = image_flair, affine = np.eye(4))
# ================================
# do bias field correction
# ================================
input_img_t1 = nifti_img_path + patname + '_img_t1.nii.gz'
output_img_t1 = nifti_img_path + patname + '_img_t1_n4.nii.gz'
input_img_t1ce = nifti_img_path + patname + '_img_t1ce.nii.gz'
output_img_t1ce = nifti_img_path + patname + '_img_t1ce_n4.nii.gz'
input_img_t2 = nifti_img_path + patname + '_img_t2.nii.gz'
output_img_t2 = nifti_img_path + patname + '_img_t2_n4.nii.gz'
input_img_flair = nifti_img_path + patname + '_img_flair.nii.gz'
output_img_flair = nifti_img_path + patname + '_img_flair_n4.nii.gz'
# If bias corrected image does not exist, do it now
for input_img, output_img in zip([input_img_t1, input_img_t1ce, input_img_t2, input_img_flair], [output_img_t1, output_img_t1ce, output_img_t2, output_img_flair]):
if os.path.isfile(output_img):
img = utils.load_nii(img_path = output_img)[0]
else:
subprocess.call(["/itet-stor/arismu/bmicdatasets_bmicnas01/Sharing/N4_th", input_img, output_img])
img = utils.load_nii(img_path = output_img)[0]
if input_img == input_img_t1:
img_t1_n4 = img
img_t1_n4 = utils.normalise_image(img_t1_n4, norm_type='div_by_max')
elif input_img == input_img_t1ce:
img_t1ce_n4 = img
img_t1ce_n4 = utils.normalise_image(img_t1ce_n4, norm_type='div_by_max')
elif input_img == input_img_t2:
img_t2_n4 = img
img_t2_n4 = utils.normalise_image(img_t2_n4, norm_type='div_by_max')
elif input_img == input_img_flair:
img_flair_n4 = img
img_flair_n4 = utils.normalise_image(img_flair_n4, norm_type='div_by_max')
image_n4 = np.concatenate((img_t1_n4, img_t1ce_n4), axis = 2)
image_n4 = np.concatenate((image_n4, img_t2_n4), axis = 2)
image_n4 = np.concatenate((image_n4, img_flair_n4), axis = 2)
nx_list.append(image_n4.shape[0])
ny_list.append(image_n4.shape[1])
nz_list.append(image_n4.shape[2])
# ================================
# read the labels
# ================================
label, _, _ = utils.load_nii(folder + f'/{patname}_seg.nii.gz')
label_temp = np.concatenate((label, label), axis = 2) #concatenate 3 times since all 4 image types share the same label
label_temp = np.concatenate((label_temp, label), axis = 2)
label = np.concatenate((label_temp, label), axis = 2)
if not os.path.isfile(nifti_img_path + patname + '_lbl.nii.gz'):
utils.save_nii(img_path = nifti_img_path + patname + '_lbl.nii.gz', data = label, affine = np.eye(4))
### PROCESSING LOOP FOR SLICE-BY-SLICE 2D DATA ###################
for zz in range(image_n4.shape[2]):
#no rescaling needed since all images (SD & TDs) have same scale/dimensions already
#image_cropped = crop_or_pad_slice_to_size(img_normalised[:, :, zz], size, size)
#label_cropped = crop_or_pad_slice_to_size(label[:, :, zz], size, size)
image_list.append(image_n4[:, :, zz])
label_list.append(label[:, :, zz])
write_buffer += 1
if write_buffer >= MAX_WRITE_BUFFER:
counter_to = counter_from + write_buffer
_write_range_to_hdf5(data,
image_list,
label_list,
counter_from,
counter_to)
_release_tmp_memory(image_list,
label_list)
# update counters
counter_from = counter_to
write_buffer = 0
#logging.info('Writing remaining data')
#counter_to = counter_from + write_buffer
#_write_range_to_hdf5(data, image_list, label_list, counter_from, counter_to)
# _release_tmp_memory(image_list, label_list)
hdf5_file.create_dataset('nx', data=np.asarray(nx_list, dtype=np.uint16))
hdf5_file.create_dataset('ny', data=np.asarray(ny_list, dtype=np.uint16))
hdf5_file.create_dataset('nz', data=np.asarray(nz_list, dtype=np.uint16))
hdf5_file.create_dataset('px', data=np.asarray(px_list, dtype=np.float32))
hdf5_file.create_dataset('py', data=np.asarray(py_list, dtype=np.float32))
hdf5_file.create_dataset('pz', data=np.asarray(pz_list, dtype=np.float32))
hdf5_file.create_dataset('patnames', data=np.asarray(pat_names_list, dtype="S20"))
# After test train loop:
logging.info('Test TD1 loop done')
hdf5_file.close()
# ===============================
# TD2
# ===============================
def load_test_td2_data(input_folder,
                       preproc_folder,
                       size,
                       target_resolution,
                       force_overwrite = False):
    """Load (and lazily build) the TD2 test hdf5 dataset.

    Ensures the preprocessing folder exists, runs the preprocessing step when
    the hdf5 file is missing or force_overwrite is set, and returns the file
    opened read-only.
    """
    # make sure the pre-processing folder exists before touching files in it
    utils.makefolder(preproc_folder)

    # full path of the hdf5 file to create or directly read if it already exists
    test_td2_file_path = os.path.join(preproc_folder, 'data_test_td2.hdf5')

    # extract the images only when needed (or when a rebuild is forced)
    if force_overwrite or not os.path.exists(test_td2_file_path):
        logging.info('This configuration of protocol and data indices has not yet been preprocessed')
        logging.info('Preprocessing now...')
        prepare_test_td2_data(input_folder,
                              preproc_folder,
                              test_td2_file_path,
                              size,
                              target_resolution
                              )
    else:
        logging.info('Already preprocessed this configuration. Loading now...')

    return h5py.File(test_td2_file_path, 'r')
# ===============================================================
# Main function that prepares a dataset from the raw challenge data to an hdf5 dataset.
# Extract the required files from their zipped directories
# ===============================================================
def prepare_test_td2_data(input_folder,
preprocessing_folder,
test_td2_file_path,
size,
target_resolution
):
# ===============================
# create a hdf5 file
# ===============================
hdf5_file = h5py.File(test_td2_file_path, "w")
# ===============================
# read all the patient folders from the base input folder
# ===============================
logging.info('Counting files and parsing meta data...')
training_folder = input_folder + 'MICCAI_FeTS2021_TrainingData/'
folder_list = []
test_ids_td2 = [204, 199, 200, 201, 202, 203, 206, 211, 208, 209, 210, 198, 212, 213, 207, 197, 205, 195, 181, 182, 183, 184, 185, 187, 188,
186, 189, 190, 191, 192, 193, 194, 180, 196]
num_slices = 0
for folder in os.listdir(training_folder):
if not (folder.lower().endswith('.csv') or folder.lower().endswith('.md')):
folder_path = os.path.join(training_folder, folder)
patient_id = int(folder.split('_')[-1])
if os.path.isdir(folder_path):
if patient_id in test_ids_td2 :
folder_list.append(folder_path)
for _, _, fileList in os.walk(folder_path):
for filename in fileList:
if filename.lower().endswith('t1.nii.gz'):
image_t1, _, _ = utils.load_nii(training_folder + folder + '/' + filename)
num_slices += image_t1.shape[2]
elif filename.lower().endswith('t1ce.nii.gz'):
image_t1ce, _, _ = utils.load_nii(training_folder + folder + '/' + filename)
num_slices += image_t1ce.shape[2]
elif filename.lower().endswith('t2.nii.gz'):
image_t2, _, _= utils.load_nii(training_folder + folder + '/' + filename)
num_slices += image_t2.shape[2]
elif filename.lower().endswith('flair.nii.gz'):
image_flair, _, _ = utils.load_nii(training_folder + folder + '/' + filename)
num_slices += image_flair.shape[2]
# ===============================
# Create datasets for images and labels
# ===============================
data = {}
data['images'] = hdf5_file.create_dataset("images", list((size,size)) + [num_slices], dtype=np.float32)
data['labels'] = hdf5_file.create_dataset("labels", list((size,size)) + [num_slices], dtype=np.uint8)
#data = {}
#num_slices = count_slices(folder_list, idx_start, idx_end)
#data['images'] = hdf5_file.create_dataset("images", list((size,size)) + [num_slices], dtype=np.float32)
#data['labels'] = hdf5_file.create_dataset("labels", list((size,size)) + [num_slices], dtype=np.uint8)
# ===============================
# initialize lists
# ===============================
label_list = []
image_list = []
nx_list = []
ny_list = []
nz_list = []
px_list = []
py_list = []
pz_list = []
pat_names_list = []
# ===============================
# ===============================
logging.info('Parsing image files')
patient_counter = 0
write_buffer = 0
counter_from = 0
for folder in folder_list:
patient_counter += 1
logging.info('================================')
logging.info('Doing: %s' % folder)
patname = folder.split('/')[-1]
pat_names_list.append(patname)
image_t1, _, image_t1_hdr = utils.load_nii(folder + f'/{patname}_t1.nii.gz')
image_t1ce, _, image_t1ce_hdr = utils.load_nii(folder + f'/{patname}_t1ce.nii.gz')
image_t2, _, image_t2_hdr = utils.load_nii(folder + f'/{patname}_t2.nii.gz')
image_flair, _, image_flair_hdr = utils.load_nii(folder + f'/{patname}_flair.nii.gz')
px_list.append(float(image_t1_hdr.get_zooms()[0]))
py_list.append(float(image_t1_hdr.get_zooms()[1]))
pz_list.append(float(image_t1_hdr.get_zooms()[2]))
nifti_img_path = preprocessing_folder + '/Individual_NIFTI/' + patname + '/'
if not os.path.exists(nifti_img_path):
utils.makefolder(nifti_img_path)
#utils.save_nii(img_path = nifti_img_path + '_img_t1.nii.gz', data = image_t1, affine = np.eye(4))
#utils.save_nii(img_path = nifti_img_path + '_img_t1ce.nii.gz', data = image_t1ce, affine = np.eye(4))
#utils.save_nii(img_path = nifti_img_path + '_img_t2.nii.gz', data = image_t2, affine = np.eye(4))
#utils.save_nii(img_path = nifti_img_path + '_img_flair.nii.gz', data = image_flair, affine = np.eye(4))
# ================================
# do bias field correction
# ================================
input_img_t1 = nifti_img_path + patname + '_img_t1.nii.gz'
output_img_t1 = nifti_img_path + patname + '_img_t1_n4.nii.gz'
input_img_t1ce = nifti_img_path + patname + '_img_t1ce.nii.gz'
output_img_t1ce = nifti_img_path + patname + '_img_t1ce_n4.nii.gz'
input_img_t2 = nifti_img_path + patname + '_img_t2.nii.gz'
output_img_t2 = nifti_img_path + patname + '_img_t2_n4.nii.gz'
input_img_flair = nifti_img_path + patname + '_img_flair.nii.gz'
output_img_flair = nifti_img_path + patname + '_img_flair_n4.nii.gz'
# If bias corrected image does not exist, do it now
for input_img, output_img in zip([input_img_t1, input_img_t1ce, input_img_t2, input_img_flair], [output_img_t1, output_img_t1ce, output_img_t2, output_img_flair]):
if os.path.isfile(output_img):
img = utils.load_nii(img_path = output_img)[0]
else:
subprocess.call(["/itet-stor/arismu/bmicdatasets_bmicnas01/Sharing/N4_th", input_img, output_img])
img = utils.load_nii(img_path = output_img)[0]
if input_img == input_img_t1:
img_t1_n4 = img
img_t1_n4 = utils.normalise_image(img_t1_n4, norm_type='div_by_max')
elif input_img == input_img_t1ce:
img_t1ce_n4 = img
img_t1ce_n4 = utils.normalise_image(img_t1ce_n4, norm_type='div_by_max')
elif input_img == input_img_t2:
img_t2_n4 = img
img_t2_n4 = utils.normalise_image(img_t2_n4, norm_type='div_by_max')
elif input_img == input_img_flair:
img_flair_n4 = img
img_flair_n4 = utils.normalise_image(img_flair_n4, norm_type='div_by_max')
image_n4 = np.concatenate((img_t1_n4, img_t1ce_n4), axis = 2)
image_n4 = np.concatenate((image_n4, img_t2_n4), axis = 2)
image_n4 = np.concatenate((image_n4, img_flair_n4), axis = 2)
nx_list.append(image_n4.shape[0])
ny_list.append(image_n4.shape[1])
nz_list.append(image_n4.shape[2])
# ================================
# read the labels
# ================================ | |
# with user-defined error recovery
from pprint import pprint
from random import randint
from threading import Thread,Lock
# Global lock serialising console output across the error-recovery threads
# spawned in driver(); each automaton run acquires it before printing.
mutex = Lock()
def import_grammar(fileHandle):
    """Read productions of the form ``A -> rhs`` from *fileHandle*.

    Returns a triple ``(G, T, Nt)`` where
      G  - list of (lhs, rhs) production tuples, duplicates removed,
      T  - terminal symbols in order of first appearance, ending with '$',
      Nt - non-terminal symbols (uppercase ASCII letters) in order seen.
    """
    G, T, Nt = [], [], []
    for line in fileHandle:
        lhs, _, rhs = line.partition(' -> ')
        # BUG FIX: the original deleted the last character unconditionally,
        # corrupting a final line that lacks a trailing newline.
        rhs = rhs.rstrip('\n')
        if lhs not in Nt:
            Nt.append(lhs)
        for char in rhs:
            # Uppercase ASCII letters are non-terminals, anything else terminal.
            if 'A' <= char <= 'Z':
                if char not in Nt:
                    Nt.append(char)
            elif char not in T:
                T.append(char)
        production = (lhs, rhs)
        # BUG FIX: the original tested a *list* against the tuples stored in
        # G, which never matched, so duplicate productions were kept.
        if production not in G:
            G.append(production)
    T.append('$')  # end-of-input marker is always a terminal
    return G, T, Nt
def closure(I,G,Nt):
    """Return the LR(1) closure of item set I.

    Items are (lhs, rhs-with-dot, lookahead) triples.  Every item
    [A -> alpha . B beta, a] is expanded by adding [B -> . gamma, b]
    for each production B -> gamma, until no new item appears.
    The order in which items are appended matters: items() compares
    whole states with list equality.
    """
    J = [p for p in I]
    while True:
        J1 = [x for x in J]
        for x in J1:
            handle = list(x[1])  # rhs with the dot, as a char list
            a = x[2]             # lookahead of the item being expanded
            k = handle.index('.')
            if k+1!=len(handle):          # dot is not at the far right
                if handle[k+1] in Nt:     # symbol after the dot is a non-terminal
                    for p in G:
                        # beta = everything after the non-terminal that follows the dot
                        beta = ''.join(handle[m] for m in range(k+2,len(handle)))
                        # NOTE(review): only the first symbol of beta+a is used as
                        # the new lookahead; this equals FIRST(beta a) only when
                        # beta is empty or starts with a terminal -- confirm the
                        # grammars fed to this tool satisfy that.
                        b = list(beta+a)[0]
                        if p[0] == handle[k+1]:
                            new_p = (p[0],'.'+p[1],b)
                            # J1 is extended while being iterated; Python list
                            # iteration picks the new items up in the same pass.
                            if new_p not in J1: J1.append(new_p)
        # Merge anything new into J; stop once a pass adds nothing.
        flag = True
        for x in J1:
            if x not in J:
                flag = False
                J.append(x)
        if flag: break
    return J
def goto(I,G,X,Nt):
    """Compute GOTO(I, X): advance the dot over symbol X in every item of I
    that expects X next, then take the LR(1) closure of the result."""
    moved = []
    for lhs, dotted, lookahead in I:
        k = dotted.index('.')
        # Skip items whose dot sits at the end or that expect some other symbol.
        if k == len(dotted) - 1 or dotted[k+1] != X:
            continue
        # Slide the dot one position to the right, past X.
        advanced = dotted[:k] + X + '.' + dotted[k+2:]
        moved.append((lhs, advanced, lookahead))
    return closure(moved,G,Nt)
def items(G,T,Nt):
    """Build the canonical LR(1) collection and the parser tables.

    Returns (C, action, goto_k, reduction_states, accept_state):
      C                - list of item sets; a state's number is its index in C
      action           - action[state][terminal]  -> shift target state
      goto_k           - goto_k[state][non-terminal] -> goto target state
      reduction_states - reduction_states[state][lookahead] -> (lhs, rhs)
                         production whose dot reached the end in that state
      accept_state     - state that reduces by the augmented start
                         production G[0] on '$'
    """
    # Start from the closure of the augmented start item [S' -> .S, $].
    C = [ closure([(G[0][0],'.'+G[0][1],'$')],G,Nt) ]
    action = {}
    goto_k = {}
    reduction_states = {}
    # Fixpoint: keep adding GOTO targets until a pass discovers no new state.
    # States are compared with list equality, so closure()'s item order matters.
    while True:
        C1 = [ I for I in C ]
        # Transitions on terminals populate the action (shift) table.
        for I in C1:
            for X in T:
                goto_list = goto(I,G,X,Nt)
                if len(goto_list)!=0 and goto_list not in C1:
                    # Brand-new state: register it, then record the transition.
                    C1.append(goto_list)
                    if C1.index(I) not in action: action[C1.index(I)] = {}
                    if X not in action[C1.index(I)]:
                        action[C1.index(I)][X] = C1.index(goto_list)
                elif goto_list in C1:
                    # Known state: just record the transition.
                    if C1.index(I) not in action: action[C1.index(I)] = {}
                    if X not in action[C1.index(I)]:
                        action[C1.index(I)][X] = C1.index(goto_list)
        # Transitions on non-terminals populate the goto table.
        for I in C1:
            for X in Nt:
                goto_list = goto(I,G,X,Nt)
                if len(goto_list)!=0 and goto_list not in C1:
                    C1.append(goto_list)
                    if C1.index(I) not in goto_k: goto_k[C1.index(I)] = {}
                    if X not in goto_k[C1.index(I)]:
                        goto_k[C1.index(I)][X] = C1.index(goto_list)
                elif goto_list in C1:
                    if C1.index(I) not in goto_k: goto_k[C1.index(I)] = {}
                    if X not in goto_k[C1.index(I)]:
                        goto_k[C1.index(I)][X] = C1.index(goto_list)
        # Merge newly discovered states into C; stop when nothing new appeared.
        flag = True
        for x in C1:
            if x not in C:
                flag = False
                C.append(x)
        if flag: break
    # Collect the reductions: items whose dot has reached the end of the rhs.
    for state in range(len(C)):
        reduction_states[state] = {}
        for production in C[state]:
            if production[1][len(production[1])-1] == '.':
                rhs = list(production[1])
                del rhs[-1]  # drop the trailing dot
                rhsStr = ''.join(i for i in rhs)
                Pp = (production[0],rhsStr)
                reduction_states[state][production[2]] = Pp
    # The accept state reduces by the augmented start production on '$'.
    accept_state = 0
    for x in reduction_states:
        if '$' in reduction_states[x]:
            if reduction_states[x]['$'] == G[0]:
                accept_state = x
                break
    return C,action,goto_k,reduction_states,accept_state
def driver():
    """Build LR(1) tables from 'grammarinput3.txt', dump them, then parse a
    user-supplied string, forking one recovery thread per candidate
    correction whenever a syntax error is detected.  (Python 2 source:
    print statements and raw_input.)"""
    fileHandle = open('grammarinput3.txt')
    G,T,Nt = import_grammar(fileHandle)
    print T,Nt
    C,action_list,goto_list,reduction_states,accept_state = items(G,T,Nt)
    print 'Canonical states'
    for i in range(len(C)): print i,C[i]
    print 'Action list'
    pprint(action_list)
    print 'Goto list'
    pprint(goto_list)
    print 'Reduction states'
    pprint(reduction_states)
    print 'Accept state',accept_state
    # stack holds state numbers, symbol_stack the matching grammar symbols;
    # both grow at index `top` (the front), so insert/del at `top` push/pop.
    stack = [0]
    symbol_stack=['$']
    input_str = raw_input('Enter some string ')+'$'
    i,top = 0,0
    # LR(1) AUTOMATON PARSING
    def automatonThread(symbol_stack,stack,input_str,i,top):
        # Serialise console output: only one automaton prints at a time.
        global mutex
        mutex.acquire()
        print 'STATE','INPUT','SYMBOL_STACK','ACTION'
        while True:
            print 'Input string',input_str
            s = stack[top]
            try:
                print s,input_str[i] if i != len(input_str) else 'Finish',symbol_stack,
                if s == accept_state:
                    print 'accept'
                    mutex.release()
                    break
                elif len(reduction_states[s]) != 0 and input_str[i] in reduction_states[s]:
                    # Reduce by A -> beta: pop |beta| states/symbols, then goto.
                    A,beta = reduction_states[s][input_str[i]]
                    print 'reduce',A,'->',beta
                    for j in range(len(beta)):
                        del stack[top]
                        del symbol_stack[top]
                    t = stack[top]
                    stack.insert(top,goto_list[t][A])
                    symbol_stack.insert(top,A)
                else:
                    # Shift the next input symbol; a KeyError here (no action
                    # entry) lands in the recovery handler below.
                    a = input_str[i]
                    stack.insert(top,action_list[s][a])
                    symbol_stack.insert(top,a)
                    print 'shift',action_list[s][a]
                    i = i + 1
            except:
                # Error recovery: list every symbol the current state could
                # accept, splice each over the offending character, and try
                # each repaired string in its own thread.
                print '\nSyntax error detected'
                print 'Expected any of the following'
                errors = []
                if s in action_list:
                    for prod in action_list[s]: errors.append(prod)
                elif s in reduction_states:
                    for prod in reduction_states[s]: errors.append(prod)
                for j in range(len(errors)): print j+1,')',errors[j] if errors[j] != '$' else 'End of line'
                threadList = [None for j in range(len(errors))]
                print 'Got',input_str[i],'at position',i
                mutex.release()
                for k in range(len(errors)):
                    r = errors[k]
                    e = input_str[i]
                    # Substitute candidate symbol r for the bad character.
                    buffer_str = ''.join(input_str[j] for j in range(i))+r+''.join(input_str[j] for j in range(i+1,len(input_str)))
                    if i == len(buffer_str) - 1 and e == '$': buffer_str = buffer_str + '$'
                    print 'Current adjusted input',buffer_str
                    # NOTE(review): all recovery threads share the same
                    # stack/symbol_stack lists, so concurrent attempts mutate
                    # shared state -- confirm this interleaving is intended.
                    threadList[k] = Thread(target=automatonThread,args=(symbol_stack,stack,buffer_str,i,top,))
                for k in range(len(errors)): threadList[k].start()
                for k in range(len(errors)): threadList[k].join()
                break
    automatonThread(symbol_stack,stack,input_str,i,top)
# Script entry point: build the parser tables and run the interactive parse.
driver()
# The output of the program
# ['a', 'b', '$'] ['G', 'S']
# Canonical states
# 0 [('G', '.S', '$'), ('S', '.abS', '$'), ('S', '.baS', '$'), ('S', '.aaSbbS', '$'), ('S', '.bbSaaS', '$'), ('S', '.', '$')]
# 1 [('S', 'a.bS', '$'), ('S', 'a.aSbbS', '$')]
# 2 [('S', 'b.aS', '$'), ('S', 'b.bSaaS', '$')]
# 3 [('S', 'aa.SbbS', '$'), ('S', '.abS', 'b'), ('S', '.baS', 'b'), ('S', '.aaSbbS', 'b'), ('S', '.bbSaaS', 'b'), ('S', '.', 'b')]
# 4 [('S', 'ab.S', '$'), ('S', '.abS', '$'), ('S', '.baS', '$'), ('S', '.aaSbbS', '$'), ('S', '.bbSaaS', '$'), ('S', '.', '$')]
# 5 [('S', 'ba.S', '$'), ('S', '.abS', '$'), ('S', '.baS', '$'), ('S', '.aaSbbS', '$'), ('S', '.bbSaaS', '$'), ('S', '.', '$')]
# 6 [('S', 'bb.SaaS', '$'), ('S', '.abS', 'a'), ('S', '.baS', 'a'), ('S', '.aaSbbS', 'a'), ('S', '.bbSaaS', 'a'), ('S', '.', 'a')]
# 7 [('S', 'a.bS', 'b'), ('S', 'a.aSbbS', 'b')]
# 8 [('S', 'b.aS', 'b'), ('S', 'b.bSaaS', 'b')]
# 9 [('S', 'a.bS', 'a'), ('S', 'a.aSbbS', 'a')]
# 10 [('S', 'b.aS', 'a'), ('S', 'b.bSaaS', 'a')]
# 11 [('S', 'aa.SbbS', 'b'), ('S', '.abS', 'b'), ('S', '.baS', 'b'), ('S', '.aaSbbS', 'b'), ('S', '.bbSaaS', 'b'), ('S', '.', 'b')]
# 12 [('S', 'ab.S', 'b'), ('S', '.abS', 'b'), ('S', '.baS', 'b'), ('S', '.aaSbbS', 'b'), ('S', '.bbSaaS', 'b'), ('S', '.', 'b')]
# 13 [('S', 'ba.S', 'b'), ('S', '.abS', 'b'), ('S', '.baS', 'b'), ('S', '.aaSbbS', 'b'), ('S', '.bbSaaS', 'b'), ('S', '.', 'b')]
# 14 [('S', 'bb.SaaS', 'b'), ('S', '.abS', 'a'), ('S', '.baS', 'a'), ('S', '.aaSbbS', 'a'), ('S', '.bbSaaS', 'a'), ('S', '.', 'a')]
# 15 [('S', 'aa.SbbS', 'a'), ('S', '.abS', 'b'), ('S', '.baS', 'b'), ('S', '.aaSbbS', 'b'), ('S', '.bbSaaS', 'b'), ('S', '.', 'b')]
# 16 [('S', 'ab.S', 'a'), ('S', '.abS', 'a'), ('S', '.baS', 'a'), ('S', '.aaSbbS', 'a'), ('S', '.bbSaaS', 'a'), ('S', '.', 'a')]
# 17 [('S', 'ba.S', 'a'), ('S', '.abS', 'a'), ('S', '.baS', 'a'), ('S', '.aaSbbS', 'a'), ('S', '.bbSaaS', 'a'), ('S', '.', 'a')]
# 18 [('S', 'bb.SaaS', 'a'), ('S', '.abS', 'a'), ('S', '.baS', 'a'), ('S', '.aaSbbS', 'a'), ('S', '.bbSaaS', 'a'), ('S', '.', 'a')]
# 19 [('G', 'S.', '$')]
# 20 [('S', 'aaS.bbS', '$')]
# 21 [('S', 'abS.', '$')]
# 22 [('S', 'baS.', '$')]
# 23 [('S', 'bbS.aaS', '$')]
# 24 [('S', 'aaS.bbS', 'b')]
# 25 [('S', 'abS.', 'b')]
# 26 [('S', 'baS.', 'b')]
# 27 [('S', 'bbS.aaS', 'b')]
# 28 [('S', 'aaS.bbS', 'a')]
# 29 [('S', 'abS.', 'a')]
# 30 [('S', 'baS.', 'a')]
# 31 [('S', 'bbS.aaS', 'a')]
# 32 [('S', 'aaSb.bS', '$')]
# 33 [('S', 'bbSa.aS', '$')]
# 34 [('S', 'aaSb.bS', 'b')]
# 35 [('S', 'bbSa.aS', 'b')]
# 36 [('S', 'aaSb.bS', 'a')]
# 37 [('S', 'bbSa.aS', 'a')]
# 38 [('S', 'aaSbb.S', '$'), ('S', '.abS', '$'), ('S', '.baS', '$'), ('S', '.aaSbbS', '$'), ('S', '.bbSaaS', '$'), ('S', '.', '$')]
# 39 [('S', 'bbSaa.S', '$'), ('S', '.abS', '$'), ('S', '.baS', '$'), ('S', '.aaSbbS', '$'), ('S', '.bbSaaS', '$'), ('S', '.', '$')]
# 40 [('S', 'aaSbb.S', 'b'), ('S', '.abS', 'b'), ('S', '.baS', 'b'), ('S', '.aaSbbS', 'b'), ('S', '.bbSaaS', 'b'), ('S', '.', 'b')]
# 41 [('S', 'bbSaa.S', 'b'), ('S', '.abS', 'b'), ('S', '.baS', 'b'), ('S', '.aaSbbS', 'b'), ('S', '.bbSaaS', 'b'), ('S', '.', 'b')]
# 42 [('S', 'aaSbb.S', 'a'), ('S', '.abS', 'a'), ('S', '.baS', 'a'), ('S', '.aaSbbS', 'a'), ('S', '.bbSaaS', 'a'), ('S', '.', 'a')]
# 43 [('S', 'bbSaa.S', 'a'), ('S', '.abS', 'a'), ('S', '.baS', 'a'), ('S', '.aaSbbS', 'a'), ('S', '.bbSaaS', 'a'), ('S', '.', 'a')]
# 44 [('S', 'aaSbbS.', '$')]
# 45 [('S', 'bbSaaS.', '$')]
# 46 [('S', 'aaSbbS.', 'b')]
# 47 [('S', 'bbSaaS.', 'b')]
# 48 [('S', 'aaSbbS.', 'a')]
# 49 [('S', 'bbSaaS.', 'a')]
# Action list
# {0: {'a': 1, 'b': 2},
# 1: {'a': 3, 'b': 4},
# 2: {'a': 5, 'b': 6},
# 3: {'a': 7, 'b': 8},
# 4: {'a': 1, 'b': 2},
# 5: {'a': 1, 'b': 2},
# 6: {'a': 9, 'b': 10},
# 7: {'a': 11, 'b': 12},
# 8: {'a': 13, 'b': 14},
# 9: {'a': 15, 'b': 16},
# 10: {'a': 17, 'b': 18},
# 11: {'a': 7, 'b': 8},
# 12: {'a': 7, 'b': 8},
# 13: {'a': 7, 'b': 8},
# 14: {'a': 9, 'b': 10},
# 15: {'a': 7, 'b': 8},
# 16: {'a': 9, 'b': 10},
# 17: {'a': 9, 'b': 10},
# 18: {'a': 9, 'b': 10},
# 20: {'b': 32},
# 23: {'a': 33},
# 24: {'b': 34},
# 27: {'a': 35},
# 28: {'b': 36},
# 31: {'a': 37},
# 32: {'b': 38},
# | |
# ==============================CS-271P==================================
# FILE: MyAI.py
#
# AUTHOR: <NAME>
#
# DESCRIPTION: This file contain the core algorithm to solve the game
# Refer below at getAction(self, number: int) for details
# ==============================CS-271P==================================
from AI import AI
from Action import Action
from collections import deque
from collections import Counter
import random
class MyAI( AI ):
# Tile class (name-mangled to _MyAI__Tile; private to MyAI)
class __Tile():
    """Per-tile record in the agent's knowledge base.

    The class attributes below act as shared defaults until an instance
    assignment shadows them for a particular tile.
    """
    mine = False # True once the tile has been deduced to be a mine
    covered = True # False after the tile has been uncovered
    flag = False # True while the tile is flagged as a mine
    number = -100 # percept number; -100 marks "not yet perceived"
def __init__(self, rowDimension, colDimension, totalMines, startX, startY):
    """Initialise the agent's knowledge base for a fresh board.

    The world has already uncovered the opening tile at (startX, startY),
    so construction ends by recording that move in the KB.
    """
    # Board geometry and mine bookkeeping.
    self.row = rowDimension
    self.col = colDimension
    self.totalMines = totalMines
    self.coveredTiles = rowDimension * colDimension
    # Move accounting (the opening move counts as move #1).
    self.movesMade = 1
    self.movesLimit = 0
    # Most recent action: coordinates, tile object, action number.
    self.cLast = startX
    self.rLast = startY
    self.lastTile = None          # will hold a self.board[c][r] instance
    self.lastAction = None        # will hold an action number
    self.flagsLeft = totalMines
    # Where the game began.
    self.startX = startX
    self.startY = startY
    # Queue of tiles proven safe but not yet uncovered.
    self.safeQ = deque()
    # Every (col, row) coordinate starts out unexplored.
    self.unknown = [(c, r) for r in range(self.row) for c in range(self.col)]
    self.prob = {}
    self.mines = []
    self.minesLeft = totalMines
    # Build the board model, then log the opening move the world made.
    self.board = None
    self.__createBoard()
    self.__updateFirstMove()
def getAction(self, number: int) -> "Action Object":
""" input: an integer number as a perceptnumber from World.py act as a
hint output: action number, rowCoordinate x, columnCoordinate y
Total of 5 parts:
I: Patient 0
- Uncover all the surrounding tiles if the center tile is 0
II: Basic 3x3 deductions
- If the number of covered surrending tiles is qual to the percept
number for the center tile, then the covered tiles are mines
III: Multisquare algorithm
- Looking at each single tile in the frontier (tiles that are
unexplored by adjcent to a percept number), expand the deduction
method from part II to multiple surrounding neighbors that have
multural affecting tiles.
IV: Linear algebra
- Looking at the entier frontier and simplify it into a matrix. Use
the uncovered neighbor tiles' percept number as constraints. Each
constraint tells the max possible number of mines among its
surrounding tiles. Also, use the entire board's unexplored Tiles
and mines left as the final equation in the matrix. Solve or
reduce the matrix into row echelon form. Since each tile can
either be a mine or not, further deduction can be made.
V: Guesses
- Assign each unexplored tiles a probability based on the current
knowledge base (number of minesleft and surrouding constraints)
One exception - guess corners first since the probability that
a corner being a mines is low. """
# in the beginning, update KB with the received percept number
self.logLastMovePercept(number)
""" Part I - Patient 0 """
# if the number is 0, then the last tile's surrounding is safe
if (number == 0):
""" if in bounds, not the last move,
not already in safeQ, and covered"""
for col in range(self.cLast-1, self.cLast+2):
for row in range(self.rLast-1, self.rLast+2):
if (self.__isInBounds(col, row) and \
not (col == self.cLast and row == self.rLast)) and \
((col, row) not in self.safeQ) and \
self.board[col][row].covered == True:
# add to the safeQ
self.safeQ.append((col, row))
# uncover all the safe ones
while (self.safeQ != deque([])):
# get the left most tile in the safe queue
cr = self.safeQ.popleft()
# update the knowledge base
self.logMove(AI.Action(1), cr[0], cr[1])
# return the action to the World
return Action(AI.Action(1), cr[0], cr[1])
""" Part II - Basic 3x3 deductions """
# locate the mine
for col in range(0, self.col):
for row in range(0, self.row):
""" If the total number of covered surrending the tile
is the percept number for the tile,
then the covered tile is the mine """
if (self.board[col][row].covered == False and \
self.board[col][row].number != 0 and \
self.board[col][row].number == \
self.surCovered(col, row)[0]):
# flag those as mines
mines = self.surCovered(col, row)[1]
for _ in mines:
self.markMines(_)
for col in range(0, self.col):
for row in range(0, self.row):
# percept == known mines
# surrounding unexplored - known mines > 0
if ((self.board[col][row].number == \
self.surMines(col, row)[0]) and \
(self.surCovered(col, row)[0] - \
self.surMines(col, row)[0] > 0)):
# get the unexplored tiles and known mines
covered = self.surCovered(col, row)[1]
mines = self.surMines(col, row)[1]
for _ in covered:
# if the mines are all discovered, the rest of the tiles
# must be safe
if (_ not in mines) and (_ not in self.safeQ):
self.safeQ.append(_)
# uncover all the safe ones
while (self.safeQ != deque([])):
# get the left most tile in the safe queue
cr = self.safeQ.popleft()
# update the knowledge base
self.logMove(AI.Action(1), cr[0], cr[1])
# return the action to the World
return Action(AI.Action(1), cr[0], cr[1])
""" Part III: neighbor_test (multisquare algorithm) """
for col in range(self.col):
for row in range(self.row):
if self.board[col][row].number > 0 and \
self.surUnknown(col, row)[0] > 0:
neigh = self.neighbor_test(col, row)
if neigh is not None and neigh != []:
for _ in neigh:
if _ in self.unknown and _ not in self.safeQ:
self.safeQ.append(_)
# uncover all the safe ones
while (self.safeQ != deque([])):
# get the left most tile in the safe queue
cr = self.safeQ.popleft()
# update the knowledge base
self.logMove(AI.Action(1), cr[0], cr[1])
# return the action to the World
return Action(AI.Action(1), cr[0], cr[1])
""" Part IV: linear algebra """
# initialize contraints, frontier, and frontier encoding for the matrix
constraints = []
frontier = []
frontierMap = dict()
unknown = self.unknown
totalMinesLeft = self.minesLeft
# get the current contraints
constraints = self.constraints()
constraintsCount = len(constraints)
# get the current frontier
frontier = self.frontier()
frontierCount = len(frontier)
# each row is a contraint
# the additional constraint is the entire unexplored tiles
rowCount = constraintsCount + 1
# each column is an explored tile on the board
columnCount = len(unknown) + 1
# if there are constraints and the variables, construct the matrix
if columnCount != 1 and rowCount != 1:
# create a list of column code for each variable tile plus the
# general rule (unexplored tiles and total mines left)
columnHeader = [x for x in range(columnCount)]
frontierHeader = columnHeader[:-1]
# encode each tile into a dictionary
col_to_tile = dict(zip(frontierHeader, unknown))
tile_to_col = dict(zip(unknown, frontierHeader))
# initialize the matrix
matrix = [[0 for i in range(columnCount)] for j in range(rowCount)]
# construct the matrix
row = 0
for constraint in constraints:
# list out the tiles to be explored for each constraint
sub_frontier = self.surUnknown(constraint[0], constraint[1])[1]
# mark the coordinates
for tile in sub_frontier:
# encode each column into tile coordinates
col = tile_to_col.get(tile)
# update the matrix coordinate value
matrix[row][col] = 1
# update the last column with the actual number of mines
# which is (percept - number of mines already discovered)
minesCount = self.board[constraint[0]][constraint[1]].number - \
self.surMines(constraint[0], constraint[1])[0]
# update the last number as the effective percept number
matrix[row][-1] = minesCount
# move on to the next row
row += 1
# update the last row as the general rule
for i in range(columnCount):
matrix[row][i] = 1
matrix[-1][-1] = totalMinesLeft
# reduce to row echelon form, where the magic happens
self.reduceRowEchelon(matrix)
"""
Since each tile is either a mine or not, its value is binary. this
is useful to solve the matrix or at least some tile's value.
"""
safe = []
mines = []
for row in matrix:
last = row[-1]
onesCount = self.countMix(row[:-1])[0]
onesList = self.countMix(row[:-1])[1]
neg_onesCount = self.countMix(row[:-1])[2]
negList = self.countMix(row[:-1])[3]
# case when the total number of mines is 0
if last == 0:
# case when there are only possitive coefficients on the left
if onesCount > 0 and neg_onesCount == 0:
for col in onesList:
| |
# -*- coding: utf-8
from xml.dom import minidom
import xml.etree.ElementTree as ET
class Time(object):
    """
    Wrapper for the OpenAir Time command, which asks the server
    for its current time.
    Arguments:
        none
    """
    def __init__(self):
        pass

    def __str__(self):
        return "time object"

    def time(self):
        """
        Build the <Time/> request tag as an ElementTree element.
        """
        return ET.Element('Time')

    def tostring(self):
        """
        Serialise the <Time/> element to an XML string.
        """
        return ET.tostring(self.time(), encoding='unicode')
class Read(object):
    """
    Use the read command to retrieve data from OpenAir.
    Arguments:
        type (str): a valid XML type
        method (str): a valid read method
        attribs (dict): a dictionary containing read attributes
        filters (list): a list of filter dictionaries as produced by
            Filter.getFilter(), each with 'filter', 'fieldname' and
            'datatype' keys
        fields (list): a list of fields to return
    """
    def __init__(self, type, method, attribs, filters, fields):
        self.type = type
        self.method = method
        self.attribs = attribs
        self.filters = filters
        self.fields = fields

    def __str__(self):
        return "%s (method: %s)" % (self.type, self.method, )

    def read(self):
        """
        Returns an ElementTree object containing a read tag,
        as well as all the appropriate attributes and filters.
        """
        elem = ET.Element('Read')
        attribs = {'type': self.type, 'method': self.method}
        # process all read attributes
        for key in self.attribs:
            attribs[key] = self.attribs[key]
        # create filter information
        if self.filters:
            filter_list = []
            field_list = []
            for item in self.filters:
                if item['filter']:
                    filter_list.append(item['filter'])
                if item['fieldname']:
                    field_list.append(item['fieldname'])
                # BUG FIX: the original appended the datatype element twice
                # when a filter dict had a fieldname but no filter type, and
                # skipped it for a filter type without a fieldname; it also
                # relied on Element truthiness (childless elements are falsy,
                # deprecated).  Each filter's value element belongs in the
                # request exactly once.
                if item['datatype'] is not None:
                    elem.append(item['datatype'])
            if field_list:
                attribs['field'] = ','.join(field_list)
            if filter_list:
                attribs['filter'] = ','.join(filter_list)
        # add all attribs to the XML element
        elem.attrib = attribs
        # process return fields
        if self.fields:
            subelem = ET.SubElement(elem, '_Return')
            for f in self.fields:
                ET.SubElement(subelem, f)
        return elem

    def tostring(self):
        """
        Return a string containing XML tags.
        """
        return ET.tostring(self.read(), encoding='unicode')

    def prettify(self):
        """
        Return a formatted, prettified string containing XML tags.
        """
        # BUG FIX: minidom's toprettyxml() has no 'unicode' codec (that is an
        # ElementTree-only convention), so the original raised LookupError.
        # With no encoding argument toprettyxml() returns a str.
        reparsed = minidom.parseString(self.tostring())
        return reparsed.toprettyxml(indent=' ')
class Filter(object):
    """
    Builds a single filter criterion for use with the Read command.
    Arguments:
        filter (str): the type of filter
        fieldname (str): the field to be filtered
        datatype (obj): a valid XML element
    """
    def __init__(self, filter, fieldname, datatype):
        self.filter = filter
        self.fieldname = fieldname
        self.datatype = datatype.getDatatype()

    def __str__(self):
        return "filter object"

    def getFilter(self):
        """
        Returns a dictionary of filter criteria.
        """
        return {
            'filter': self.filter,
            'fieldname': self.fieldname,
            'datatype': self.datatype,
        }
class Report(object):
    """
    Use the Report command to run a report and email a PDF copy
    of a Timesheet, Envelope, or Saved report.
    Arguments:
        type (str): a valid XML type. Only Timesheet, Envelope
        or Reportf datatypes are allowed
        report (obj): a valid XML report element datatype
    """
    def __init__(self, type, report):
        self.type = type
        self.report = report.getDatatype()

    # type -- validated against the closed set of report datatypes
    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, t):
        if not t in ['Timesheet', 'Envelope', 'Reportf']:
            raise Exception('type "%s" not supported' % t)
        self._type = t

    def getReport(self):
        """
        Returns an ElementTree object containing report tags.
        """
        elem = ET.Element('Report')
        elem.attrib = {'type': self.type}
        elem.append(self.report)
        return elem

    def tostring(self):
        """
        Return a string containing XML tags.
        """
        return ET.tostring(self.getReport(), encoding='unicode')

    def prettify(self):
        """
        Return a formatted, prettified string containing XML tags.
        """
        # BUG FIX: minidom's toprettyxml() rejects encoding='unicode'
        # (LookupError: unknown encoding); omitting the argument yields a str.
        reparsed = minidom.parseString(self.tostring())
        return reparsed.toprettyxml(indent=' ')
class Add(object):
    """
    Use the Add command to add records.
    Arguments:
        type (str): a valid XML type
        attribs (dict): a dictionary containing add attributes
        datatype (obj): a valid Datatype() object
    """
    def __init__(self, type, attribs, datatype):
        self.type = type
        self.attribs = attribs
        self.datatype = datatype.getDatatype()

    def __str__(self):
        return 'Add "%s"' % self.type

    # type -- User and Company records have dedicated create commands
    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, t):
        if t == 'User':
            raise Exception(
                'datatype "%s" not supported - use CreateUser' % t
            )
        if t == 'Company':
            raise Exception(
                'datatype "%s" not supported - use CreateAccount' % t
            )
        self._type = t

    def add(self):
        """
        Returns an ElementTree object containing add tags.
        """
        elem = ET.Element('Add')
        attribs = {'type': self.type}
        # process all add attributes
        for key in self.attribs:
            attribs[key] = self.attribs[key]
        elem.attrib = attribs
        elem.append(self.datatype)
        return elem

    def tostring(self):
        """
        Return a string containing XML tags.
        """
        return ET.tostring(self.add(), encoding='unicode')

    def prettify(self):
        """
        Return a formatted, prettified string containing XML tags.
        """
        # BUG FIX: minidom's toprettyxml() rejects encoding='unicode'
        # (LookupError: unknown encoding); omitting the argument yields a str.
        reparsed = minidom.parseString(self.tostring())
        return reparsed.toprettyxml(indent=' ')
class Delete(object):
    """
    Use the Delete command to delete records.
    Arguments:
        type (str): a valid XML type
        datatype (obj): a valid Datatype() object
    """
    def __init__(self, type, datatype):
        self.type = type
        self.datatype = datatype.getDatatype()

    def __str__(self):
        return 'Delete "%s"' % self.type

    def delete(self):
        """
        Returns an ElementTree object containing delete tags.
        """
        elem = ET.Element('Delete')
        elem.attrib = {'type': self.type}
        elem.append(self.datatype)
        return elem

    def tostring(self):
        """
        Return a string containing XML tags.
        """
        return ET.tostring(self.delete(), encoding='unicode')

    def prettify(self):
        """
        Return a formatted, prettified string containing XML tags.
        """
        # BUG FIX: minidom's toprettyxml() rejects encoding='unicode'
        # (LookupError: unknown encoding); omitting the argument yields a str.
        reparsed = minidom.parseString(self.tostring())
        return reparsed.toprettyxml(indent=' ')
class Modify(object):
    """
    Use the Modify command to change records.
    Arguments:
        type (str): a valid XML type
        attribs (dict): a dictionary containing modify attributes
        datatype (obj): a valid Datatype() object
    """
    def __init__(self, type, attribs, datatype):
        self.type = type
        self.attribs = attribs
        self.datatype = datatype.getDatatype()

    def __str__(self):
        return 'Modify "%s"' % self.type

    def modify(self):
        """
        Returns an ElementTree object containing modify tags.
        """
        elem = ET.Element('Modify')
        attribs = {'type': self.type}
        # process all modify attributes
        for key in self.attribs:
            attribs[key] = self.attribs[key]
        elem.attrib = attribs
        elem.append(self.datatype)
        return elem

    def tostring(self):
        """
        Return a string containing XML tags.
        """
        return ET.tostring(self.modify(), encoding='unicode')

    def prettify(self):
        """
        Return a formatted, prettified string containing XML tags.
        """
        # BUG FIX: minidom's toprettyxml() rejects encoding='unicode'
        # (LookupError: unknown encoding); omitting the argument yields a str.
        reparsed = minidom.parseString(self.tostring())
        return reparsed.toprettyxml(indent=' ')
class Submit(object):
    """
    Use the Submit command to submit records.
    Arguments:
        type (str): a valid XML type ('Timesheet' or 'Envelope')
        datatype (obj): a valid Datatype() object
        approval (obj): a valid approval Datatype() object
    """
    def __init__(self, type, datatype, approval):
        self.type = type
        self.datatype = datatype.getDatatype()
        self.approval = approval.getDatatype()

    def __str__(self):
        return 'Submit "%s"' % self.type

    # type -- restricted to the submittable datatypes
    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, t):
        if not t in ['Timesheet', 'Envelope']:
            raise Exception('type "%s" not supported' % t)
        # CONSISTENCY FIX: store under _type like the sibling commands
        # (the original used the misleading private name _datatype).
        self._type = t

    def submit(self):
        """
        Returns an ElementTree object containing submit tags.
        """
        elem = ET.Element('Submit')
        elem.attrib = {'type': self.type}
        elem.append(self.datatype)
        elem.append(self.approval)
        return elem

    def tostring(self):
        """
        Return a string containing XML tags.
        """
        return ET.tostring(self.submit(), encoding='unicode')

    def prettify(self):
        """
        Return a formatted, prettified string containing XML tags.
        """
        # BUG FIX: minidom's toprettyxml() rejects encoding='unicode'
        # (LookupError: unknown encoding); omitting the argument yields a str.
        reparsed = minidom.parseString(self.tostring())
        return reparsed.toprettyxml(indent=' ')
class CreateAccount(object):
    """
    Use the CreateAccount command to create a new OpenAir account.
    When a new account is created, the first user is also created
    as the account administrator.
    Arguments:
        company (obj): a valid company Datatype() object
        user (obj): a valid user Datatype() object
    """
    def __init__(self, company, user):
        self.company = company
        self.user = user

    def __str__(self):
        return 'CreateAccount for "%s"' % self.company.fields['nickname']

    # company -- must carry at least a 'nickname' field
    @property
    def company(self):
        return self._company

    @company.setter
    def company(self, c):
        if not 'nickname' in c.fields:
            # BUG FIX: the original applied '%' to a format string with no
            # placeholder, raising TypeError instead of this Exception.
            raise Exception('"nickname" is a required Company field')
        self._company = c

    # user -- must carry 'nickname', 'password' and 'email' fields
    @property
    def user(self):
        return self._user

    @user.setter
    def user(self, u):
        if (
            not 'nickname' in u.fields or
            not 'password' in u.fields or
            not 'email' in u.fields
        ):
            # BUG FIX: same malformed '%' formatting as the company setter.
            raise Exception(
                '"nickname, password and email" are required User fields'
            )
        self._user = u

    def create(self):
        """
        Returns an ElementTree object containing the CreateAccount tag
        with the company and user elements as children.
        """
        elem = ET.Element('CreateAccount')
        elem.append(self.company.getDatatype())
        elem.append(self.user.getDatatype())
        return elem

    def tostring(self):
        """
        Return a string containing XML tags.
        """
        return ET.tostring(self.create(), encoding='unicode')

    def prettify(self):
        """
        Return a formatted, prettified string containing XML tags.
        """
        # BUG FIX: minidom's toprettyxml() rejects encoding='unicode'
        # (LookupError: unknown encoding); omitting the argument yields a str.
        reparsed = minidom.parseString(self.tostring())
        return reparsed.toprettyxml(indent=' ')
class CreateUser(object):
"""
Use the CreateUser command to create a new OpenAir user.
Arguments:
company (obj): a valid company Datatype() object
user (obj): a valid user Datatype() object
"""
def __init__(self, company, user):
self.company = company
self.user = user
def __str__(self):
return 'CreateUser "%s"' % self.user.fields['nickname']
# company
@property
def company(self):
return self._company
@company.setter
def company(self, c):
if not 'nickname' in c.fields:
raise Exception('"nickname" is a required Company field' % c)
self._company = c
# user
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.