body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
952856247591d7a508f952c425a38d8bc34c07cc3e4604c9ac6799e63d4d4d5b | def to_ir(self):
'\n No need to implement for now.\n '
raise NotImplementedError() | No need to implement for now. | ecosystem_tools/mindconverter/mindconverter/graph_based_converter/third_party_graph/input_node.py | to_ir | mindspore-ai/mindinsight | 216 | python | def to_ir(self):
'\n \n '
raise NotImplementedError() | def to_ir(self):
'\n \n '
raise NotImplementedError()<|docstring|>No need to implement for now.<|endoftext|> |
9b0fb7d7ab2909c9c1c37f8048155a844d67bf4a662573f06190a3db48197680 | def fit(self, train_set, val_set=None):
'Fit the model to observations.\n\n Parameters\n ----------\n train_set: :obj:`cornac.data.Dataset`, required\n User-Item preference data as well as additional modalities.\n\n val_set: :obj:`cornac.data.Dataset`, optional, default: None\n User-Item preference data for model selection purposes (e.g., early stopping).\n\n Returns\n -------\n self : object\n '
Recommender.fit(self, train_set, val_set)
import torch
from .bivae import BiVAE, learn
self.device = (torch.device('cuda:0') if (self.use_gpu and torch.cuda.is_available()) else torch.device('cpu'))
if self.trainable:
feature_dim = {'user': None, 'item': None}
if self.cap_priors.get('user', False):
if (train_set.user_feature is None):
raise ValueError('CAP priors for users is set to True but no user features are provided')
else:
feature_dim['user'] = train_set.user_feature.feature_dim
if self.cap_priors.get('item', False):
if (train_set.item_feature is None):
raise ValueError('CAP priors for items is set to True but no item features are provided')
else:
feature_dim['item'] = train_set.item_feature.feature_dim
if (self.seed is not None):
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
if (not hasattr(self, 'bivaecf')):
num_items = train_set.matrix.shape[1]
num_users = train_set.matrix.shape[0]
self.bivae = BiVAE(k=self.k, user_encoder_structure=([num_items] + self.encoder_structure), item_encoder_structure=([num_users] + self.encoder_structure), act_fn=self.act_fn, likelihood=self.likelihood, cap_priors=self.cap_priors, feature_dim=feature_dim, batch_size=self.batch_size).to(self.device)
learn(self.bivae, self.train_set, n_epochs=self.n_epochs, batch_size=self.batch_size, learn_rate=self.learning_rate, beta_kl=self.beta_kl, verbose=self.verbose, device=self.device)
elif self.verbose:
print(('%s is trained already (trainable = False)' % self.name))
return self | Fit the model to observations.
Parameters
----------
train_set: :obj:`cornac.data.Dataset`, required
User-Item preference data as well as additional modalities.
val_set: :obj:`cornac.data.Dataset`, optional, default: None
User-Item preference data for model selection purposes (e.g., early stopping).
Returns
-------
self : object | cornac/models/bivaecf/recom_bivaecf.py | fit | xurong-liang/cornac | 597 | python | def fit(self, train_set, val_set=None):
'Fit the model to observations.\n\n Parameters\n ----------\n train_set: :obj:`cornac.data.Dataset`, required\n User-Item preference data as well as additional modalities.\n\n val_set: :obj:`cornac.data.Dataset`, optional, default: None\n User-Item preference data for model selection purposes (e.g., early stopping).\n\n Returns\n -------\n self : object\n '
Recommender.fit(self, train_set, val_set)
import torch
from .bivae import BiVAE, learn
self.device = (torch.device('cuda:0') if (self.use_gpu and torch.cuda.is_available()) else torch.device('cpu'))
if self.trainable:
feature_dim = {'user': None, 'item': None}
if self.cap_priors.get('user', False):
if (train_set.user_feature is None):
raise ValueError('CAP priors for users is set to True but no user features are provided')
else:
feature_dim['user'] = train_set.user_feature.feature_dim
if self.cap_priors.get('item', False):
if (train_set.item_feature is None):
raise ValueError('CAP priors for items is set to True but no item features are provided')
else:
feature_dim['item'] = train_set.item_feature.feature_dim
if (self.seed is not None):
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
if (not hasattr(self, 'bivaecf')):
num_items = train_set.matrix.shape[1]
num_users = train_set.matrix.shape[0]
self.bivae = BiVAE(k=self.k, user_encoder_structure=([num_items] + self.encoder_structure), item_encoder_structure=([num_users] + self.encoder_structure), act_fn=self.act_fn, likelihood=self.likelihood, cap_priors=self.cap_priors, feature_dim=feature_dim, batch_size=self.batch_size).to(self.device)
learn(self.bivae, self.train_set, n_epochs=self.n_epochs, batch_size=self.batch_size, learn_rate=self.learning_rate, beta_kl=self.beta_kl, verbose=self.verbose, device=self.device)
elif self.verbose:
print(('%s is trained already (trainable = False)' % self.name))
return self | def fit(self, train_set, val_set=None):
'Fit the model to observations.\n\n Parameters\n ----------\n train_set: :obj:`cornac.data.Dataset`, required\n User-Item preference data as well as additional modalities.\n\n val_set: :obj:`cornac.data.Dataset`, optional, default: None\n User-Item preference data for model selection purposes (e.g., early stopping).\n\n Returns\n -------\n self : object\n '
Recommender.fit(self, train_set, val_set)
import torch
from .bivae import BiVAE, learn
self.device = (torch.device('cuda:0') if (self.use_gpu and torch.cuda.is_available()) else torch.device('cpu'))
if self.trainable:
feature_dim = {'user': None, 'item': None}
if self.cap_priors.get('user', False):
if (train_set.user_feature is None):
raise ValueError('CAP priors for users is set to True but no user features are provided')
else:
feature_dim['user'] = train_set.user_feature.feature_dim
if self.cap_priors.get('item', False):
if (train_set.item_feature is None):
raise ValueError('CAP priors for items is set to True but no item features are provided')
else:
feature_dim['item'] = train_set.item_feature.feature_dim
if (self.seed is not None):
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
if (not hasattr(self, 'bivaecf')):
num_items = train_set.matrix.shape[1]
num_users = train_set.matrix.shape[0]
self.bivae = BiVAE(k=self.k, user_encoder_structure=([num_items] + self.encoder_structure), item_encoder_structure=([num_users] + self.encoder_structure), act_fn=self.act_fn, likelihood=self.likelihood, cap_priors=self.cap_priors, feature_dim=feature_dim, batch_size=self.batch_size).to(self.device)
learn(self.bivae, self.train_set, n_epochs=self.n_epochs, batch_size=self.batch_size, learn_rate=self.learning_rate, beta_kl=self.beta_kl, verbose=self.verbose, device=self.device)
elif self.verbose:
print(('%s is trained already (trainable = False)' % self.name))
return self<|docstring|>Fit the model to observations.
Parameters
----------
train_set: :obj:`cornac.data.Dataset`, required
User-Item preference data as well as additional modalities.
val_set: :obj:`cornac.data.Dataset`, optional, default: None
User-Item preference data for model selection purposes (e.g., early stopping).
Returns
-------
self : object<|endoftext|> |
c28f88cafc43feaff9b6e325f154430250b0493d668593e5bc5bc5ef71ae70b1 | def score(self, user_idx, item_idx=None):
'Predict the scores/ratings of a user for an item.\n\n Parameters\n ----------\n user_idx: int, required\n The index of the user for whom to perform score prediction.\n\n item_idx: int, optional, default: None\n The index of the item for which to perform score prediction.\n If None, scores for all known items will be returned.\n\n Returns\n -------\n res : A scalar or a Numpy array\n Relative scores that the user gives to the item or to all known items\n\n '
if (item_idx is None):
if self.train_set.is_unk_user(user_idx):
raise ScoreException(("Can't make score prediction for (user_id=%d)" % user_idx))
theta_u = self.bivae.mu_theta[user_idx].view(1, (- 1))
beta = self.bivae.mu_beta
known_item_scores = self.bivae.decode_user(theta_u, beta).cpu().numpy().ravel()
return known_item_scores
else:
if (self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx)):
raise ScoreException(("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx)))
theta_u = self.bivae.mu_theta[user_idx].view(1, (- 1))
beta_i = self.bivae.mu_beta[item_idx].view(1, (- 1))
pred = self.bivae.decode_user(theta_u, beta_i).cpu().numpy().ravel()
pred = scale(pred, self.train_set.min_rating, self.train_set.max_rating, 0.0, 1.0)
return pred | Predict the scores/ratings of a user for an item.
Parameters
----------
user_idx: int, required
The index of the user for whom to perform score prediction.
item_idx: int, optional, default: None
The index of the item for which to perform score prediction.
If None, scores for all known items will be returned.
Returns
-------
res : A scalar or a Numpy array
Relative scores that the user gives to the item or to all known items | cornac/models/bivaecf/recom_bivaecf.py | score | xurong-liang/cornac | 597 | python | def score(self, user_idx, item_idx=None):
'Predict the scores/ratings of a user for an item.\n\n Parameters\n ----------\n user_idx: int, required\n The index of the user for whom to perform score prediction.\n\n item_idx: int, optional, default: None\n The index of the item for which to perform score prediction.\n If None, scores for all known items will be returned.\n\n Returns\n -------\n res : A scalar or a Numpy array\n Relative scores that the user gives to the item or to all known items\n\n '
if (item_idx is None):
if self.train_set.is_unk_user(user_idx):
raise ScoreException(("Can't make score prediction for (user_id=%d)" % user_idx))
theta_u = self.bivae.mu_theta[user_idx].view(1, (- 1))
beta = self.bivae.mu_beta
known_item_scores = self.bivae.decode_user(theta_u, beta).cpu().numpy().ravel()
return known_item_scores
else:
if (self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx)):
raise ScoreException(("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx)))
theta_u = self.bivae.mu_theta[user_idx].view(1, (- 1))
beta_i = self.bivae.mu_beta[item_idx].view(1, (- 1))
pred = self.bivae.decode_user(theta_u, beta_i).cpu().numpy().ravel()
pred = scale(pred, self.train_set.min_rating, self.train_set.max_rating, 0.0, 1.0)
return pred | def score(self, user_idx, item_idx=None):
'Predict the scores/ratings of a user for an item.\n\n Parameters\n ----------\n user_idx: int, required\n The index of the user for whom to perform score prediction.\n\n item_idx: int, optional, default: None\n The index of the item for which to perform score prediction.\n If None, scores for all known items will be returned.\n\n Returns\n -------\n res : A scalar or a Numpy array\n Relative scores that the user gives to the item or to all known items\n\n '
if (item_idx is None):
if self.train_set.is_unk_user(user_idx):
raise ScoreException(("Can't make score prediction for (user_id=%d)" % user_idx))
theta_u = self.bivae.mu_theta[user_idx].view(1, (- 1))
beta = self.bivae.mu_beta
known_item_scores = self.bivae.decode_user(theta_u, beta).cpu().numpy().ravel()
return known_item_scores
else:
if (self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx)):
raise ScoreException(("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx)))
theta_u = self.bivae.mu_theta[user_idx].view(1, (- 1))
beta_i = self.bivae.mu_beta[item_idx].view(1, (- 1))
pred = self.bivae.decode_user(theta_u, beta_i).cpu().numpy().ravel()
pred = scale(pred, self.train_set.min_rating, self.train_set.max_rating, 0.0, 1.0)
return pred<|docstring|>Predict the scores/ratings of a user for an item.
Parameters
----------
user_idx: int, required
The index of the user for whom to perform score prediction.
item_idx: int, optional, default: None
The index of the item for which to perform score prediction.
If None, scores for all known items will be returned.
Returns
-------
res : A scalar or a Numpy array
Relative scores that the user gives to the item or to all known items<|endoftext|> |
d91c5d3a5c5ed19de8d56886df327e210b8cc58093c78de8c1a87d2d955f1c59 | def rain_outliers(rain_data: pd.DataFrame) -> pd.DataFrame:
'Generates an outlier index time series\n \n Finds the ratio between each value to the ninety-ninth percentile of non-zero values.\n\n Parameters\n ----------\n rain_data : pd.DataFrame\n A time series of rainfall amounts to be tested.\n\n Returns\n -------\n rain_outliers : pd.DataFrame\n A time series of outlier indices. \n\n '
'Rainfall quality check for outliers'
if (len(rain_data.index) < 100):
Output = pd.DataFrame(np.nan, columns=['Outlier'], index=rain_data.index)
else:
NonZeroRainData = rain_data.values[(rain_data.values > 0.2)]
NinetyNinth = np.quantile(NonZeroRainData, 0.99)
OutlierData = np.round((rain_data.values / NinetyNinth), 1)
Output = pd.DataFrame(OutlierData, columns=['Outlier'], index=rain_data.index)
return Output | Generates an outlier index time series
Finds the ratio between each value to the ninety-ninth percentile of non-zero values.
Parameters
----------
rain_data : pd.DataFrame
A time series of rainfall amounts to be tested.
Returns
-------
rain_outliers : pd.DataFrame
A time series of outlier indices. | src/RainDataChecks.py | rain_outliers | RainfallNZ/RainCheckPy | 0 | python | def rain_outliers(rain_data: pd.DataFrame) -> pd.DataFrame:
'Generates an outlier index time series\n \n Finds the ratio between each value to the ninety-ninth percentile of non-zero values.\n\n Parameters\n ----------\n rain_data : pd.DataFrame\n A time series of rainfall amounts to be tested.\n\n Returns\n -------\n rain_outliers : pd.DataFrame\n A time series of outlier indices. \n\n '
'Rainfall quality check for outliers'
if (len(rain_data.index) < 100):
Output = pd.DataFrame(np.nan, columns=['Outlier'], index=rain_data.index)
else:
NonZeroRainData = rain_data.values[(rain_data.values > 0.2)]
NinetyNinth = np.quantile(NonZeroRainData, 0.99)
OutlierData = np.round((rain_data.values / NinetyNinth), 1)
Output = pd.DataFrame(OutlierData, columns=['Outlier'], index=rain_data.index)
return Output | def rain_outliers(rain_data: pd.DataFrame) -> pd.DataFrame:
'Generates an outlier index time series\n \n Finds the ratio between each value to the ninety-ninth percentile of non-zero values.\n\n Parameters\n ----------\n rain_data : pd.DataFrame\n A time series of rainfall amounts to be tested.\n\n Returns\n -------\n rain_outliers : pd.DataFrame\n A time series of outlier indices. \n\n '
'Rainfall quality check for outliers'
if (len(rain_data.index) < 100):
Output = pd.DataFrame(np.nan, columns=['Outlier'], index=rain_data.index)
else:
NonZeroRainData = rain_data.values[(rain_data.values > 0.2)]
NinetyNinth = np.quantile(NonZeroRainData, 0.99)
OutlierData = np.round((rain_data.values / NinetyNinth), 1)
Output = pd.DataFrame(OutlierData, columns=['Outlier'], index=rain_data.index)
return Output<|docstring|>Generates an outlier index time series
Finds the ratio between each value to the ninety-ninth percentile of non-zero values.
Parameters
----------
rain_data : pd.DataFrame
A time series of rainfall amounts to be tested.
Returns
-------
rain_outliers : pd.DataFrame
A time series of outlier indices.<|endoftext|> |
2fb0cd71838bf76431f1c1647190180cb33f19d0e65f23a5ae46b81bd2e28be3 | def impossibles(rain_data, minimum_precision=float('nan')):
'rainfall quality check for impossible values'
NotANumber = (~ np.array([isinstance(item, numbers.Number) for item in rain_data.values[(:, 0)]]))
Sub_zeros = (rain_data.apply(pd.to_numeric, errors='coerce') < 0)
if ((not math.isnan(minimum_precision)) and (minimum_precision > 0)):
False_precision = ((rain_data.apply(pd.to_numeric, errors='coerce') % minimum_precision) != 0)
else:
False_precision = NotANumber
ImpossibleData = ((Sub_zeros.Rainfall.to_numpy() | NotANumber) | False_precision)
Output = pd.DataFrame(ImpossibleData, columns=['Impossible'], index=rain_data.index)
return Output | rainfall quality check for impossible values | src/RainDataChecks.py | impossibles | RainfallNZ/RainCheckPy | 0 | python | def impossibles(rain_data, minimum_precision=float('nan')):
NotANumber = (~ np.array([isinstance(item, numbers.Number) for item in rain_data.values[(:, 0)]]))
Sub_zeros = (rain_data.apply(pd.to_numeric, errors='coerce') < 0)
if ((not math.isnan(minimum_precision)) and (minimum_precision > 0)):
False_precision = ((rain_data.apply(pd.to_numeric, errors='coerce') % minimum_precision) != 0)
else:
False_precision = NotANumber
ImpossibleData = ((Sub_zeros.Rainfall.to_numpy() | NotANumber) | False_precision)
Output = pd.DataFrame(ImpossibleData, columns=['Impossible'], index=rain_data.index)
return Output | def impossibles(rain_data, minimum_precision=float('nan')):
NotANumber = (~ np.array([isinstance(item, numbers.Number) for item in rain_data.values[(:, 0)]]))
Sub_zeros = (rain_data.apply(pd.to_numeric, errors='coerce') < 0)
if ((not math.isnan(minimum_precision)) and (minimum_precision > 0)):
False_precision = ((rain_data.apply(pd.to_numeric, errors='coerce') % minimum_precision) != 0)
else:
False_precision = NotANumber
ImpossibleData = ((Sub_zeros.Rainfall.to_numpy() | NotANumber) | False_precision)
Output = pd.DataFrame(ImpossibleData, columns=['Impossible'], index=rain_data.index)
return Output<|docstring|>rainfall quality check for impossible values<|endoftext|> |
757a0cd6807c5a33a438c464c87abc51cb1511709529dec283aa9efcf4b5e69b | def DateTimeIssues(rain_data):
'rainfall quality check for duplicate date times'
DateTimeDuplicated = rain_data.index.duplicated(keep=False)
Output = pd.DataFrame(DateTimeDuplicated, columns=['DuplicateDateTimes'], index=rain_data.index)
return Output | rainfall quality check for duplicate date times | src/RainDataChecks.py | DateTimeIssues | RainfallNZ/RainCheckPy | 0 | python | def DateTimeIssues(rain_data):
DateTimeDuplicated = rain_data.index.duplicated(keep=False)
Output = pd.DataFrame(DateTimeDuplicated, columns=['DuplicateDateTimes'], index=rain_data.index)
return Output | def DateTimeIssues(rain_data):
DateTimeDuplicated = rain_data.index.duplicated(keep=False)
Output = pd.DataFrame(DateTimeDuplicated, columns=['DuplicateDateTimes'], index=rain_data.index)
return Output<|docstring|>rainfall quality check for duplicate date times<|endoftext|> |
82870295ce4668bfa927812346adce51dbc6d6c61b280cd1217c80a312bf5bda | def HighFrequencyTipping(rain_data):
'rainfall quality check for unlikely rapid tipping'
'from Blekinsop et al. (2017) lambda sub k statistic'
'This is only appropriate for raw tip-based data'
InterTipTimes = rain_data.index.to_series().diff().astype('timedelta64[s]')
HighFrequencyTips = np.zeros(len(InterTipTimes), dtype=bool)
LambdaSubK = np.log((InterTipTimes / InterTipTimes.shift(1))).abs()
RapidTipRateChanges = (LambdaSubK > 5)
SubThresholdInterTipTimesBoolean = (InterTipTimes < 5)
RapidTipRateChangeIndices = np.where(RapidTipRateChanges)
if (len(RapidTipRateChangeIndices[0]) > 0):
for index in RapidTipRateChangeIndices:
SubThresholdTripTime = SubThresholdInterTipTimesBoolean[index[0]]
NoOfSubThresholdTripTimes = 0
while SubThresholdTripTime:
NoOfSubThresholdTripTimes = (NoOfSubThresholdTripTimes + 1)
SubThresholdTripTime = SubThresholdInterTipTimesBoolean[(index[0] + NoOfSubThresholdTripTimes)]
HighFrequencyTips[index[0]:(index + NoOfSubThresholdTripTimes)[0]] = True
Output = pd.DataFrame(HighFrequencyTips, columns=['HighFrequencyTips'], index=rain_data.index)
return Output | rainfall quality check for unlikely rapid tipping | src/RainDataChecks.py | HighFrequencyTipping | RainfallNZ/RainCheckPy | 0 | python | def HighFrequencyTipping(rain_data):
'from Blekinsop et al. (2017) lambda sub k statistic'
'This is only appropriate for raw tip-based data'
InterTipTimes = rain_data.index.to_series().diff().astype('timedelta64[s]')
HighFrequencyTips = np.zeros(len(InterTipTimes), dtype=bool)
LambdaSubK = np.log((InterTipTimes / InterTipTimes.shift(1))).abs()
RapidTipRateChanges = (LambdaSubK > 5)
SubThresholdInterTipTimesBoolean = (InterTipTimes < 5)
RapidTipRateChangeIndices = np.where(RapidTipRateChanges)
if (len(RapidTipRateChangeIndices[0]) > 0):
for index in RapidTipRateChangeIndices:
SubThresholdTripTime = SubThresholdInterTipTimesBoolean[index[0]]
NoOfSubThresholdTripTimes = 0
while SubThresholdTripTime:
NoOfSubThresholdTripTimes = (NoOfSubThresholdTripTimes + 1)
SubThresholdTripTime = SubThresholdInterTipTimesBoolean[(index[0] + NoOfSubThresholdTripTimes)]
HighFrequencyTips[index[0]:(index + NoOfSubThresholdTripTimes)[0]] = True
Output = pd.DataFrame(HighFrequencyTips, columns=['HighFrequencyTips'], index=rain_data.index)
return Output | def HighFrequencyTipping(rain_data):
'from Blekinsop et al. (2017) lambda sub k statistic'
'This is only appropriate for raw tip-based data'
InterTipTimes = rain_data.index.to_series().diff().astype('timedelta64[s]')
HighFrequencyTips = np.zeros(len(InterTipTimes), dtype=bool)
LambdaSubK = np.log((InterTipTimes / InterTipTimes.shift(1))).abs()
RapidTipRateChanges = (LambdaSubK > 5)
SubThresholdInterTipTimesBoolean = (InterTipTimes < 5)
RapidTipRateChangeIndices = np.where(RapidTipRateChanges)
if (len(RapidTipRateChangeIndices[0]) > 0):
for index in RapidTipRateChangeIndices:
SubThresholdTripTime = SubThresholdInterTipTimesBoolean[index[0]]
NoOfSubThresholdTripTimes = 0
while SubThresholdTripTime:
NoOfSubThresholdTripTimes = (NoOfSubThresholdTripTimes + 1)
SubThresholdTripTime = SubThresholdInterTipTimesBoolean[(index[0] + NoOfSubThresholdTripTimes)]
HighFrequencyTips[index[0]:(index + NoOfSubThresholdTripTimes)[0]] = True
Output = pd.DataFrame(HighFrequencyTips, columns=['HighFrequencyTips'], index=rain_data.index)
return Output<|docstring|>rainfall quality check for unlikely rapid tipping<|endoftext|> |
8b2f0bf7c08dc56a38d9d8cb7dd33058d824ef279f9a69c61d08f679e8af6926 | def DrySpells(rain_data):
'rainfall quality check for dry spells'
'identify the length (in days) of a dry spell that a no-rain observation is within'
'Alternative method using runlength encoding'
DryObservations = pd.DataFrame((rain_data.values == 0), columns=['Dry'], index=rain_data.index)
RLE = [(k, sum((1 for i in g))) for (k, g) in itertools.groupby(DryObservations['Dry'])]
RunLengthCodes = [a_tuple[0] for a_tuple in RLE]
RunLengths = [a_tuple[1] for a_tuple in RLE]
RunLengthEndIndices = (np.cumsum(RunLengths) - 1)
RunLengthStartIndices = np.insert((RunLengthEndIndices[0:(- 1)] + 1), 0, 0, axis=0)
DryRunLengthEndIndices = RunLengthEndIndices[RunLengthCodes]
DryRunLengthStartIndices = RunLengthStartIndices[RunLengthCodes]
DryRunEndDateTimes = DryObservations.index[DryRunLengthEndIndices]
DryRunStartDateTimes = DryObservations.index[DryRunLengthStartIndices]
DryRunTimeLength = (DryRunEndDateTimes - DryRunStartDateTimes).days
DryObservations['DrySpellDayLengths'] = np.nan
DryObservations.iloc[(DryRunLengthEndIndices, DryObservations.columns.get_loc('DrySpellDayLengths'))] = DryRunTimeLength
DryObservations.DrySpellDayLengths.fillna(method='backfill', inplace=True)
DryObservations.loc[((~ DryObservations.Dry), 'DrySpellDayLengths')] = 0
Output = DryObservations[['DrySpellDayLengths']]
return Output | rainfall quality check for dry spells | src/RainDataChecks.py | DrySpells | RainfallNZ/RainCheckPy | 0 | python | def DrySpells(rain_data):
'identify the length (in days) of a dry spell that a no-rain observation is within'
'Alternative method using runlength encoding'
DryObservations = pd.DataFrame((rain_data.values == 0), columns=['Dry'], index=rain_data.index)
RLE = [(k, sum((1 for i in g))) for (k, g) in itertools.groupby(DryObservations['Dry'])]
RunLengthCodes = [a_tuple[0] for a_tuple in RLE]
RunLengths = [a_tuple[1] for a_tuple in RLE]
RunLengthEndIndices = (np.cumsum(RunLengths) - 1)
RunLengthStartIndices = np.insert((RunLengthEndIndices[0:(- 1)] + 1), 0, 0, axis=0)
DryRunLengthEndIndices = RunLengthEndIndices[RunLengthCodes]
DryRunLengthStartIndices = RunLengthStartIndices[RunLengthCodes]
DryRunEndDateTimes = DryObservations.index[DryRunLengthEndIndices]
DryRunStartDateTimes = DryObservations.index[DryRunLengthStartIndices]
DryRunTimeLength = (DryRunEndDateTimes - DryRunStartDateTimes).days
DryObservations['DrySpellDayLengths'] = np.nan
DryObservations.iloc[(DryRunLengthEndIndices, DryObservations.columns.get_loc('DrySpellDayLengths'))] = DryRunTimeLength
DryObservations.DrySpellDayLengths.fillna(method='backfill', inplace=True)
DryObservations.loc[((~ DryObservations.Dry), 'DrySpellDayLengths')] = 0
Output = DryObservations[['DrySpellDayLengths']]
return Output | def DrySpells(rain_data):
'identify the length (in days) of a dry spell that a no-rain observation is within'
'Alternative method using runlength encoding'
DryObservations = pd.DataFrame((rain_data.values == 0), columns=['Dry'], index=rain_data.index)
RLE = [(k, sum((1 for i in g))) for (k, g) in itertools.groupby(DryObservations['Dry'])]
RunLengthCodes = [a_tuple[0] for a_tuple in RLE]
RunLengths = [a_tuple[1] for a_tuple in RLE]
RunLengthEndIndices = (np.cumsum(RunLengths) - 1)
RunLengthStartIndices = np.insert((RunLengthEndIndices[0:(- 1)] + 1), 0, 0, axis=0)
DryRunLengthEndIndices = RunLengthEndIndices[RunLengthCodes]
DryRunLengthStartIndices = RunLengthStartIndices[RunLengthCodes]
DryRunEndDateTimes = DryObservations.index[DryRunLengthEndIndices]
DryRunStartDateTimes = DryObservations.index[DryRunLengthStartIndices]
DryRunTimeLength = (DryRunEndDateTimes - DryRunStartDateTimes).days
DryObservations['DrySpellDayLengths'] = np.nan
DryObservations.iloc[(DryRunLengthEndIndices, DryObservations.columns.get_loc('DrySpellDayLengths'))] = DryRunTimeLength
DryObservations.DrySpellDayLengths.fillna(method='backfill', inplace=True)
DryObservations.loc[((~ DryObservations.Dry), 'DrySpellDayLengths')] = 0
Output = DryObservations[['DrySpellDayLengths']]
return Output<|docstring|>rainfall quality check for dry spells<|endoftext|> |
5789f5e6bf219e7104de6f093c524158789b42233706c21359b56fd57aa1a944 | def RepeatedValues(rain_data):
'rainfall quality check for unlikely repeating values'
'identify the length (in consecutive time units) that a value is repeated'
'this check should not be applied to tip data'
WetObservations = pd.DataFrame(((rain_data.values > 0) * rain_data.values), columns=['Wet'], index=rain_data.index)
RLE = [(k, sum((1 for i in g))) for (k, g) in itertools.groupby(rain_data.iloc[(:, 0)])]
RunLengthCodes = np.array([a_tuple[0] for a_tuple in RLE])
RunLengths = np.array([a_tuple[1] for a_tuple in RLE])
RunLengthEndIndices = (np.cumsum(RunLengths) - 1)
WetRunLengthEndIndices = RunLengthEndIndices[(RunLengthCodes > 0)]
WetRunLengths = RunLengths[(RunLengthCodes > 0)]
WetObservations['RepeatedValues'] = np.nan
WetObservations.iloc[(WetRunLengthEndIndices, WetObservations.columns.get_loc('RepeatedValues'))] = WetRunLengths
WetObservations.RepeatedValues.fillna(method='backfill', inplace=True)
WetObservations.loc[((~ (WetObservations.Wet > 0)), 'RepeatedValues')] = 0
Output = WetObservations[['RepeatedValues']]
return Output | rainfall quality check for unlikely repeating values | src/RainDataChecks.py | RepeatedValues | RainfallNZ/RainCheckPy | 0 | python | def RepeatedValues(rain_data):
'identify the length (in consecutive time units) that a value is repeated'
'this check should not be applied to tip data'
WetObservations = pd.DataFrame(((rain_data.values > 0) * rain_data.values), columns=['Wet'], index=rain_data.index)
RLE = [(k, sum((1 for i in g))) for (k, g) in itertools.groupby(rain_data.iloc[(:, 0)])]
RunLengthCodes = np.array([a_tuple[0] for a_tuple in RLE])
RunLengths = np.array([a_tuple[1] for a_tuple in RLE])
RunLengthEndIndices = (np.cumsum(RunLengths) - 1)
WetRunLengthEndIndices = RunLengthEndIndices[(RunLengthCodes > 0)]
WetRunLengths = RunLengths[(RunLengthCodes > 0)]
WetObservations['RepeatedValues'] = np.nan
WetObservations.iloc[(WetRunLengthEndIndices, WetObservations.columns.get_loc('RepeatedValues'))] = WetRunLengths
WetObservations.RepeatedValues.fillna(method='backfill', inplace=True)
WetObservations.loc[((~ (WetObservations.Wet > 0)), 'RepeatedValues')] = 0
Output = WetObservations[['RepeatedValues']]
return Output | def RepeatedValues(rain_data):
'identify the length (in consecutive time units) that a value is repeated'
'this check should not be applied to tip data'
WetObservations = pd.DataFrame(((rain_data.values > 0) * rain_data.values), columns=['Wet'], index=rain_data.index)
RLE = [(k, sum((1 for i in g))) for (k, g) in itertools.groupby(rain_data.iloc[(:, 0)])]
RunLengthCodes = np.array([a_tuple[0] for a_tuple in RLE])
RunLengths = np.array([a_tuple[1] for a_tuple in RLE])
RunLengthEndIndices = (np.cumsum(RunLengths) - 1)
WetRunLengthEndIndices = RunLengthEndIndices[(RunLengthCodes > 0)]
WetRunLengths = RunLengths[(RunLengthCodes > 0)]
WetObservations['RepeatedValues'] = np.nan
WetObservations.iloc[(WetRunLengthEndIndices, WetObservations.columns.get_loc('RepeatedValues'))] = WetRunLengths
WetObservations.RepeatedValues.fillna(method='backfill', inplace=True)
WetObservations.loc[((~ (WetObservations.Wet > 0)), 'RepeatedValues')] = 0
Output = WetObservations[['RepeatedValues']]
return Output<|docstring|>rainfall quality check for unlikely repeating values<|endoftext|> |
0b8eba1713d2d1bcdc25650c47f54c01f12949bfd6b17ee9940ccdaf0fa76bfe | def Homogeneity(rain_data):
'Applies the Pettitt non-parameteric test to annual series to determine if there are major inhomogeneities in the data\n If there is, the test is repeated on the most recent side of the inhomogeneity to test if there is another.\n The most recent section that is homogeneous is retained and the remainder flagged.\n This uses the pyHomogeneity package https://github.com/mmhs013/pyHomogeneity\n '
import pyhomogeneity as hg
if (len(rain_data.index) < 100):
Output = pd.DataFrame(np.nan, columns=['Homogeneous'], index=rain_data.index)
else:
Homogeneous = pd.DataFrame(True, columns=['Homogeneous', 'ChangePoint'], index=rain_data.index)
DataStepLengthInHours = ((rain_data.index[1] - rain_data.index[0]).total_seconds() // 3600)
AnnualData = rain_data.resample('1y').sum(min_count=int((((0.96 * 365) * 24) / DataStepLengthInHours)))
if (AnnualData.count().any() > 3):
result = hg.pettitt_test(AnnualData)
MoreInhomogeneity = result.h
while MoreInhomogeneity:
Homogeneous[(Homogeneous.index < pd.to_datetime(result.cp))] = False
if (len(AnnualData[(AnnualData.index > pd.to_datetime(result.cp))]) > 3):
result = hg.pettitt_test(AnnualData[(AnnualData.index > pd.to_datetime(result.cp))])
MoreInhomogeneity = result.h
else:
MoreInhomogeneity = False
Output = Homogeneous
return Output | Applies the Pettitt non-parameteric test to annual series to determine if there are major inhomogeneities in the data
If there is, the test is repeated on the most recent side of the inhomogeneity to test if there is another.
The most recent section that is homogeneous is retained and the remainder flagged.
This uses the pyHomogeneity package https://github.com/mmhs013/pyHomogeneity | src/RainDataChecks.py | Homogeneity | RainfallNZ/RainCheckPy | 0 | python | def Homogeneity(rain_data):
'Applies the Pettitt non-parameteric test to annual series to determine if there are major inhomogeneities in the data\n If there is, the test is repeated on the most recent side of the inhomogeneity to test if there is another.\n The most recent section that is homogeneous is retained and the remainder flagged.\n This uses the pyHomogeneity package https://github.com/mmhs013/pyHomogeneity\n '
import pyhomogeneity as hg
if (len(rain_data.index) < 100):
Output = pd.DataFrame(np.nan, columns=['Homogeneous'], index=rain_data.index)
else:
Homogeneous = pd.DataFrame(True, columns=['Homogeneous', 'ChangePoint'], index=rain_data.index)
DataStepLengthInHours = ((rain_data.index[1] - rain_data.index[0]).total_seconds() // 3600)
AnnualData = rain_data.resample('1y').sum(min_count=int((((0.96 * 365) * 24) / DataStepLengthInHours)))
if (AnnualData.count().any() > 3):
result = hg.pettitt_test(AnnualData)
MoreInhomogeneity = result.h
while MoreInhomogeneity:
Homogeneous[(Homogeneous.index < pd.to_datetime(result.cp))] = False
if (len(AnnualData[(AnnualData.index > pd.to_datetime(result.cp))]) > 3):
result = hg.pettitt_test(AnnualData[(AnnualData.index > pd.to_datetime(result.cp))])
MoreInhomogeneity = result.h
else:
MoreInhomogeneity = False
Output = Homogeneous
return Output | def Homogeneity(rain_data):
'Applies the Pettitt non-parameteric test to annual series to determine if there are major inhomogeneities in the data\n If there is, the test is repeated on the most recent side of the inhomogeneity to test if there is another.\n The most recent section that is homogeneous is retained and the remainder flagged.\n This uses the pyHomogeneity package https://github.com/mmhs013/pyHomogeneity\n '
import pyhomogeneity as hg
if (len(rain_data.index) < 100):
Output = pd.DataFrame(np.nan, columns=['Homogeneous'], index=rain_data.index)
else:
Homogeneous = pd.DataFrame(True, columns=['Homogeneous', 'ChangePoint'], index=rain_data.index)
DataStepLengthInHours = ((rain_data.index[1] - rain_data.index[0]).total_seconds() // 3600)
AnnualData = rain_data.resample('1y').sum(min_count=int((((0.96 * 365) * 24) / DataStepLengthInHours)))
if (AnnualData.count().any() > 3):
result = hg.pettitt_test(AnnualData)
MoreInhomogeneity = result.h
while MoreInhomogeneity:
Homogeneous[(Homogeneous.index < pd.to_datetime(result.cp))] = False
if (len(AnnualData[(AnnualData.index > pd.to_datetime(result.cp))]) > 3):
result = hg.pettitt_test(AnnualData[(AnnualData.index > pd.to_datetime(result.cp))])
MoreInhomogeneity = result.h
else:
MoreInhomogeneity = False
Output = Homogeneous
return Output<|docstring|>Applies the Pettitt non-parameteric test to annual series to determine if there are major inhomogeneities in the data
If there is, the test is repeated on the most recent side of the inhomogeneity to test if there is another.
The most recent section that is homogeneous is retained and the remainder flagged.
This uses the pyHomogeneity package https://github.com/mmhs013/pyHomogeneity<|endoftext|> |
38cb4269d7a7175d24d9bac3c3bdb030d3fdcafb2370af533e67ad714449f327 | def SubFreezingRain(rain_data, temperature_data):
'"rainfall quality check for observations during freezing temperatures\n identify the observations when the maximum temperature was less than zero degrees C\n '
RainAndTemperature = pd.merge(left=rain_data, right=temperature_data, left_index=True, right_index=True, how='left')
RainAndTemperature['FreezingRain'] = ((RainAndTemperature.Rainfall > 0) & (RainAndTemperature.TMax < 0))
Output = RainAndTemperature['FreezingRain']
return Output | "rainfall quality check for observations during freezing temperatures
identify the observations when the maximum temperature was less than zero degrees C | src/RainDataChecks.py | SubFreezingRain | RainfallNZ/RainCheckPy | 0 | python | def SubFreezingRain(rain_data, temperature_data):
'"rainfall quality check for observations during freezing temperatures\n identify the observations when the maximum temperature was less than zero degrees C\n '
RainAndTemperature = pd.merge(left=rain_data, right=temperature_data, left_index=True, right_index=True, how='left')
RainAndTemperature['FreezingRain'] = ((RainAndTemperature.Rainfall > 0) & (RainAndTemperature.TMax < 0))
Output = RainAndTemperature['FreezingRain']
return Output | def SubFreezingRain(rain_data, temperature_data):
'"rainfall quality check for observations during freezing temperatures\n identify the observations when the maximum temperature was less than zero degrees C\n '
RainAndTemperature = pd.merge(left=rain_data, right=temperature_data, left_index=True, right_index=True, how='left')
RainAndTemperature['FreezingRain'] = ((RainAndTemperature.Rainfall > 0) & (RainAndTemperature.TMax < 0))
Output = RainAndTemperature['FreezingRain']
return Output<|docstring|>"rainfall quality check for observations during freezing temperatures
identify the observations when the maximum temperature was less than zero degrees C<|endoftext|> |
57b205985341d6e34c0dbd648078cef433b66ceaf071d77c69b4c1744e5ba94d | def RelatedFlowEvents(rain_data, Daily_streamflow_data):
'"rainfall quality check for observations compared to flow events\n for each time step allocate the relative magnitude of a peak flow event ocurring on the same day or the day after\n but only if rain events are associated with flow events\n used with daily streamflow and hourly rainfall, possibly daily rainfall, but it hasn\'t been tested yet.\'\n '
if ((max(rain_data.index) - min(rain_data.index)) < pd.Timedelta('2 days')):
RainAndFlow = rain_data.copy()
RainAndFlow['Peak_prominence'] = np.nan
else:
peaks = find_peaks(Daily_streamflow_data['Streamflow'], height=0, prominence=(Daily_streamflow_data.mean().item() * 0.1), wlen=3)
DaysWithPeaks = Daily_streamflow_data.index[peaks[0]]
NinetyFifthPP = np.quantile(peaks[1]['prominences'], 0.95)
RelativeProminence = np.round((peaks[1]['prominences'] / NinetyFifthPP), 3)
FlowPeakSeries = pd.DataFrame(index=Daily_streamflow_data.index, columns=['Peak_prominence'])
FlowPeakSeries.loc[(DaysWithPeaks, 'Peak_prominence')] = RelativeProminence
if (FlowPeakSeries.index.tzinfo is not None):
FlowPeakSeries.index = FlowPeakSeries.index.tz_convert(pytz.timezone('Etc/GMT-12'))
FlowPeakSeries.index = FlowPeakSeries.index.tz_localize(None)
RainAndFlow = pd.merge(left=rain_data, right=FlowPeakSeries['Peak_prominence'], left_index=True, right_index=True, how='left')
FillLength = int((24 // ((RainAndFlow.index[1] - RainAndFlow.index[0]).total_seconds() // 3600)))
RainAndFlow.loc[(:, 'Peak_prominence')] = RainAndFlow.loc[(:, 'Peak_prominence')].fillna(method='pad', limit=(FillLength - 1)).fillna(method='bfill', limit=FillLength).fillna(0)
NonZeroRainData = rain_data.values[(rain_data.values > 0)]
NinetyNinth = np.quantile(NonZeroRainData, 0.99)
HighRainHours = (RainAndFlow['Rainfall'] > NinetyNinth)
TimeDifferenceSeriesOnhighRainEvents = HighRainHours[HighRainHours].index.to_series().diff().to_frame()
TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'] = TimeDifferenceSeriesOnhighRainEvents['DateTime'].dt.round('12H')
TimeDifferenceSeriesOnhighRainEvents['Rainfall'] = RainAndFlow.loc[(HighRainHours, 'Rainfall')]
TimeDifferenceSeriesOnhighRainEvents['Peak_prominence'] = RainAndFlow.loc[(HighRainHours, 'Peak_prominence')]
RainEventPeakProminence = TimeDifferenceSeriesOnhighRainEvents.groupby((TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'] != TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'].shift()).cumsum(), as_index=False).agg({'DateTime': 'first', 'Rainfall': 'max', 'Peak_prominence': 'max'})
NumberOfSamples = min(len(RainAndFlow.index), 10000)
RandomPeakProminence = RainAndFlow.loc[(RainAndFlow.index[random.sample(range(0, len(RainAndFlow.index)), NumberOfSamples)], 'Peak_prominence')]
RandomPeakLilelihood = (np.count_nonzero(RandomPeakProminence) / NumberOfSamples)
ProbabilityThatFlowEventsDuringHighRainEventsMatchesRandom = st.binom_test(x=np.count_nonzero(RainEventPeakProminence['Peak_prominence']), n=RainEventPeakProminence['Peak_prominence'].count(), p=RandomPeakLilelihood)
if (ProbabilityThatFlowEventsDuringHighRainEventsMatchesRandom > 0.01):
RainAndFlow['Peak_prominence'] = np.nan
Output = RainAndFlow['Peak_prominence']
return Output | "rainfall quality check for observations compared to flow events
for each time step allocate the relative magnitude of a peak flow event ocurring on the same day or the day after
but only if rain events are associated with flow events
used with daily streamflow and hourly rainfall, possibly daily rainfall, but it hasn't been tested yet.' | src/RainDataChecks.py | RelatedFlowEvents | RainfallNZ/RainCheckPy | 0 | python | def RelatedFlowEvents(rain_data, Daily_streamflow_data):
'"rainfall quality check for observations compared to flow events\n for each time step allocate the relative magnitude of a peak flow event ocurring on the same day or the day after\n but only if rain events are associated with flow events\n used with daily streamflow and hourly rainfall, possibly daily rainfall, but it hasn\'t been tested yet.\'\n '
if ((max(rain_data.index) - min(rain_data.index)) < pd.Timedelta('2 days')):
RainAndFlow = rain_data.copy()
RainAndFlow['Peak_prominence'] = np.nan
else:
peaks = find_peaks(Daily_streamflow_data['Streamflow'], height=0, prominence=(Daily_streamflow_data.mean().item() * 0.1), wlen=3)
DaysWithPeaks = Daily_streamflow_data.index[peaks[0]]
NinetyFifthPP = np.quantile(peaks[1]['prominences'], 0.95)
RelativeProminence = np.round((peaks[1]['prominences'] / NinetyFifthPP), 3)
FlowPeakSeries = pd.DataFrame(index=Daily_streamflow_data.index, columns=['Peak_prominence'])
FlowPeakSeries.loc[(DaysWithPeaks, 'Peak_prominence')] = RelativeProminence
if (FlowPeakSeries.index.tzinfo is not None):
FlowPeakSeries.index = FlowPeakSeries.index.tz_convert(pytz.timezone('Etc/GMT-12'))
FlowPeakSeries.index = FlowPeakSeries.index.tz_localize(None)
RainAndFlow = pd.merge(left=rain_data, right=FlowPeakSeries['Peak_prominence'], left_index=True, right_index=True, how='left')
FillLength = int((24 // ((RainAndFlow.index[1] - RainAndFlow.index[0]).total_seconds() // 3600)))
RainAndFlow.loc[(:, 'Peak_prominence')] = RainAndFlow.loc[(:, 'Peak_prominence')].fillna(method='pad', limit=(FillLength - 1)).fillna(method='bfill', limit=FillLength).fillna(0)
NonZeroRainData = rain_data.values[(rain_data.values > 0)]
NinetyNinth = np.quantile(NonZeroRainData, 0.99)
HighRainHours = (RainAndFlow['Rainfall'] > NinetyNinth)
TimeDifferenceSeriesOnhighRainEvents = HighRainHours[HighRainHours].index.to_series().diff().to_frame()
TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'] = TimeDifferenceSeriesOnhighRainEvents['DateTime'].dt.round('12H')
TimeDifferenceSeriesOnhighRainEvents['Rainfall'] = RainAndFlow.loc[(HighRainHours, 'Rainfall')]
TimeDifferenceSeriesOnhighRainEvents['Peak_prominence'] = RainAndFlow.loc[(HighRainHours, 'Peak_prominence')]
RainEventPeakProminence = TimeDifferenceSeriesOnhighRainEvents.groupby((TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'] != TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'].shift()).cumsum(), as_index=False).agg({'DateTime': 'first', 'Rainfall': 'max', 'Peak_prominence': 'max'})
NumberOfSamples = min(len(RainAndFlow.index), 10000)
RandomPeakProminence = RainAndFlow.loc[(RainAndFlow.index[random.sample(range(0, len(RainAndFlow.index)), NumberOfSamples)], 'Peak_prominence')]
RandomPeakLilelihood = (np.count_nonzero(RandomPeakProminence) / NumberOfSamples)
ProbabilityThatFlowEventsDuringHighRainEventsMatchesRandom = st.binom_test(x=np.count_nonzero(RainEventPeakProminence['Peak_prominence']), n=RainEventPeakProminence['Peak_prominence'].count(), p=RandomPeakLilelihood)
if (ProbabilityThatFlowEventsDuringHighRainEventsMatchesRandom > 0.01):
RainAndFlow['Peak_prominence'] = np.nan
Output = RainAndFlow['Peak_prominence']
return Output | def RelatedFlowEvents(rain_data, Daily_streamflow_data):
'"rainfall quality check for observations compared to flow events\n for each time step allocate the relative magnitude of a peak flow event ocurring on the same day or the day after\n but only if rain events are associated with flow events\n used with daily streamflow and hourly rainfall, possibly daily rainfall, but it hasn\'t been tested yet.\'\n '
if ((max(rain_data.index) - min(rain_data.index)) < pd.Timedelta('2 days')):
RainAndFlow = rain_data.copy()
RainAndFlow['Peak_prominence'] = np.nan
else:
peaks = find_peaks(Daily_streamflow_data['Streamflow'], height=0, prominence=(Daily_streamflow_data.mean().item() * 0.1), wlen=3)
DaysWithPeaks = Daily_streamflow_data.index[peaks[0]]
NinetyFifthPP = np.quantile(peaks[1]['prominences'], 0.95)
RelativeProminence = np.round((peaks[1]['prominences'] / NinetyFifthPP), 3)
FlowPeakSeries = pd.DataFrame(index=Daily_streamflow_data.index, columns=['Peak_prominence'])
FlowPeakSeries.loc[(DaysWithPeaks, 'Peak_prominence')] = RelativeProminence
if (FlowPeakSeries.index.tzinfo is not None):
FlowPeakSeries.index = FlowPeakSeries.index.tz_convert(pytz.timezone('Etc/GMT-12'))
FlowPeakSeries.index = FlowPeakSeries.index.tz_localize(None)
RainAndFlow = pd.merge(left=rain_data, right=FlowPeakSeries['Peak_prominence'], left_index=True, right_index=True, how='left')
FillLength = int((24 // ((RainAndFlow.index[1] - RainAndFlow.index[0]).total_seconds() // 3600)))
RainAndFlow.loc[(:, 'Peak_prominence')] = RainAndFlow.loc[(:, 'Peak_prominence')].fillna(method='pad', limit=(FillLength - 1)).fillna(method='bfill', limit=FillLength).fillna(0)
NonZeroRainData = rain_data.values[(rain_data.values > 0)]
NinetyNinth = np.quantile(NonZeroRainData, 0.99)
HighRainHours = (RainAndFlow['Rainfall'] > NinetyNinth)
TimeDifferenceSeriesOnhighRainEvents = HighRainHours[HighRainHours].index.to_series().diff().to_frame()
TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'] = TimeDifferenceSeriesOnhighRainEvents['DateTime'].dt.round('12H')
TimeDifferenceSeriesOnhighRainEvents['Rainfall'] = RainAndFlow.loc[(HighRainHours, 'Rainfall')]
TimeDifferenceSeriesOnhighRainEvents['Peak_prominence'] = RainAndFlow.loc[(HighRainHours, 'Peak_prominence')]
RainEventPeakProminence = TimeDifferenceSeriesOnhighRainEvents.groupby((TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'] != TimeDifferenceSeriesOnhighRainEvents['RoundedDateTime'].shift()).cumsum(), as_index=False).agg({'DateTime': 'first', 'Rainfall': 'max', 'Peak_prominence': 'max'})
NumberOfSamples = min(len(RainAndFlow.index), 10000)
RandomPeakProminence = RainAndFlow.loc[(RainAndFlow.index[random.sample(range(0, len(RainAndFlow.index)), NumberOfSamples)], 'Peak_prominence')]
RandomPeakLilelihood = (np.count_nonzero(RandomPeakProminence) / NumberOfSamples)
ProbabilityThatFlowEventsDuringHighRainEventsMatchesRandom = st.binom_test(x=np.count_nonzero(RainEventPeakProminence['Peak_prominence']), n=RainEventPeakProminence['Peak_prominence'].count(), p=RandomPeakLilelihood)
if (ProbabilityThatFlowEventsDuringHighRainEventsMatchesRandom > 0.01):
RainAndFlow['Peak_prominence'] = np.nan
Output = RainAndFlow['Peak_prominence']
return Output<|docstring|>"rainfall quality check for observations compared to flow events
for each time step allocate the relative magnitude of a peak flow event ocurring on the same day or the day after
but only if rain events are associated with flow events
used with daily streamflow and hourly rainfall, possibly daily rainfall, but it hasn't been tested yet.'<|endoftext|> |
d7ac035360831bf7c76989cddbe0167636082535b82f4606631a6b38f428d50f | def affinity(TestData, ReferenceData):
'Compare the data between two sites to see how similar they are'
'this uses an "affinity" index from Lewis et al. 2018, supplementary material'
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
TestWetDry = (result.Test > 0)
ReferenceWetDry = (result.Reference > 0)
BothWet = ((TestWetDry * 1) + (ReferenceWetDry * 1))
CombinedTotals = BothWet.groupby(BothWet.values).count()
if ((0 in CombinedTotals) & (2 in CombinedTotals)):
Affinity = ((CombinedTotals[0] / CombinedTotals.sum()) + (CombinedTotals[2] / CombinedTotals.sum()))
else:
Affinity = 0
return Affinity | Compare the data between two sites to see how similar they are | src/RainDataChecks.py | affinity | RainfallNZ/RainCheckPy | 0 | python | def affinity(TestData, ReferenceData):
'this uses an "affinity" index from Lewis et al. 2018, supplementary material'
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
TestWetDry = (result.Test > 0)
ReferenceWetDry = (result.Reference > 0)
BothWet = ((TestWetDry * 1) + (ReferenceWetDry * 1))
CombinedTotals = BothWet.groupby(BothWet.values).count()
if ((0 in CombinedTotals) & (2 in CombinedTotals)):
Affinity = ((CombinedTotals[0] / CombinedTotals.sum()) + (CombinedTotals[2] / CombinedTotals.sum()))
else:
Affinity = 0
return Affinity | def affinity(TestData, ReferenceData):
'this uses an "affinity" index from Lewis et al. 2018, supplementary material'
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
TestWetDry = (result.Test > 0)
ReferenceWetDry = (result.Reference > 0)
BothWet = ((TestWetDry * 1) + (ReferenceWetDry * 1))
CombinedTotals = BothWet.groupby(BothWet.values).count()
if ((0 in CombinedTotals) & (2 in CombinedTotals)):
Affinity = ((CombinedTotals[0] / CombinedTotals.sum()) + (CombinedTotals[2] / CombinedTotals.sum()))
else:
Affinity = 0
return Affinity<|docstring|>Compare the data between two sites to see how similar they are<|endoftext|> |
8459a18ad57f6196cda336a9e17a7fdbdc224566b4329e44f8e295f40300f136 | def spearman(TestData, ReferenceData):
'calculate the Spearman rank correlation coefficient between sites'
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
CorrelationMatrix = result.corr(method='spearman')
Spearman = CorrelationMatrix.Test['Reference']
return Spearman | calculate the Spearman rank correlation coefficient between sites | src/RainDataChecks.py | spearman | RainfallNZ/RainCheckPy | 0 | python | def spearman(TestData, ReferenceData):
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
CorrelationMatrix = result.corr(method='spearman')
Spearman = CorrelationMatrix.Test['Reference']
return Spearman | def spearman(TestData, ReferenceData):
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
CorrelationMatrix = result.corr(method='spearman')
Spearman = CorrelationMatrix.Test['Reference']
return Spearman<|docstring|>calculate the Spearman rank correlation coefficient between sites<|endoftext|> |
25c588cd8ebd6db589d16d9b9727abdc5ff2b82ef274b7f0345a76d8fbbb52b8 | def neighborhoodDivergence(TestData: pd.DataFrame, ReferenceData: pd.DataFrame) -> pd.DataFrame:
"Compares rainfall amounts to a another site\n \n Finds the ratio between the daily rainfall difference and the ninety-fifth percentile of\n the distribution of daily differences. This is analogous to the rain_outliers test\n but is based on comparison to an alternative site.\n This generates two values, the high divergence and the low divergence.\n High divergence is for when the Test value is higher than the reference value i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)\n Low divergence is for when the Test value is lower than the reference value, i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)\n \n Parameters\n ----------\n TestData : pd.DataFrame\n A time series of rainfall amounts for the site being tested.\n ReferenceData : pd.DataFrame\n A time series of rainfall amounts for the site to be compared with.\n\n Returns\n -------\n neighborhoodDivergence : pd.DataFrame\n A time series of 'LowOutlierData' and 'HighOutlierData'.\n High divergence is for when the Test value is higher than the reference value \n i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)\n Low divergence is for when the Test value is lower than the reference value, \n i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)\n\n "
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
result = result.dropna()
result['Differences'] = (result.Test - result.Reference)
if (sum((result.Differences > 0)) > 0):
PosNinetyFifth = np.quantile(result.Differences[(result.Differences > 0)], 0.95)
result['HighOutlierData'] = np.round((result.Differences / PosNinetyFifth), 1)
result.loc[((result['HighOutlierData'] <= 0), 'HighOutlierData')] = 0
else:
result['HighOutlierData'] = 0
if (sum((result.Differences < 0)) > 0):
NegFifth = np.quantile(result.Differences[(result.Differences < 0)], 0.05)
result['LowOutlierData'] = np.round((result.Differences / NegFifth), 1)
result.loc[((result['LowOutlierData'] <= 0), 'LowOutlierData')] = 0
else:
result['LowOutlierData'] = 0
neighborhoodDivergence = pd.merge(result, TestData, on='DateTime', how='right')[['LowOutlierData', 'HighOutlierData']]
return neighborhoodDivergence | Compares rainfall amounts to a another site
Finds the ratio between the daily rainfall difference and the ninety-fifth percentile of
the distribution of daily differences. This is analogous to the rain_outliers test
but is based on comparison to an alternative site.
This generates two values, the high divergence and the low divergence.
High divergence is for when the Test value is higher than the reference value i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)
Low divergence is for when the Test value is lower than the reference value, i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)
Parameters
----------
TestData : pd.DataFrame
A time series of rainfall amounts for the site being tested.
ReferenceData : pd.DataFrame
A time series of rainfall amounts for the site to be compared with.
Returns
-------
neighborhoodDivergence : pd.DataFrame
A time series of 'LowOutlierData' and 'HighOutlierData'.
High divergence is for when the Test value is higher than the reference value
i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)
Low divergence is for when the Test value is lower than the reference value,
i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference) | src/RainDataChecks.py | neighborhoodDivergence | RainfallNZ/RainCheckPy | 0 | python | def neighborhoodDivergence(TestData: pd.DataFrame, ReferenceData: pd.DataFrame) -> pd.DataFrame:
"Compares rainfall amounts to a another site\n \n Finds the ratio between the daily rainfall difference and the ninety-fifth percentile of\n the distribution of daily differences. This is analogous to the rain_outliers test\n but is based on comparison to an alternative site.\n This generates two values, the high divergence and the low divergence.\n High divergence is for when the Test value is higher than the reference value i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)\n Low divergence is for when the Test value is lower than the reference value, i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)\n \n Parameters\n ----------\n TestData : pd.DataFrame\n A time series of rainfall amounts for the site being tested.\n ReferenceData : pd.DataFrame\n A time series of rainfall amounts for the site to be compared with.\n\n Returns\n -------\n neighborhoodDivergence : pd.DataFrame\n A time series of 'LowOutlierData' and 'HighOutlierData'.\n High divergence is for when the Test value is higher than the reference value \n i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)\n Low divergence is for when the Test value is lower than the reference value, \n i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)\n\n "
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
result = result.dropna()
result['Differences'] = (result.Test - result.Reference)
if (sum((result.Differences > 0)) > 0):
PosNinetyFifth = np.quantile(result.Differences[(result.Differences > 0)], 0.95)
result['HighOutlierData'] = np.round((result.Differences / PosNinetyFifth), 1)
result.loc[((result['HighOutlierData'] <= 0), 'HighOutlierData')] = 0
else:
result['HighOutlierData'] = 0
if (sum((result.Differences < 0)) > 0):
NegFifth = np.quantile(result.Differences[(result.Differences < 0)], 0.05)
result['LowOutlierData'] = np.round((result.Differences / NegFifth), 1)
result.loc[((result['LowOutlierData'] <= 0), 'LowOutlierData')] = 0
else:
result['LowOutlierData'] = 0
neighborhoodDivergence = pd.merge(result, TestData, on='DateTime', how='right')[['LowOutlierData', 'HighOutlierData']]
return neighborhoodDivergence | def neighborhoodDivergence(TestData: pd.DataFrame, ReferenceData: pd.DataFrame) -> pd.DataFrame:
"Compares rainfall amounts to a another site\n \n Finds the ratio between the daily rainfall difference and the ninety-fifth percentile of\n the distribution of daily differences. This is analogous to the rain_outliers test\n but is based on comparison to an alternative site.\n This generates two values, the high divergence and the low divergence.\n High divergence is for when the Test value is higher than the reference value i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)\n Low divergence is for when the Test value is lower than the reference value, i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)\n \n Parameters\n ----------\n TestData : pd.DataFrame\n A time series of rainfall amounts for the site being tested.\n ReferenceData : pd.DataFrame\n A time series of rainfall amounts for the site to be compared with.\n\n Returns\n -------\n neighborhoodDivergence : pd.DataFrame\n A time series of 'LowOutlierData' and 'HighOutlierData'.\n High divergence is for when the Test value is higher than the reference value \n i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)\n Low divergence is for when the Test value is lower than the reference value, \n i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)\n\n "
result = TestData.join(ReferenceData, how='inner', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
result = result.dropna()
result['Differences'] = (result.Test - result.Reference)
if (sum((result.Differences > 0)) > 0):
PosNinetyFifth = np.quantile(result.Differences[(result.Differences > 0)], 0.95)
result['HighOutlierData'] = np.round((result.Differences / PosNinetyFifth), 1)
result.loc[((result['HighOutlierData'] <= 0), 'HighOutlierData')] = 0
else:
result['HighOutlierData'] = 0
if (sum((result.Differences < 0)) > 0):
NegFifth = np.quantile(result.Differences[(result.Differences < 0)], 0.05)
result['LowOutlierData'] = np.round((result.Differences / NegFifth), 1)
result.loc[((result['LowOutlierData'] <= 0), 'LowOutlierData')] = 0
else:
result['LowOutlierData'] = 0
neighborhoodDivergence = pd.merge(result, TestData, on='DateTime', how='right')[['LowOutlierData', 'HighOutlierData']]
return neighborhoodDivergence<|docstring|>Compares rainfall amounts to a another site
Finds the ratio between the daily rainfall difference and the ninety-fifth percentile of
the distribution of daily differences. This is analogous to the rain_outliers test
but is based on comparison to an alternative site.
This generates two values, the high divergence and the low divergence.
High divergence is for when the Test value is higher than the reference value i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)
Low divergence is for when the Test value is lower than the reference value, i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)
Parameters
----------
TestData : pd.DataFrame
A time series of rainfall amounts for the site being tested.
ReferenceData : pd.DataFrame
A time series of rainfall amounts for the site to be compared with.
Returns
-------
neighborhoodDivergence : pd.DataFrame
A time series of 'LowOutlierData' and 'HighOutlierData'.
High divergence is for when the Test value is higher than the reference value
i.e. where the ratio of the max(0,Test - Reference) / 95th(max(0,Test - Reference)
Low divergence is for when the Test value is lower than the reference value,
i.e. ratio of the min(0,Test - Reference) / 5th(min(0,Test - Reference)<|endoftext|> |
c3a218e276564c078681a5cc3b18d3203a1768b1b87de30e9d7448dce5688522 | def DrySpellDivergence(TestData, ReferenceData):
'find the ratio between the 15-day dry spell proportion difference and the ninety-fifth percentile of'
'the distribution of the 15-day dry-spell proportion differences'
result = TestData.join(ReferenceData, how='outer', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
first_idx = max(TestData.first_valid_index(), ReferenceData.first_valid_index())
last_idx = min(TestData.last_valid_index(), ReferenceData.last_valid_index())
result = result.loc[first_idx:last_idx]
result['TestDry'] = (result.Test == 0)
result['ReferenceDry'] = (result.Reference == 0)
result['Test15dayDryCounts'] = result.TestDry.rolling(window='15d').sum()
result['Reference15dayDryCounts'] = result.ReferenceDry.rolling(window='15d').sum()
result['Test15dayObservationCounts'] = result.Test.rolling(window='15d').count()
result['Reference15dayObservationCounts'] = result.Reference.rolling(window='15d').count()
result['15DayDryProportionDifference'] = ((result.Test15dayDryCounts / result.Test15dayObservationCounts) - (result.Reference15dayDryCounts / result.Reference15dayObservationCounts))
result.loc[(((result['Test15dayObservationCounts'] < 360) | (result['Reference15dayObservationCounts'] < 360)), '15DayDryProportionDifference')] = np.nan
if (sum((result['15DayDryProportionDifference'] >= 0)) > 0):
DryProportionDiffereneNinetyFifth = np.quantile(result['15DayDryProportionDifference'][(result['15DayDryProportionDifference'].notna() & (result['15DayDryProportionDifference'] >= 0))], 0.95)
else:
DryProportionDiffereneNinetyFifth = 1
result['DryProportionOutlierIndex'] = np.round((result['15DayDryProportionDifference'] / DryProportionDiffereneNinetyFifth), 1)
result.loc[((result['DryProportionOutlierIndex'] <= 0), 'DryProportionOutlierIndex')] = 0
DrySpellDivergence = result.DryProportionOutlierIndex
return DrySpellDivergence | find the ratio between the 15-day dry spell proportion difference and the ninety-fifth percentile of | src/RainDataChecks.py | DrySpellDivergence | RainfallNZ/RainCheckPy | 0 | python | def DrySpellDivergence(TestData, ReferenceData):
'the distribution of the 15-day dry-spell proportion differences'
result = TestData.join(ReferenceData, how='outer', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
first_idx = max(TestData.first_valid_index(), ReferenceData.first_valid_index())
last_idx = min(TestData.last_valid_index(), ReferenceData.last_valid_index())
result = result.loc[first_idx:last_idx]
result['TestDry'] = (result.Test == 0)
result['ReferenceDry'] = (result.Reference == 0)
result['Test15dayDryCounts'] = result.TestDry.rolling(window='15d').sum()
result['Reference15dayDryCounts'] = result.ReferenceDry.rolling(window='15d').sum()
result['Test15dayObservationCounts'] = result.Test.rolling(window='15d').count()
result['Reference15dayObservationCounts'] = result.Reference.rolling(window='15d').count()
result['15DayDryProportionDifference'] = ((result.Test15dayDryCounts / result.Test15dayObservationCounts) - (result.Reference15dayDryCounts / result.Reference15dayObservationCounts))
result.loc[(((result['Test15dayObservationCounts'] < 360) | (result['Reference15dayObservationCounts'] < 360)), '15DayDryProportionDifference')] = np.nan
if (sum((result['15DayDryProportionDifference'] >= 0)) > 0):
DryProportionDiffereneNinetyFifth = np.quantile(result['15DayDryProportionDifference'][(result['15DayDryProportionDifference'].notna() & (result['15DayDryProportionDifference'] >= 0))], 0.95)
else:
DryProportionDiffereneNinetyFifth = 1
result['DryProportionOutlierIndex'] = np.round((result['15DayDryProportionDifference'] / DryProportionDiffereneNinetyFifth), 1)
result.loc[((result['DryProportionOutlierIndex'] <= 0), 'DryProportionOutlierIndex')] = 0
DrySpellDivergence = result.DryProportionOutlierIndex
return DrySpellDivergence | def DrySpellDivergence(TestData, ReferenceData):
'the distribution of the 15-day dry-spell proportion differences'
result = TestData.join(ReferenceData, how='outer', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
first_idx = max(TestData.first_valid_index(), ReferenceData.first_valid_index())
last_idx = min(TestData.last_valid_index(), ReferenceData.last_valid_index())
result = result.loc[first_idx:last_idx]
result['TestDry'] = (result.Test == 0)
result['ReferenceDry'] = (result.Reference == 0)
result['Test15dayDryCounts'] = result.TestDry.rolling(window='15d').sum()
result['Reference15dayDryCounts'] = result.ReferenceDry.rolling(window='15d').sum()
result['Test15dayObservationCounts'] = result.Test.rolling(window='15d').count()
result['Reference15dayObservationCounts'] = result.Reference.rolling(window='15d').count()
result['15DayDryProportionDifference'] = ((result.Test15dayDryCounts / result.Test15dayObservationCounts) - (result.Reference15dayDryCounts / result.Reference15dayObservationCounts))
result.loc[(((result['Test15dayObservationCounts'] < 360) | (result['Reference15dayObservationCounts'] < 360)), '15DayDryProportionDifference')] = np.nan
if (sum((result['15DayDryProportionDifference'] >= 0)) > 0):
DryProportionDiffereneNinetyFifth = np.quantile(result['15DayDryProportionDifference'][(result['15DayDryProportionDifference'].notna() & (result['15DayDryProportionDifference'] >= 0))], 0.95)
else:
DryProportionDiffereneNinetyFifth = 1
result['DryProportionOutlierIndex'] = np.round((result['15DayDryProportionDifference'] / DryProportionDiffereneNinetyFifth), 1)
result.loc[((result['DryProportionOutlierIndex'] <= 0), 'DryProportionOutlierIndex')] = 0
DrySpellDivergence = result.DryProportionOutlierIndex
return DrySpellDivergence<|docstring|>find the ratio between the 15-day dry spell proportion difference and the ninety-fifth percentile of<|endoftext|> |
f06e8688ac728d9efabb3de664c3d160565395d7c5f9116687342fb80d8fcaf0 | def TimeStepAllignment(TestData, ReferenceData):
'This resamples the ReferenceData to match the observation times of the TestData'
'this helps for comparison to irregularly sampled data (e.g. storage gauges'
'or for manually recorded daily gauges that are read at non- 0:00 hours, e.g. at 8 or 9 am'
result = TestData.join(ReferenceData, how='outer', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
result['aggregator'] = result.index.strftime('%Y-%m-%dT%H:%M%:%SZ')
result.aggregator[result.Test.isna()] = np.nan
result['aggregator'] = result['aggregator'].fillna(method='bfill')
g = result.groupby('aggregator')
Reference_sums = g[['Reference']].aggregate((lambda x: sum(x)))
first_idx = Reference_sums.first_valid_index()
last_idx = Reference_sums.last_valid_index()
Reference_sums = Reference_sums.loc[first_idx:last_idx]
return Reference_sums | This resamples the ReferenceData to match the observation times of the TestData | src/RainDataChecks.py | TimeStepAllignment | RainfallNZ/RainCheckPy | 0 | python | def TimeStepAllignment(TestData, ReferenceData):
'this helps for comparison to irregularly sampled data (e.g. storage gauges'
'or for manually recorded daily gauges that are read at non- 0:00 hours, e.g. at 8 or 9 am'
result = TestData.join(ReferenceData, how='outer', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
result['aggregator'] = result.index.strftime('%Y-%m-%dT%H:%M%:%SZ')
result.aggregator[result.Test.isna()] = np.nan
result['aggregator'] = result['aggregator'].fillna(method='bfill')
g = result.groupby('aggregator')
Reference_sums = g[['Reference']].aggregate((lambda x: sum(x)))
first_idx = Reference_sums.first_valid_index()
last_idx = Reference_sums.last_valid_index()
Reference_sums = Reference_sums.loc[first_idx:last_idx]
return Reference_sums | def TimeStepAllignment(TestData, ReferenceData):
'this helps for comparison to irregularly sampled data (e.g. storage gauges'
'or for manually recorded daily gauges that are read at non- 0:00 hours, e.g. at 8 or 9 am'
result = TestData.join(ReferenceData, how='outer', lsuffix='_Test', rsuffix='_ref')
result.columns = ['Test', 'Reference']
result['aggregator'] = result.index.strftime('%Y-%m-%dT%H:%M%:%SZ')
result.aggregator[result.Test.isna()] = np.nan
result['aggregator'] = result['aggregator'].fillna(method='bfill')
g = result.groupby('aggregator')
Reference_sums = g[['Reference']].aggregate((lambda x: sum(x)))
first_idx = Reference_sums.first_valid_index()
last_idx = Reference_sums.last_valid_index()
Reference_sums = Reference_sums.loc[first_idx:last_idx]
return Reference_sums<|docstring|>This resamples the ReferenceData to match the observation times of the TestData<|endoftext|> |
b83d38adceb84da81e6e3df565e4cf2485c62a5906007c048a970ba78a05252a | def main(unused_argv):
'Main entry point for SDK Fn Harness.'
if ('LOGGING_API_SERVICE_DESCRIPTOR' in os.environ):
try:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'], logging_service_descriptor)
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(fn_log_handler)
_LOGGER.info('Logging handler created.')
except Exception:
_LOGGER.error('Failed to set up logging handler, continuing without.', exc_info=True)
fn_log_handler = None
else:
fn_log_handler = None
thread = threading.Thread(name='status_http_server', target=StatusServer().start)
thread.daemon = True
thread.setName('status-server-demon')
thread.start()
if ('PIPELINE_OPTIONS' in os.environ):
sdk_pipeline_options = _parse_pipeline_options(os.environ['PIPELINE_OPTIONS'])
else:
sdk_pipeline_options = PipelineOptions.from_dictionary({})
if ('SEMI_PERSISTENT_DIRECTORY' in os.environ):
semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
else:
semi_persistent_directory = None
_LOGGER.info('semi_persistent_directory: %s', semi_persistent_directory)
_worker_id = os.environ.get('WORKER_ID', None)
try:
_load_main_session(semi_persistent_directory)
except Exception:
exception_details = traceback.format_exc()
_LOGGER.error('Could not load main session: %s', exception_details, exc_info=True)
try:
_LOGGER.info('Python sdk harness started with pipeline_options: %s', sdk_pipeline_options.get_all_options(drop_default=True))
service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'], service_descriptor)
assert (not service_descriptor.oauth2_client_credentials_grant.url)
SdkHarness(control_address=service_descriptor.url, worker_id=_worker_id, state_cache_size=_get_state_cache_size(sdk_pipeline_options), profiler_factory=profiler.Profile.factory_from_options(sdk_pipeline_options.view_as(ProfilingOptions))).run()
_LOGGER.info('Python sdk harness exiting.')
except:
_LOGGER.exception('Python sdk harness failed: ')
raise
finally:
if fn_log_handler:
fn_log_handler.close() | Main entry point for SDK Fn Harness. | sdks/python/apache_beam/runners/worker/sdk_worker_main.py | main | RyanSkraba/beam | 2 | python | def main(unused_argv):
if ('LOGGING_API_SERVICE_DESCRIPTOR' in os.environ):
try:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'], logging_service_descriptor)
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(fn_log_handler)
_LOGGER.info('Logging handler created.')
except Exception:
_LOGGER.error('Failed to set up logging handler, continuing without.', exc_info=True)
fn_log_handler = None
else:
fn_log_handler = None
thread = threading.Thread(name='status_http_server', target=StatusServer().start)
thread.daemon = True
thread.setName('status-server-demon')
thread.start()
if ('PIPELINE_OPTIONS' in os.environ):
sdk_pipeline_options = _parse_pipeline_options(os.environ['PIPELINE_OPTIONS'])
else:
sdk_pipeline_options = PipelineOptions.from_dictionary({})
if ('SEMI_PERSISTENT_DIRECTORY' in os.environ):
semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
else:
semi_persistent_directory = None
_LOGGER.info('semi_persistent_directory: %s', semi_persistent_directory)
_worker_id = os.environ.get('WORKER_ID', None)
try:
_load_main_session(semi_persistent_directory)
except Exception:
exception_details = traceback.format_exc()
_LOGGER.error('Could not load main session: %s', exception_details, exc_info=True)
try:
_LOGGER.info('Python sdk harness started with pipeline_options: %s', sdk_pipeline_options.get_all_options(drop_default=True))
service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'], service_descriptor)
assert (not service_descriptor.oauth2_client_credentials_grant.url)
SdkHarness(control_address=service_descriptor.url, worker_id=_worker_id, state_cache_size=_get_state_cache_size(sdk_pipeline_options), profiler_factory=profiler.Profile.factory_from_options(sdk_pipeline_options.view_as(ProfilingOptions))).run()
_LOGGER.info('Python sdk harness exiting.')
except:
_LOGGER.exception('Python sdk harness failed: ')
raise
finally:
if fn_log_handler:
fn_log_handler.close() | def main(unused_argv):
if ('LOGGING_API_SERVICE_DESCRIPTOR' in os.environ):
try:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'], logging_service_descriptor)
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(fn_log_handler)
_LOGGER.info('Logging handler created.')
except Exception:
_LOGGER.error('Failed to set up logging handler, continuing without.', exc_info=True)
fn_log_handler = None
else:
fn_log_handler = None
thread = threading.Thread(name='status_http_server', target=StatusServer().start)
thread.daemon = True
thread.setName('status-server-demon')
thread.start()
if ('PIPELINE_OPTIONS' in os.environ):
sdk_pipeline_options = _parse_pipeline_options(os.environ['PIPELINE_OPTIONS'])
else:
sdk_pipeline_options = PipelineOptions.from_dictionary({})
if ('SEMI_PERSISTENT_DIRECTORY' in os.environ):
semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
else:
semi_persistent_directory = None
_LOGGER.info('semi_persistent_directory: %s', semi_persistent_directory)
_worker_id = os.environ.get('WORKER_ID', None)
try:
_load_main_session(semi_persistent_directory)
except Exception:
exception_details = traceback.format_exc()
_LOGGER.error('Could not load main session: %s', exception_details, exc_info=True)
try:
_LOGGER.info('Python sdk harness started with pipeline_options: %s', sdk_pipeline_options.get_all_options(drop_default=True))
service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'], service_descriptor)
assert (not service_descriptor.oauth2_client_credentials_grant.url)
SdkHarness(control_address=service_descriptor.url, worker_id=_worker_id, state_cache_size=_get_state_cache_size(sdk_pipeline_options), profiler_factory=profiler.Profile.factory_from_options(sdk_pipeline_options.view_as(ProfilingOptions))).run()
_LOGGER.info('Python sdk harness exiting.')
except:
_LOGGER.exception('Python sdk harness failed: ')
raise
finally:
if fn_log_handler:
fn_log_handler.close()<|docstring|>Main entry point for SDK Fn Harness.<|endoftext|> |
8ad5ba6d20c7ea9daa9c979b39aaa240fe3b6464cb08210fc001a9550f24282e | def _get_state_cache_size(pipeline_options):
'Defines the upper number of state items to cache.\n\n Note: state_cache_size is an experimental flag and might not be available in\n future releases.\n\n Returns:\n an int indicating the maximum number of items to cache.\n Default is 0 (disabled)\n '
experiments = pipeline_options.view_as(DebugOptions).experiments
experiments = (experiments if experiments else [])
for experiment in experiments:
if re.match('state_cache_size=', experiment):
return int(re.match('state_cache_size=(?P<state_cache_size>.*)', experiment).group('state_cache_size'))
return 0 | Defines the upper number of state items to cache.
Note: state_cache_size is an experimental flag and might not be available in
future releases.
Returns:
an int indicating the maximum number of items to cache.
Default is 0 (disabled) | sdks/python/apache_beam/runners/worker/sdk_worker_main.py | _get_state_cache_size | RyanSkraba/beam | 2 | python | def _get_state_cache_size(pipeline_options):
'Defines the upper number of state items to cache.\n\n Note: state_cache_size is an experimental flag and might not be available in\n future releases.\n\n Returns:\n an int indicating the maximum number of items to cache.\n Default is 0 (disabled)\n '
experiments = pipeline_options.view_as(DebugOptions).experiments
experiments = (experiments if experiments else [])
for experiment in experiments:
if re.match('state_cache_size=', experiment):
return int(re.match('state_cache_size=(?P<state_cache_size>.*)', experiment).group('state_cache_size'))
return 0 | def _get_state_cache_size(pipeline_options):
'Defines the upper number of state items to cache.\n\n Note: state_cache_size is an experimental flag and might not be available in\n future releases.\n\n Returns:\n an int indicating the maximum number of items to cache.\n Default is 0 (disabled)\n '
experiments = pipeline_options.view_as(DebugOptions).experiments
experiments = (experiments if experiments else [])
for experiment in experiments:
if re.match('state_cache_size=', experiment):
return int(re.match('state_cache_size=(?P<state_cache_size>.*)', experiment).group('state_cache_size'))
return 0<|docstring|>Defines the upper number of state items to cache.
Note: state_cache_size is an experimental flag and might not be available in
future releases.
Returns:
an int indicating the maximum number of items to cache.
Default is 0 (disabled)<|endoftext|> |
cdf0d0f12f21119ab943418855a04ddae4a63c620f438934bbfe17a54ced0863 | def _load_main_session(semi_persistent_directory):
'Loads a pickled main session from the path specified.'
if semi_persistent_directory:
session_file = os.path.join(semi_persistent_directory, 'staged', names.PICKLED_MAIN_SESSION_FILE)
if os.path.isfile(session_file):
pickler.load_session(session_file)
else:
_LOGGER.warning('No session file found: %s. Functions defined in __main__ (interactive session) may fail.', session_file)
else:
_LOGGER.warning('No semi_persistent_directory found: Functions defined in __main__ (interactive session) may fail.') | Loads a pickled main session from the path specified. | sdks/python/apache_beam/runners/worker/sdk_worker_main.py | _load_main_session | RyanSkraba/beam | 2 | python | def _load_main_session(semi_persistent_directory):
if semi_persistent_directory:
session_file = os.path.join(semi_persistent_directory, 'staged', names.PICKLED_MAIN_SESSION_FILE)
if os.path.isfile(session_file):
pickler.load_session(session_file)
else:
_LOGGER.warning('No session file found: %s. Functions defined in __main__ (interactive session) may fail.', session_file)
else:
_LOGGER.warning('No semi_persistent_directory found: Functions defined in __main__ (interactive session) may fail.') | def _load_main_session(semi_persistent_directory):
if semi_persistent_directory:
session_file = os.path.join(semi_persistent_directory, 'staged', names.PICKLED_MAIN_SESSION_FILE)
if os.path.isfile(session_file):
pickler.load_session(session_file)
else:
_LOGGER.warning('No session file found: %s. Functions defined in __main__ (interactive session) may fail.', session_file)
else:
_LOGGER.warning('No semi_persistent_directory found: Functions defined in __main__ (interactive session) may fail.')<|docstring|>Loads a pickled main session from the path specified.<|endoftext|> |
b2de4aa4ead16a315367867a5662c055383c2e2dec01ba0da999b8efd3956282 | def start(self, status_http_port=0):
'Executes the serving loop for the status server.\n\n Args:\n status_http_port(int): Binding port for the debug server.\n Default is 0 which means any free unsecured port\n '
class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
'HTTP handler for serving stacktraces of all threads.'
def do_GET(self):
'Return all thread stacktraces information for GET request.'
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line.encode('utf-8'))
def log_message(self, f, *args):
'Do not log any messages.'
pass
self.httpd = httpd = http.server.HTTPServer(('localhost', status_http_port), StatusHttpHandler)
_LOGGER.info('Status HTTP server running at %s:%s', httpd.server_name, httpd.server_port)
httpd.serve_forever() | Executes the serving loop for the status server.
Args:
status_http_port(int): Binding port for the debug server.
Default is 0 which means any free unsecured port | sdks/python/apache_beam/runners/worker/sdk_worker_main.py | start | RyanSkraba/beam | 2 | python | def start(self, status_http_port=0):
'Executes the serving loop for the status server.\n\n Args:\n status_http_port(int): Binding port for the debug server.\n Default is 0 which means any free unsecured port\n '
class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
'HTTP handler for serving stacktraces of all threads.'
def do_GET(self):
'Return all thread stacktraces information for GET request.'
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line.encode('utf-8'))
def log_message(self, f, *args):
'Do not log any messages.'
pass
self.httpd = httpd = http.server.HTTPServer(('localhost', status_http_port), StatusHttpHandler)
_LOGGER.info('Status HTTP server running at %s:%s', httpd.server_name, httpd.server_port)
httpd.serve_forever() | def start(self, status_http_port=0):
'Executes the serving loop for the status server.\n\n Args:\n status_http_port(int): Binding port for the debug server.\n Default is 0 which means any free unsecured port\n '
class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
'HTTP handler for serving stacktraces of all threads.'
def do_GET(self):
'Return all thread stacktraces information for GET request.'
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line.encode('utf-8'))
def log_message(self, f, *args):
'Do not log any messages.'
pass
self.httpd = httpd = http.server.HTTPServer(('localhost', status_http_port), StatusHttpHandler)
_LOGGER.info('Status HTTP server running at %s:%s', httpd.server_name, httpd.server_port)
httpd.serve_forever()<|docstring|>Executes the serving loop for the status server.
Args:
status_http_port(int): Binding port for the debug server.
Default is 0 which means any free unsecured port<|endoftext|> |
9cc5f72b47c0cc9ff8b5caff2d428ec47ed9f5ede9015d19c794e7b1fde4d431 | def do_GET(self):
'Return all thread stacktraces information for GET request.'
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line.encode('utf-8')) | Return all thread stacktraces information for GET request. | sdks/python/apache_beam/runners/worker/sdk_worker_main.py | do_GET | RyanSkraba/beam | 2 | python | def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line.encode('utf-8')) | def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line.encode('utf-8'))<|docstring|>Return all thread stacktraces information for GET request.<|endoftext|> |
d14f194a3d945540bc2602aa3145eb562288d83a7e22e95038580d56ed6e14e2 | def log_message(self, f, *args):
'Do not log any messages.'
pass | Do not log any messages. | sdks/python/apache_beam/runners/worker/sdk_worker_main.py | log_message | RyanSkraba/beam | 2 | python | def log_message(self, f, *args):
pass | def log_message(self, f, *args):
pass<|docstring|>Do not log any messages.<|endoftext|> |
c61a3533ac3b5d6a5f7974f5957dc4a48e4a455a5c2ca4c3b68ec8a33ecb9dee | def __init__(self, num_clf=100, max_gene=NUM_GENE, dir_path=DIR_PATH):
'\n Parameters\n ----------\n num_clf: int\n number of classifiers in ensemble\n max_gene: int\n Maximum number of genes considerd\n dir_path: str\n Directory where files are saved and read\n '
self.num_clf = num_clf
self.max_gene = max_gene
self.dir_path = dir_path
classification_data = ClassificationData()
self.namespace_dct = classification_data.getDct()
self.data_dct = self.namespace_dct['DATA_DCT']
self.gene_dct = self.namespace_dct['GENE_DCT']
self.classifier_dct = self.namespace_dct['CLASSIFIER_DCT']
self._dataframe = None | Parameters
----------
num_clf: int
number of classifiers in ensemble
max_gene: int
Maximum number of genes considerd
dir_path: str
Directory where files are saved and read | xstate/python/tools/cross_validation_data.py | __init__ | uwescience/new_xstate | 0 | python | def __init__(self, num_clf=100, max_gene=NUM_GENE, dir_path=DIR_PATH):
'\n Parameters\n ----------\n num_clf: int\n number of classifiers in ensemble\n max_gene: int\n Maximum number of genes considerd\n dir_path: str\n Directory where files are saved and read\n '
self.num_clf = num_clf
self.max_gene = max_gene
self.dir_path = dir_path
classification_data = ClassificationData()
self.namespace_dct = classification_data.getDct()
self.data_dct = self.namespace_dct['DATA_DCT']
self.gene_dct = self.namespace_dct['GENE_DCT']
self.classifier_dct = self.namespace_dct['CLASSIFIER_DCT']
self._dataframe = None | def __init__(self, num_clf=100, max_gene=NUM_GENE, dir_path=DIR_PATH):
'\n Parameters\n ----------\n num_clf: int\n number of classifiers in ensemble\n max_gene: int\n Maximum number of genes considerd\n dir_path: str\n Directory where files are saved and read\n '
self.num_clf = num_clf
self.max_gene = max_gene
self.dir_path = dir_path
classification_data = ClassificationData()
self.namespace_dct = classification_data.getDct()
self.data_dct = self.namespace_dct['DATA_DCT']
self.gene_dct = self.namespace_dct['GENE_DCT']
self.classifier_dct = self.namespace_dct['CLASSIFIER_DCT']
self._dataframe = None<|docstring|>Parameters
----------
num_clf: int
number of classifiers in ensemble
max_gene: int
Maximum number of genes considerd
dir_path: str
Directory where files are saved and read<|endoftext|> |
5c34327f271bc8afb8f2d805c301594b4650b9ea20759c48384561dee8e08965 | @property
def dataframe(self):
'\n Constructs the dataframe of cross validation data\n\n Parameters\n ----------\n base_name: str\n\n Returns\n -------\n pd.DataFrame\n '
if (self._dataframe is None):
files = self._getPaths()
dfs = []
for ffile in files:
df = pd.read_csv(ffile)
dfs.append(df)
self._dataframe = pd.concat(dfs, axis=1)
del_columns = [c for c in self._dataframe.columns if ('Unnamed:' in c)]
for column in del_columns:
if (column in self._dataframe.columns):
del self._dataframe[column]
self._dataframe.index = list(range(1, (len(self._dataframe) + 1)))
self._dataframe.index.name = 'num_gene'
return self._dataframe | Constructs the dataframe of cross validation data
Parameters
----------
base_name: str
Returns
-------
pd.DataFrame | xstate/python/tools/cross_validation_data.py | dataframe | uwescience/new_xstate | 0 | python | @property
def dataframe(self):
'\n Constructs the dataframe of cross validation data\n\n Parameters\n ----------\n base_name: str\n\n Returns\n -------\n pd.DataFrame\n '
if (self._dataframe is None):
files = self._getPaths()
dfs = []
for ffile in files:
df = pd.read_csv(ffile)
dfs.append(df)
self._dataframe = pd.concat(dfs, axis=1)
del_columns = [c for c in self._dataframe.columns if ('Unnamed:' in c)]
for column in del_columns:
if (column in self._dataframe.columns):
del self._dataframe[column]
self._dataframe.index = list(range(1, (len(self._dataframe) + 1)))
self._dataframe.index.name = 'num_gene'
return self._dataframe | @property
def dataframe(self):
'\n Constructs the dataframe of cross validation data\n\n Parameters\n ----------\n base_name: str\n\n Returns\n -------\n pd.DataFrame\n '
if (self._dataframe is None):
files = self._getPaths()
dfs = []
for ffile in files:
df = pd.read_csv(ffile)
dfs.append(df)
self._dataframe = pd.concat(dfs, axis=1)
del_columns = [c for c in self._dataframe.columns if ('Unnamed:' in c)]
for column in del_columns:
if (column in self._dataframe.columns):
del self._dataframe[column]
self._dataframe.index = list(range(1, (len(self._dataframe) + 1)))
self._dataframe.index.name = 'num_gene'
return self._dataframe<|docstring|>Constructs the dataframe of cross validation data
Parameters
----------
base_name: str
Returns
-------
pd.DataFrame<|endoftext|> |
b060cc3ec700c036efb2b6e7124cf6763a4c07c43193c9f9b3db510224f234f8 | def make(self, indices=None, num_iter=10):
'\n Creates the data needed for accuracy plots based on cross validations.\n\n Parameters\n ----------\n indices: list-int\n Indicies of keys to process\n num_iter: int\n Number of iterations of cross validation\n\n Returns\n -------\n pd.DataFrame\n index: int\n maximum importance rank of the gene used to construct the classifier\n column: str (classifier name)\n '
ranks = list(range(1, (self.max_gene + 1)))
columns = []
result_dct = {}
if (indices is None):
classifier_dct = self.classifier_dct
else:
keys = list(self.classifier_dct.keys())
classifier_dct = {k: self.classifier_dct[k] for k in keys if (keys.index(k) in indices)}
for (key, clf) in classifier_dct.items():
classifier_name = '--'.join(key)
result_dct[classifier_name] = []
columns.append(classifier_name)
trinary = copy.deepcopy(self.data_dct[key[0]])
trinary.df_X = dataframe.subset(trinary.df_X, self.gene_dct[key[1]])
for rank in ranks:
accuracy = clf.crossValidate(trinary, num_iter=num_iter, num_holdout=1, filter_high_rank=rank, size=self.num_clf)
result_dct[classifier_name].append(accuracy)
df = pd.DataFrame(result_dct, index=ranks)
if (self.dir_path is not None):
path = self._makePath(indices)
df.to_csv(path, index=False)
return df | Creates the data needed for accuracy plots based on cross validations.
Parameters
----------
indices: list-int
Indicies of keys to process
num_iter: int
Number of iterations of cross validation
Returns
-------
pd.DataFrame
index: int
maximum importance rank of the gene used to construct the classifier
column: str (classifier name) | xstate/python/tools/cross_validation_data.py | make | uwescience/new_xstate | 0 | python | def make(self, indices=None, num_iter=10):
'\n Creates the data needed for accuracy plots based on cross validations.\n\n Parameters\n ----------\n indices: list-int\n Indicies of keys to process\n num_iter: int\n Number of iterations of cross validation\n\n Returns\n -------\n pd.DataFrame\n index: int\n maximum importance rank of the gene used to construct the classifier\n column: str (classifier name)\n '
ranks = list(range(1, (self.max_gene + 1)))
columns = []
result_dct = {}
if (indices is None):
classifier_dct = self.classifier_dct
else:
keys = list(self.classifier_dct.keys())
classifier_dct = {k: self.classifier_dct[k] for k in keys if (keys.index(k) in indices)}
for (key, clf) in classifier_dct.items():
classifier_name = '--'.join(key)
result_dct[classifier_name] = []
columns.append(classifier_name)
trinary = copy.deepcopy(self.data_dct[key[0]])
trinary.df_X = dataframe.subset(trinary.df_X, self.gene_dct[key[1]])
for rank in ranks:
accuracy = clf.crossValidate(trinary, num_iter=num_iter, num_holdout=1, filter_high_rank=rank, size=self.num_clf)
result_dct[classifier_name].append(accuracy)
df = pd.DataFrame(result_dct, index=ranks)
if (self.dir_path is not None):
path = self._makePath(indices)
df.to_csv(path, index=False)
return df | def make(self, indices=None, num_iter=10):
'\n Creates the data needed for accuracy plots based on cross validations.\n\n Parameters\n ----------\n indices: list-int\n Indicies of keys to process\n num_iter: int\n Number of iterations of cross validation\n\n Returns\n -------\n pd.DataFrame\n index: int\n maximum importance rank of the gene used to construct the classifier\n column: str (classifier name)\n '
ranks = list(range(1, (self.max_gene + 1)))
columns = []
result_dct = {}
if (indices is None):
classifier_dct = self.classifier_dct
else:
keys = list(self.classifier_dct.keys())
classifier_dct = {k: self.classifier_dct[k] for k in keys if (keys.index(k) in indices)}
for (key, clf) in classifier_dct.items():
classifier_name = '--'.join(key)
result_dct[classifier_name] = []
columns.append(classifier_name)
trinary = copy.deepcopy(self.data_dct[key[0]])
trinary.df_X = dataframe.subset(trinary.df_X, self.gene_dct[key[1]])
for rank in ranks:
accuracy = clf.crossValidate(trinary, num_iter=num_iter, num_holdout=1, filter_high_rank=rank, size=self.num_clf)
result_dct[classifier_name].append(accuracy)
df = pd.DataFrame(result_dct, index=ranks)
if (self.dir_path is not None):
path = self._makePath(indices)
df.to_csv(path, index=False)
return df<|docstring|>Creates the data needed for accuracy plots based on cross validations.
Parameters
----------
indices: list-int
Indicies of keys to process
num_iter: int
Number of iterations of cross validation
Returns
-------
pd.DataFrame
index: int
maximum importance rank of the gene used to construct the classifier
column: str (classifier name)<|endoftext|> |
fdc2546cefbfb9f41d2089d5b761566bcefffb923c787d4da50c9453252de68e | def _makePath(self, indices):
'\n Constructs the path for the indices.\n\n Parameters\n ----------\n indices: list-int\n\n Returns\n -------\n Path\n '
sfx = '_'.join([str(v) for v in indices])
filename = ('%s_%s.%s' % (CV_CALCULATION_FILENAME, sfx, CSV))
return Path(os.path.join(self.dir_path, filename)) | Constructs the path for the indices.
Parameters
----------
indices: list-int
Returns
-------
Path | xstate/python/tools/cross_validation_data.py | _makePath | uwescience/new_xstate | 0 | python | def _makePath(self, indices):
'\n Constructs the path for the indices.\n\n Parameters\n ----------\n indices: list-int\n\n Returns\n -------\n Path\n '
sfx = '_'.join([str(v) for v in indices])
filename = ('%s_%s.%s' % (CV_CALCULATION_FILENAME, sfx, CSV))
return Path(os.path.join(self.dir_path, filename)) | def _makePath(self, indices):
'\n Constructs the path for the indices.\n\n Parameters\n ----------\n indices: list-int\n\n Returns\n -------\n Path\n '
sfx = '_'.join([str(v) for v in indices])
filename = ('%s_%s.%s' % (CV_CALCULATION_FILENAME, sfx, CSV))
return Path(os.path.join(self.dir_path, filename))<|docstring|>Constructs the path for the indices.
Parameters
----------
indices: list-int
Returns
-------
Path<|endoftext|> |
c4627305212b25d7c07ed3bfb18b7e9d7e791e3b0482106dfad3a1a407fdbfb3 | def _getPaths(self):
'\n Gets the cross validation files in the directory.\n\n Returns\n -------\n list-Path\n '
def check(ffile):
ffile = str(ffile)
return ((CV_CALCULATION_FILENAME in ffile) & (CSV in ffile))
paths = os.listdir(self.dir_path)
paths = [os.path.join(self.dir_path, f) for f in paths if check(f)]
return paths | Gets the cross validation files in the directory.
Returns
-------
list-Path | xstate/python/tools/cross_validation_data.py | _getPaths | uwescience/new_xstate | 0 | python | def _getPaths(self):
'\n Gets the cross validation files in the directory.\n\n Returns\n -------\n list-Path\n '
def check(ffile):
ffile = str(ffile)
return ((CV_CALCULATION_FILENAME in ffile) & (CSV in ffile))
paths = os.listdir(self.dir_path)
paths = [os.path.join(self.dir_path, f) for f in paths if check(f)]
return paths | def _getPaths(self):
'\n Gets the cross validation files in the directory.\n\n Returns\n -------\n list-Path\n '
def check(ffile):
ffile = str(ffile)
return ((CV_CALCULATION_FILENAME in ffile) & (CSV in ffile))
paths = os.listdir(self.dir_path)
paths = [os.path.join(self.dir_path, f) for f in paths if check(f)]
return paths<|docstring|>Gets the cross validation files in the directory.
Returns
-------
list-Path<|endoftext|> |
33c0004be926a2dc368570ad65c1d43762b2e9d43d716ba86387dddae9d9b6af | def clean(self):
'\n Removes all existing cross validation files.\n '
ffiles = self._getPaths()
for ffile in ffiles:
os.remove(ffile) | Removes all existing cross validation files. | xstate/python/tools/cross_validation_data.py | clean | uwescience/new_xstate | 0 | python | def clean(self):
'\n \n '
ffiles = self._getPaths()
for ffile in ffiles:
os.remove(ffile) | def clean(self):
'\n \n '
ffiles = self._getPaths()
for ffile in ffiles:
os.remove(ffile)<|docstring|>Removes all existing cross validation files.<|endoftext|> |
0ae9fe5279f90bf48959feeed18888bd1e4948ab6eeac658b7537d26c561ff26 | def __init__(self, data, *axes, uncertainty=None, labels=None, units=None):
' Creates a MeshData instance.\n\n Parameters\n ----------\n data : ndarray\n A at least two-dimensional array containing the data.\n *axes : ndarray\n Arrays specifying the coordinates of the data axes. Must be given\n in indexing order.\n uncertainty : ndarray\n An ndarray of the same size as `data` that contains some measure\n of the uncertainty of the meshdata. E.g., it could be the standard\n deviation of the data.\n labels : list of str, optional\n A list of strings labeling the axes. The last element labels the\n data itself, e.g. ``labels`` must have one more element than the\n number of axes.\n units : list of str, optional\n A list of unit strings.\n '
self.data = data.copy()
self.axes = [np.array(a).copy() for a in axes]
if (uncertainty is not None):
self.uncertainty = uncertainty.copy()
else:
self.uncertainty = None
if (self.ndim != len(axes)):
raise ValueError('Number of supplied axes is wrong!')
if (self.shape != tuple((ax.size for ax in self.axes))):
raise ValueError('Shape of supplied axes is wrong!')
self.labels = labels
if (self.labels is None):
self.labels = ['' for ax in self.axes]
self.units = units
if (self.units is None):
self.units = ['' for ax in self.axes] | Creates a MeshData instance.
Parameters
----------
data : ndarray
A at least two-dimensional array containing the data.
*axes : ndarray
Arrays specifying the coordinates of the data axes. Must be given
in indexing order.
uncertainty : ndarray
An ndarray of the same size as `data` that contains some measure
of the uncertainty of the meshdata. E.g., it could be the standard
deviation of the data.
labels : list of str, optional
A list of strings labeling the axes. The last element labels the
data itself, e.g. ``labels`` must have one more element than the
number of axes.
units : list of str, optional
A list of unit strings. | pypret/mesh_data.py | __init__ | QF06/pypret | 36 | python | def __init__(self, data, *axes, uncertainty=None, labels=None, units=None):
' Creates a MeshData instance.\n\n Parameters\n ----------\n data : ndarray\n A at least two-dimensional array containing the data.\n *axes : ndarray\n Arrays specifying the coordinates of the data axes. Must be given\n in indexing order.\n uncertainty : ndarray\n An ndarray of the same size as `data` that contains some measure\n of the uncertainty of the meshdata. E.g., it could be the standard\n deviation of the data.\n labels : list of str, optional\n A list of strings labeling the axes. The last element labels the\n data itself, e.g. ``labels`` must have one more element than the\n number of axes.\n units : list of str, optional\n A list of unit strings.\n '
self.data = data.copy()
self.axes = [np.array(a).copy() for a in axes]
if (uncertainty is not None):
self.uncertainty = uncertainty.copy()
else:
self.uncertainty = None
if (self.ndim != len(axes)):
raise ValueError('Number of supplied axes is wrong!')
if (self.shape != tuple((ax.size for ax in self.axes))):
raise ValueError('Shape of supplied axes is wrong!')
self.labels = labels
if (self.labels is None):
self.labels = [ for ax in self.axes]
self.units = units
if (self.units is None):
self.units = [ for ax in self.axes] | def __init__(self, data, *axes, uncertainty=None, labels=None, units=None):
' Creates a MeshData instance.\n\n Parameters\n ----------\n data : ndarray\n A at least two-dimensional array containing the data.\n *axes : ndarray\n Arrays specifying the coordinates of the data axes. Must be given\n in indexing order.\n uncertainty : ndarray\n An ndarray of the same size as `data` that contains some measure\n of the uncertainty of the meshdata. E.g., it could be the standard\n deviation of the data.\n labels : list of str, optional\n A list of strings labeling the axes. The last element labels the\n data itself, e.g. ``labels`` must have one more element than the\n number of axes.\n units : list of str, optional\n A list of unit strings.\n '
self.data = data.copy()
self.axes = [np.array(a).copy() for a in axes]
if (uncertainty is not None):
self.uncertainty = uncertainty.copy()
else:
self.uncertainty = None
if (self.ndim != len(axes)):
raise ValueError('Number of supplied axes is wrong!')
if (self.shape != tuple((ax.size for ax in self.axes))):
raise ValueError('Shape of supplied axes is wrong!')
self.labels = labels
if (self.labels is None):
self.labels = [ for ax in self.axes]
self.units = units
if (self.units is None):
self.units = [ for ax in self.axes]<|docstring|>Creates a MeshData instance.
Parameters
----------
data : ndarray
A at least two-dimensional array containing the data.
*axes : ndarray
Arrays specifying the coordinates of the data axes. Must be given
in indexing order.
uncertainty : ndarray
An ndarray of the same size as `data` that contains some measure
of the uncertainty of the meshdata. E.g., it could be the standard
deviation of the data.
labels : list of str, optional
A list of strings labeling the axes. The last element labels the
data itself, e.g. ``labels`` must have one more element than the
number of axes.
units : list of str, optional
A list of unit strings.<|endoftext|> |
07e0a574b0258915365f80e28087367f113d88dba243077988989e66d93d181c | @property
def shape(self):
' Returns the shape of the data as a tuple.\n '
return self.data.shape | Returns the shape of the data as a tuple. | pypret/mesh_data.py | shape | QF06/pypret | 36 | python | @property
def shape(self):
' \n '
return self.data.shape | @property
def shape(self):
' \n '
return self.data.shape<|docstring|>Returns the shape of the data as a tuple.<|endoftext|> |
f2024bfb58433861c93693c39a5aae93ef2e1a0af8c120a08060eab425856794 | @property
def ndim(self):
' Returns the dimension of the data as integer.\n '
return self.data.ndim | Returns the dimension of the data as integer. | pypret/mesh_data.py | ndim | QF06/pypret | 36 | python | @property
def ndim(self):
' \n '
return self.data.ndim | @property
def ndim(self):
' \n '
return self.data.ndim<|docstring|>Returns the dimension of the data as integer.<|endoftext|> |
32fc8e3be227329f6f12668864e82cc9be380516326f67e2df91bedd49b8491a | def copy(self):
' Creates a copy of the MeshData instance. '
return MeshData(self.data, *self.axes, uncertainty=self.uncertainty, labels=self.labels, units=self.units) | Creates a copy of the MeshData instance. | pypret/mesh_data.py | copy | QF06/pypret | 36 | python | def copy(self):
' '
return MeshData(self.data, *self.axes, uncertainty=self.uncertainty, labels=self.labels, units=self.units) | def copy(self):
' '
return MeshData(self.data, *self.axes, uncertainty=self.uncertainty, labels=self.labels, units=self.units)<|docstring|>Creates a copy of the MeshData instance.<|endoftext|> |
ca869e28cc0f9c13d074420c540992033cdadbe403386805ac3acc4c995129db | def marginals(self, normalize=False, axes=None):
' Calculates the marginals of the data.\n\n axes specifies the axes of the marginals, e.g., the axes on which the\n sum is projected.\n '
return lib.marginals(self.data, normalize=normalize, axes=axes) | Calculates the marginals of the data.
axes specifies the axes of the marginals, e.g., the axes on which the
sum is projected. | pypret/mesh_data.py | marginals | QF06/pypret | 36 | python | def marginals(self, normalize=False, axes=None):
' Calculates the marginals of the data.\n\n axes specifies the axes of the marginals, e.g., the axes on which the\n sum is projected.\n '
return lib.marginals(self.data, normalize=normalize, axes=axes) | def marginals(self, normalize=False, axes=None):
' Calculates the marginals of the data.\n\n axes specifies the axes of the marginals, e.g., the axes on which the\n sum is projected.\n '
return lib.marginals(self.data, normalize=normalize, axes=axes)<|docstring|>Calculates the marginals of the data.
axes specifies the axes of the marginals, e.g., the axes on which the
sum is projected.<|endoftext|> |
aac99504d9374af8c5c829b51a18b16e7a5b5b882af7493e192f64deaf49331a | def normalize(self):
' Normalizes the maximum of the data to 1.\n '
self.scale((1.0 / self.data.max())) | Normalizes the maximum of the data to 1. | pypret/mesh_data.py | normalize | QF06/pypret | 36 | python | def normalize(self):
' \n '
self.scale((1.0 / self.data.max())) | def normalize(self):
' \n '
self.scale((1.0 / self.data.max()))<|docstring|>Normalizes the maximum of the data to 1.<|endoftext|> |
b642a4ec6fa5db6ab97c10542f0013a6a2fee50fb91f7160fde0e161a021a559 | def autolimit(self, *axes, threshold=0.01, padding=0.25):
' Limits the data based on the marginals.\n '
if (len(axes) == 0):
axes = list(range(self.ndim))
marginals = lib.marginals(self.data)
limits = []
for (i, j) in enumerate(axes):
limit = lib.limit(self.axes[j], marginals[j], threshold=threshold, padding=padding)
limits.append(limit)
self.limit(*limits, axes=axes) | Limits the data based on the marginals. | pypret/mesh_data.py | autolimit | QF06/pypret | 36 | python | def autolimit(self, *axes, threshold=0.01, padding=0.25):
' \n '
if (len(axes) == 0):
axes = list(range(self.ndim))
marginals = lib.marginals(self.data)
limits = []
for (i, j) in enumerate(axes):
limit = lib.limit(self.axes[j], marginals[j], threshold=threshold, padding=padding)
limits.append(limit)
self.limit(*limits, axes=axes) | def autolimit(self, *axes, threshold=0.01, padding=0.25):
' \n '
if (len(axes) == 0):
axes = list(range(self.ndim))
marginals = lib.marginals(self.data)
limits = []
for (i, j) in enumerate(axes):
limit = lib.limit(self.axes[j], marginals[j], threshold=threshold, padding=padding)
limits.append(limit)
self.limit(*limits, axes=axes)<|docstring|>Limits the data based on the marginals.<|endoftext|> |
14f49b5385f4fae1c04b4ccdf5663562f57d3216bbec8f7f5fed5f2733132b19 | def limit(self, *limits, axes=None):
' Limits the data range of this instance.\n\n Parameters\n ----------\n *limits : tuples\n The data limits in the axes as tuples. Has to match the dimension\n of the data or the number of axes specified in the `axes`\n parameter.\n axes : tuple or None\n The axes in which the limit is applied. Default is `None` in which\n case all axes are selected.\n '
if (axes is None):
axes = list(range(self.ndim))
axes = lib.as_list(axes)
if (len(axes) != len(limits)):
raise ValueError('Number of limits must match the specified axes!')
slices = []
for j in range(self.ndim):
if (j in axes):
i = axes.index(j)
ax = self.axes[j]
(x1, x2) = limits[i]
idx1 = np.argmin(np.abs((ax - x1)))
idx2 = np.argmin(np.abs((ax - x2)))
if (idx1 > idx2):
(idx1, idx2) = (idx2, idx1)
elif (idx1 == idx2):
raise ValueError(('Selected empty slice along axis %d!' % i))
slices.append(slice(idx1, (idx2 + 1)))
else:
slices.append(slice(None))
self.axes[j] = self.axes[j][slices[(- 1)]]
self.data = self.data[(*slices,)]
if (self.uncertainty is not None):
self.uncertainty = self.uncertainty[(*slices,)] | Limits the data range of this instance.
Parameters
----------
*limits : tuples
The data limits in the axes as tuples. Has to match the dimension
of the data or the number of axes specified in the `axes`
parameter.
axes : tuple or None
The axes in which the limit is applied. Default is `None` in which
case all axes are selected. | pypret/mesh_data.py | limit | QF06/pypret | 36 | python | def limit(self, *limits, axes=None):
' Limits the data range of this instance.\n\n Parameters\n ----------\n *limits : tuples\n The data limits in the axes as tuples. Has to match the dimension\n of the data or the number of axes specified in the `axes`\n parameter.\n axes : tuple or None\n The axes in which the limit is applied. Default is `None` in which\n case all axes are selected.\n '
if (axes is None):
axes = list(range(self.ndim))
axes = lib.as_list(axes)
if (len(axes) != len(limits)):
raise ValueError('Number of limits must match the specified axes!')
slices = []
for j in range(self.ndim):
if (j in axes):
i = axes.index(j)
ax = self.axes[j]
(x1, x2) = limits[i]
idx1 = np.argmin(np.abs((ax - x1)))
idx2 = np.argmin(np.abs((ax - x2)))
if (idx1 > idx2):
(idx1, idx2) = (idx2, idx1)
elif (idx1 == idx2):
raise ValueError(('Selected empty slice along axis %d!' % i))
slices.append(slice(idx1, (idx2 + 1)))
else:
slices.append(slice(None))
self.axes[j] = self.axes[j][slices[(- 1)]]
self.data = self.data[(*slices,)]
if (self.uncertainty is not None):
self.uncertainty = self.uncertainty[(*slices,)] | def limit(self, *limits, axes=None):
' Limits the data range of this instance.\n\n Parameters\n ----------\n *limits : tuples\n The data limits in the axes as tuples. Has to match the dimension\n of the data or the number of axes specified in the `axes`\n parameter.\n axes : tuple or None\n The axes in which the limit is applied. Default is `None` in which\n case all axes are selected.\n '
if (axes is None):
axes = list(range(self.ndim))
axes = lib.as_list(axes)
if (len(axes) != len(limits)):
raise ValueError('Number of limits must match the specified axes!')
slices = []
for j in range(self.ndim):
if (j in axes):
i = axes.index(j)
ax = self.axes[j]
(x1, x2) = limits[i]
idx1 = np.argmin(np.abs((ax - x1)))
idx2 = np.argmin(np.abs((ax - x2)))
if (idx1 > idx2):
(idx1, idx2) = (idx2, idx1)
elif (idx1 == idx2):
raise ValueError(('Selected empty slice along axis %d!' % i))
slices.append(slice(idx1, (idx2 + 1)))
else:
slices.append(slice(None))
self.axes[j] = self.axes[j][slices[(- 1)]]
self.data = self.data[(*slices,)]
if (self.uncertainty is not None):
self.uncertainty = self.uncertainty[(*slices,)]<|docstring|>Limits the data range of this instance.
Parameters
----------
*limits : tuples
The data limits in the axes as tuples. Has to match the dimension
of the data or the number of axes specified in the `axes`
parameter.
axes : tuple or None
The axes in which the limit is applied. Default is `None` in which
case all axes are selected.<|endoftext|> |
407bbd981469c21d8272eae4fe7b6d81401170ac80a18930326c83cd87f1100a | def interpolate(self, axis1=None, axis2=None, degree=2, sorted=False):
' Interpolates the data on a new two-dimensional, equidistantly\n spaced grid.\n '
axes = [axis1, axis2]
for i in range(self.ndim):
if (axes[i] is None):
axes[i] = self.axes[i]
orig_axes = self.axes
data = self.data.copy()
if (self.uncertainty is not None):
uncertainty = self.uncertainty.copy()
if (not sorted):
for i in range(len(orig_axes)):
idx = np.argsort(orig_axes[i])
orig_axes[i] = orig_axes[i][idx]
data = np.take(data, idx, axis=i)
if (self.uncertainty is not None):
uncertainty = np.take(uncertainty, idx, axis=i)
dataf = RegularGridInterpolator(tuple(orig_axes), data, bounds_error=False, fill_value=0.0)
grid = lib.build_coords(*axes)
self.data = dataf(grid)
self.axes = axes
if (self.uncertainty is not None):
dataf = RegularGridInterpolator(tuple(orig_axes), uncertainty, bounds_error=False, fill_value=0.0)
self.uncertainty = dataf(grid) | Interpolates the data on a new two-dimensional, equidistantly
spaced grid. | pypret/mesh_data.py | interpolate | QF06/pypret | 36 | python | def interpolate(self, axis1=None, axis2=None, degree=2, sorted=False):
' Interpolates the data on a new two-dimensional, equidistantly\n spaced grid.\n '
axes = [axis1, axis2]
for i in range(self.ndim):
if (axes[i] is None):
axes[i] = self.axes[i]
orig_axes = self.axes
data = self.data.copy()
if (self.uncertainty is not None):
uncertainty = self.uncertainty.copy()
if (not sorted):
for i in range(len(orig_axes)):
idx = np.argsort(orig_axes[i])
orig_axes[i] = orig_axes[i][idx]
data = np.take(data, idx, axis=i)
if (self.uncertainty is not None):
uncertainty = np.take(uncertainty, idx, axis=i)
dataf = RegularGridInterpolator(tuple(orig_axes), data, bounds_error=False, fill_value=0.0)
grid = lib.build_coords(*axes)
self.data = dataf(grid)
self.axes = axes
if (self.uncertainty is not None):
dataf = RegularGridInterpolator(tuple(orig_axes), uncertainty, bounds_error=False, fill_value=0.0)
self.uncertainty = dataf(grid) | def interpolate(self, axis1=None, axis2=None, degree=2, sorted=False):
' Interpolates the data on a new two-dimensional, equidistantly\n spaced grid.\n '
axes = [axis1, axis2]
for i in range(self.ndim):
if (axes[i] is None):
axes[i] = self.axes[i]
orig_axes = self.axes
data = self.data.copy()
if (self.uncertainty is not None):
uncertainty = self.uncertainty.copy()
if (not sorted):
for i in range(len(orig_axes)):
idx = np.argsort(orig_axes[i])
orig_axes[i] = orig_axes[i][idx]
data = np.take(data, idx, axis=i)
if (self.uncertainty is not None):
uncertainty = np.take(uncertainty, idx, axis=i)
dataf = RegularGridInterpolator(tuple(orig_axes), data, bounds_error=False, fill_value=0.0)
grid = lib.build_coords(*axes)
self.data = dataf(grid)
self.axes = axes
if (self.uncertainty is not None):
dataf = RegularGridInterpolator(tuple(orig_axes), uncertainty, bounds_error=False, fill_value=0.0)
self.uncertainty = dataf(grid)<|docstring|>Interpolates the data on a new two-dimensional, equidistantly
spaced grid.<|endoftext|> |
b373add2efa1810024b8fd6f137b1f449e900cf2cb4b49185ad7a589dfc20cf5 | def flip(self, *axes):
' Flips the data on the specified axes.\n '
if (len(axes) == 0):
return
axes = lib.as_list(axes)
slices = [slice(None) for ax in self.axes]
for ax in axes:
self.axes[ax] = self.axes[ax][::(- 1)]
slices[ax] = slice(None, None, (- 1))
self.data = self.data[slices]
if (self.uncertainty is not None):
self.uncertainty = self.uncertainty[slices] | Flips the data on the specified axes. | pypret/mesh_data.py | flip | QF06/pypret | 36 | python | def flip(self, *axes):
' \n '
if (len(axes) == 0):
return
axes = lib.as_list(axes)
slices = [slice(None) for ax in self.axes]
for ax in axes:
self.axes[ax] = self.axes[ax][::(- 1)]
slices[ax] = slice(None, None, (- 1))
self.data = self.data[slices]
if (self.uncertainty is not None):
self.uncertainty = self.uncertainty[slices] | def flip(self, *axes):
' \n '
if (len(axes) == 0):
return
axes = lib.as_list(axes)
slices = [slice(None) for ax in self.axes]
for ax in axes:
self.axes[ax] = self.axes[ax][::(- 1)]
slices[ax] = slice(None, None, (- 1))
self.data = self.data[slices]
if (self.uncertainty is not None):
self.uncertainty = self.uncertainty[slices]<|docstring|>Flips the data on the specified axes.<|endoftext|> |
349aa2083e3d4e65027e840db747c82c823adc20bbc7ac6ab02d419e5dcc6dd8 | def parse(image, origin_anchors):
'\n :param image: input picture, shape like (H, W, 3)\n :param origin_anchors: text like ["201,162,207,229",\n "208,162,223,229",\n "224,162,239,229"]\n each line was a anchor box in image\n :return: positive: 与ground truth的IOU大于0.7就是positive sample\n negative: 与ground truth的IOU小于0.5就是negative sample\n vertical_reg:\n side_refinement_reg:\n '
positive = []
negative = []
vertical_reg = []
side_refinement_reg = []
(height, width) = (np.array(image.shape[:2]) / 16)
prepared_anchors = get_all_prepared_anchors(height, width)
ground_truth_anchors = get_all_gt_anchors(origin_anchors)
pred_gt_iou = {}
for key in prepared_anchors:
prepared_anchors_pre_space = prepared_anchors[key]
for (k, prepared_anchor) in enumerate(prepared_anchors_pre_space):
iou_key = ((key + '-') + str(k))
if (iou_key not in pred_gt_iou):
pred_gt_iou[iou_key] = []
for gt_key in ground_truth_anchors:
gt_anchor = ground_truth_anchors[gt_key]
iou = _cal_iou(prepared_anchor, gt_anchor[0], gt_anchor[1], gt_anchor[2])
pred_gt_iou[iou_key].append(iou)
for iou_key in pred_gt_iou:
ious = pred_gt_iou[iou_key]
indices = iou_key.split('-')
prepared_anchor = prepared_anchors[((indices[0] + '-') + indices[1])][int(indices[2])]
ground_truth_anchor_array = [ground_truth_anchors[gt_key] for gt_key in ground_truth_anchors]
max_iou = max(ious)
if (max_iou < 0.4):
negative.append(prepared_anchor)
elif (max_iou > 0.7):
positive.append(prepared_anchor)
else:
for (gt_index, iou) in enumerate(ious):
if (iou < 0.5):
continue
gt_anchor = ground_truth_anchors[gt_index]
(is_side, is_left) = _is_side_anchor(gt_index, gt_anchor, ground_truth_anchor_array)
yaxis = prepared_anchor[3]
prepared_anchor_height = ANCHOR_HEIGHTS[prepared_anchor[2]]
vc = ((yaxis - gt_anchor[0][1]) / prepared_anchor_height)
vh = math.log10((gt_anchor[1] / prepared_anchor_height))
vertical_reg.append((prepared_anchor[0], prepared_anchor[1], prepared_anchor[2], vc, vh))
if is_side:
x_axis = gt_anchor[3][(0 if is_left else 2)]
side_refinement_reg.append((prepared_anchor[0], prepared_anchor[1], prepared_anchor[2], ((x_axis - (prepared_anchor[1] * (16 if is_left else 17))) / 16)))
positive = random.sample(positive, min(NUM_OF_SAMPLE, len(positive)))
negative = random.sample(negative, min(NUM_OF_SAMPLE, len(negative)))
return (positive, negative, vertical_reg, side_refinement_reg) | :param image: input picture, shape like (H, W, 3)
:param origin_anchors: text like ["201,162,207,229",
"208,162,223,229",
"224,162,239,229"]
each line was a anchor box in image
:return: positive: 与ground truth的IOU大于0.7就是positive sample
negative: 与ground truth的IOU小于0.5就是negative sample
vertical_reg:
side_refinement_reg: | model/localization/ctpn/ctpn_anchor.py | parse | kokoyy/OCR.pytorch | 0 | python | def parse(image, origin_anchors):
'\n :param image: input picture, shape like (H, W, 3)\n :param origin_anchors: text like ["201,162,207,229",\n "208,162,223,229",\n "224,162,239,229"]\n each line was a anchor box in image\n :return: positive: 与ground truth的IOU大于0.7就是positive sample\n negative: 与ground truth的IOU小于0.5就是negative sample\n vertical_reg:\n side_refinement_reg:\n '
positive = []
negative = []
vertical_reg = []
side_refinement_reg = []
(height, width) = (np.array(image.shape[:2]) / 16)
prepared_anchors = get_all_prepared_anchors(height, width)
ground_truth_anchors = get_all_gt_anchors(origin_anchors)
pred_gt_iou = {}
for key in prepared_anchors:
prepared_anchors_pre_space = prepared_anchors[key]
for (k, prepared_anchor) in enumerate(prepared_anchors_pre_space):
iou_key = ((key + '-') + str(k))
if (iou_key not in pred_gt_iou):
pred_gt_iou[iou_key] = []
for gt_key in ground_truth_anchors:
gt_anchor = ground_truth_anchors[gt_key]
iou = _cal_iou(prepared_anchor, gt_anchor[0], gt_anchor[1], gt_anchor[2])
pred_gt_iou[iou_key].append(iou)
for iou_key in pred_gt_iou:
ious = pred_gt_iou[iou_key]
indices = iou_key.split('-')
prepared_anchor = prepared_anchors[((indices[0] + '-') + indices[1])][int(indices[2])]
ground_truth_anchor_array = [ground_truth_anchors[gt_key] for gt_key in ground_truth_anchors]
max_iou = max(ious)
if (max_iou < 0.4):
negative.append(prepared_anchor)
elif (max_iou > 0.7):
positive.append(prepared_anchor)
else:
for (gt_index, iou) in enumerate(ious):
if (iou < 0.5):
continue
gt_anchor = ground_truth_anchors[gt_index]
(is_side, is_left) = _is_side_anchor(gt_index, gt_anchor, ground_truth_anchor_array)
yaxis = prepared_anchor[3]
prepared_anchor_height = ANCHOR_HEIGHTS[prepared_anchor[2]]
vc = ((yaxis - gt_anchor[0][1]) / prepared_anchor_height)
vh = math.log10((gt_anchor[1] / prepared_anchor_height))
vertical_reg.append((prepared_anchor[0], prepared_anchor[1], prepared_anchor[2], vc, vh))
if is_side:
x_axis = gt_anchor[3][(0 if is_left else 2)]
side_refinement_reg.append((prepared_anchor[0], prepared_anchor[1], prepared_anchor[2], ((x_axis - (prepared_anchor[1] * (16 if is_left else 17))) / 16)))
positive = random.sample(positive, min(NUM_OF_SAMPLE, len(positive)))
negative = random.sample(negative, min(NUM_OF_SAMPLE, len(negative)))
return (positive, negative, vertical_reg, side_refinement_reg) | def parse(image, origin_anchors):
'\n :param image: input picture, shape like (H, W, 3)\n :param origin_anchors: text like ["201,162,207,229",\n "208,162,223,229",\n "224,162,239,229"]\n each line was a anchor box in image\n :return: positive: 与ground truth的IOU大于0.7就是positive sample\n negative: 与ground truth的IOU小于0.5就是negative sample\n vertical_reg:\n side_refinement_reg:\n '
positive = []
negative = []
vertical_reg = []
side_refinement_reg = []
(height, width) = (np.array(image.shape[:2]) / 16)
prepared_anchors = get_all_prepared_anchors(height, width)
ground_truth_anchors = get_all_gt_anchors(origin_anchors)
pred_gt_iou = {}
for key in prepared_anchors:
prepared_anchors_pre_space = prepared_anchors[key]
for (k, prepared_anchor) in enumerate(prepared_anchors_pre_space):
iou_key = ((key + '-') + str(k))
if (iou_key not in pred_gt_iou):
pred_gt_iou[iou_key] = []
for gt_key in ground_truth_anchors:
gt_anchor = ground_truth_anchors[gt_key]
iou = _cal_iou(prepared_anchor, gt_anchor[0], gt_anchor[1], gt_anchor[2])
pred_gt_iou[iou_key].append(iou)
for iou_key in pred_gt_iou:
ious = pred_gt_iou[iou_key]
indices = iou_key.split('-')
prepared_anchor = prepared_anchors[((indices[0] + '-') + indices[1])][int(indices[2])]
ground_truth_anchor_array = [ground_truth_anchors[gt_key] for gt_key in ground_truth_anchors]
max_iou = max(ious)
if (max_iou < 0.4):
negative.append(prepared_anchor)
elif (max_iou > 0.7):
positive.append(prepared_anchor)
else:
for (gt_index, iou) in enumerate(ious):
if (iou < 0.5):
continue
gt_anchor = ground_truth_anchors[gt_index]
(is_side, is_left) = _is_side_anchor(gt_index, gt_anchor, ground_truth_anchor_array)
yaxis = prepared_anchor[3]
prepared_anchor_height = ANCHOR_HEIGHTS[prepared_anchor[2]]
vc = ((yaxis - gt_anchor[0][1]) / prepared_anchor_height)
vh = math.log10((gt_anchor[1] / prepared_anchor_height))
vertical_reg.append((prepared_anchor[0], prepared_anchor[1], prepared_anchor[2], vc, vh))
if is_side:
x_axis = gt_anchor[3][(0 if is_left else 2)]
side_refinement_reg.append((prepared_anchor[0], prepared_anchor[1], prepared_anchor[2], ((x_axis - (prepared_anchor[1] * (16 if is_left else 17))) / 16)))
positive = random.sample(positive, min(NUM_OF_SAMPLE, len(positive)))
negative = random.sample(negative, min(NUM_OF_SAMPLE, len(negative)))
return (positive, negative, vertical_reg, side_refinement_reg)<|docstring|>:param image: input picture, shape like (H, W, 3)
:param origin_anchors: text like ["201,162,207,229",
"208,162,223,229",
"224,162,239,229"]
each line was a anchor box in image
:return: positive: 与ground truth的IOU大于0.7就是positive sample
negative: 与ground truth的IOU小于0.5就是negative sample
vertical_reg:
side_refinement_reg:<|endoftext|> |
4031f2da0660b76cf7d3f71a133cebdf4d3ef9d51400e5c9a3647998d2660a5b | def _cal_iou(prepared_anchor, gt_center, gt_height, gt_width):
'\n calculate iou between prepared anchor and ground truth anchor\n :param prepared_anchor: shape like (j, i, k, center)\n :param gt_center:\n :param gt_height:\n :param gt_width:\n :return:\n '
prepared_anchor_height = ANCHOR_HEIGHTS[prepared_anchor[2]]
prepared_anchor_width = FIX_ANCHOR_WIDTH
prepared_anchor_left = (prepared_anchor[1] * 16)
prepared_anchor_right = (((prepared_anchor[1] + 1) * 16) - 1)
prepared_anchor_top = (prepared_anchor[3] - (prepared_anchor_height / 2))
prepared_anchor_bottom = (prepared_anchor[3] + (prepared_anchor_height / 2))
gt_anchor_top = (gt_center[1] - (gt_height / 2))
gt_anchor_bottom = (gt_center[1] + (gt_height / 2))
gt_anchor_left = (gt_center[0] - (gt_width / 2))
gt_anchor_right = (gt_center[0] + (gt_width / 2))
if ((gt_anchor_top <= prepared_anchor_top) and (gt_anchor_bottom < prepared_anchor_top)):
return 0
if ((prepared_anchor_top <= gt_anchor_top) and (prepared_anchor_bottom < gt_anchor_top)):
return 0
if ((gt_anchor_left <= prepared_anchor_left) and (gt_anchor_right < prepared_anchor_left)):
return 0
if ((prepared_anchor_left <= gt_anchor_left) and (prepared_anchor_right < gt_anchor_left)):
return 0
iou_width = (min(prepared_anchor_right, gt_anchor_right) - max(prepared_anchor_left, gt_anchor_left))
iou_height = (min(prepared_anchor_bottom, gt_anchor_bottom) - max(prepared_anchor_top, gt_anchor_top))
iou_area = (iou_width * iou_height)
total_area = (((prepared_anchor_height * prepared_anchor_width) + (gt_height * gt_width)) - iou_area)
iou = (iou_area / total_area)
return iou | calculate iou between prepared anchor and ground truth anchor
:param prepared_anchor: shape like (j, i, k, center)
:param gt_center:
:param gt_height:
:param gt_width:
:return: | model/localization/ctpn/ctpn_anchor.py | _cal_iou | kokoyy/OCR.pytorch | 0 | python | def _cal_iou(prepared_anchor, gt_center, gt_height, gt_width):
'\n calculate iou between prepared anchor and ground truth anchor\n :param prepared_anchor: shape like (j, i, k, center)\n :param gt_center:\n :param gt_height:\n :param gt_width:\n :return:\n '
prepared_anchor_height = ANCHOR_HEIGHTS[prepared_anchor[2]]
prepared_anchor_width = FIX_ANCHOR_WIDTH
prepared_anchor_left = (prepared_anchor[1] * 16)
prepared_anchor_right = (((prepared_anchor[1] + 1) * 16) - 1)
prepared_anchor_top = (prepared_anchor[3] - (prepared_anchor_height / 2))
prepared_anchor_bottom = (prepared_anchor[3] + (prepared_anchor_height / 2))
gt_anchor_top = (gt_center[1] - (gt_height / 2))
gt_anchor_bottom = (gt_center[1] + (gt_height / 2))
gt_anchor_left = (gt_center[0] - (gt_width / 2))
gt_anchor_right = (gt_center[0] + (gt_width / 2))
if ((gt_anchor_top <= prepared_anchor_top) and (gt_anchor_bottom < prepared_anchor_top)):
return 0
if ((prepared_anchor_top <= gt_anchor_top) and (prepared_anchor_bottom < gt_anchor_top)):
return 0
if ((gt_anchor_left <= prepared_anchor_left) and (gt_anchor_right < prepared_anchor_left)):
return 0
if ((prepared_anchor_left <= gt_anchor_left) and (prepared_anchor_right < gt_anchor_left)):
return 0
iou_width = (min(prepared_anchor_right, gt_anchor_right) - max(prepared_anchor_left, gt_anchor_left))
iou_height = (min(prepared_anchor_bottom, gt_anchor_bottom) - max(prepared_anchor_top, gt_anchor_top))
iou_area = (iou_width * iou_height)
total_area = (((prepared_anchor_height * prepared_anchor_width) + (gt_height * gt_width)) - iou_area)
iou = (iou_area / total_area)
return iou | def _cal_iou(prepared_anchor, gt_center, gt_height, gt_width):
'\n calculate iou between prepared anchor and ground truth anchor\n :param prepared_anchor: shape like (j, i, k, center)\n :param gt_center:\n :param gt_height:\n :param gt_width:\n :return:\n '
prepared_anchor_height = ANCHOR_HEIGHTS[prepared_anchor[2]]
prepared_anchor_width = FIX_ANCHOR_WIDTH
prepared_anchor_left = (prepared_anchor[1] * 16)
prepared_anchor_right = (((prepared_anchor[1] + 1) * 16) - 1)
prepared_anchor_top = (prepared_anchor[3] - (prepared_anchor_height / 2))
prepared_anchor_bottom = (prepared_anchor[3] + (prepared_anchor_height / 2))
gt_anchor_top = (gt_center[1] - (gt_height / 2))
gt_anchor_bottom = (gt_center[1] + (gt_height / 2))
gt_anchor_left = (gt_center[0] - (gt_width / 2))
gt_anchor_right = (gt_center[0] + (gt_width / 2))
if ((gt_anchor_top <= prepared_anchor_top) and (gt_anchor_bottom < prepared_anchor_top)):
return 0
if ((prepared_anchor_top <= gt_anchor_top) and (prepared_anchor_bottom < gt_anchor_top)):
return 0
if ((gt_anchor_left <= prepared_anchor_left) and (gt_anchor_right < prepared_anchor_left)):
return 0
if ((prepared_anchor_left <= gt_anchor_left) and (prepared_anchor_right < gt_anchor_left)):
return 0
iou_width = (min(prepared_anchor_right, gt_anchor_right) - max(prepared_anchor_left, gt_anchor_left))
iou_height = (min(prepared_anchor_bottom, gt_anchor_bottom) - max(prepared_anchor_top, gt_anchor_top))
iou_area = (iou_width * iou_height)
total_area = (((prepared_anchor_height * prepared_anchor_width) + (gt_height * gt_width)) - iou_area)
iou = (iou_area / total_area)
return iou<|docstring|>calculate iou between prepared anchor and ground truth anchor
:param prepared_anchor: shape like (j, i, k, center)
:param gt_center:
:param gt_height:
:param gt_width:
:return:<|endoftext|> |
a9b3c6cce6b89ae142962c2e8934814917cd9a0c294060f0df1d7635e6784030 | def _is_side_anchor(anchor_index, ground_truth_anchor, ground_truth_anchors):
'\n check if anchor is on the left or right side of Bbox\n :param anchor_index: index of ground_truth_anchor in ground_truth_anchors\n :param ground_truth_anchor:\n :param ground_truth_anchors:\n :return:\n '
if ((anchor_index == 0) or (anchor_index == (len(ground_truth_anchors) - 1))):
return (True, (anchor_index == 0))
previous_ground_truth_anchor = ground_truth_anchors[(anchor_index - 1)]
distance = math.fabs((previous_ground_truth_anchor[3][2] - ground_truth_anchor[3][0]))
if (distance > 1):
return (True, True)
next_ground_truth_anchor = ground_truth_anchors[(anchor_index + 1)]
distance = math.fabs((ground_truth_anchor[3][2] - next_ground_truth_anchor[3][0]))
if (distance > 1):
return (True, False)
return (False, False) | check if anchor is on the left or right side of Bbox
:param anchor_index: index of ground_truth_anchor in ground_truth_anchors
:param ground_truth_anchor:
:param ground_truth_anchors:
:return: | model/localization/ctpn/ctpn_anchor.py | _is_side_anchor | kokoyy/OCR.pytorch | 0 | python | def _is_side_anchor(anchor_index, ground_truth_anchor, ground_truth_anchors):
'\n check if anchor is on the left or right side of Bbox\n :param anchor_index: index of ground_truth_anchor in ground_truth_anchors\n :param ground_truth_anchor:\n :param ground_truth_anchors:\n :return:\n '
if ((anchor_index == 0) or (anchor_index == (len(ground_truth_anchors) - 1))):
return (True, (anchor_index == 0))
previous_ground_truth_anchor = ground_truth_anchors[(anchor_index - 1)]
distance = math.fabs((previous_ground_truth_anchor[3][2] - ground_truth_anchor[3][0]))
if (distance > 1):
return (True, True)
next_ground_truth_anchor = ground_truth_anchors[(anchor_index + 1)]
distance = math.fabs((ground_truth_anchor[3][2] - next_ground_truth_anchor[3][0]))
if (distance > 1):
return (True, False)
return (False, False) | def _is_side_anchor(anchor_index, ground_truth_anchor, ground_truth_anchors):
'\n check if anchor is on the left or right side of Bbox\n :param anchor_index: index of ground_truth_anchor in ground_truth_anchors\n :param ground_truth_anchor:\n :param ground_truth_anchors:\n :return:\n '
if ((anchor_index == 0) or (anchor_index == (len(ground_truth_anchors) - 1))):
return (True, (anchor_index == 0))
previous_ground_truth_anchor = ground_truth_anchors[(anchor_index - 1)]
distance = math.fabs((previous_ground_truth_anchor[3][2] - ground_truth_anchor[3][0]))
if (distance > 1):
return (True, True)
next_ground_truth_anchor = ground_truth_anchors[(anchor_index + 1)]
distance = math.fabs((ground_truth_anchor[3][2] - next_ground_truth_anchor[3][0]))
if (distance > 1):
return (True, False)
return (False, False)<|docstring|>check if anchor is on the left or right side of Bbox
:param anchor_index: index of ground_truth_anchor in ground_truth_anchors
:param ground_truth_anchor:
:param ground_truth_anchors:
:return:<|endoftext|> |
1123983ae4092f9bfb2b4f1d7ea432decd9dcff6c686b454174d55038e147309 | def return_empty_mappings(n=DEFAULT_N):
" Return 'n' * empty mappings\n "
y = 0
mappings = []
while (y < n):
mappings.append({'in': '', 'out': '', 'context_before': '', 'context_after': ''})
y += 1
return mappings | Return 'n' * empty mappings | g2p/__init__.py | return_empty_mappings | joanise/g2p | 0 | python | def return_empty_mappings(n=DEFAULT_N):
" \n "
y = 0
mappings = []
while (y < n):
mappings.append({'in': , 'out': , 'context_before': , 'context_after': })
y += 1
return mappings | def return_empty_mappings(n=DEFAULT_N):
" \n "
y = 0
mappings = []
while (y < n):
mappings.append({'in': , 'out': , 'context_before': , 'context_after': })
y += 1
return mappings<|docstring|>Return 'n' * empty mappings<|endoftext|> |
5d760c2e314a6ecd4a170431b6b05fd7f3247c60e8270c1bb9fac305053b5394 | def hot_to_mappings(hot_data):
' Parse data from HandsOnTable to Mapping format\n '
return [{'context_before': str((x[2] or '')), 'in': str((x[0] or '')), 'context_after': str((x[3] or '')), 'out': str((x[1] or ''))} for x in hot_data if (x[0] or x[1])] | Parse data from HandsOnTable to Mapping format | g2p/__init__.py | hot_to_mappings | joanise/g2p | 0 | python | def hot_to_mappings(hot_data):
' \n '
return [{'context_before': str((x[2] or )), 'in': str((x[0] or )), 'context_after': str((x[3] or )), 'out': str((x[1] or ))} for x in hot_data if (x[0] or x[1])] | def hot_to_mappings(hot_data):
' \n '
return [{'context_before': str((x[2] or )), 'in': str((x[0] or )), 'context_after': str((x[3] or )), 'out': str((x[1] or ))} for x in hot_data if (x[0] or x[1])]<|docstring|>Parse data from HandsOnTable to Mapping format<|endoftext|> |
56122db959bff84bc9217971fbf7f6943e5eed699b784980c82ff472acfdec7c | @APP.route('/')
def home():
' Return homepage of g2p Studio\n '
return render_template('index.html', langs=LANGS) | Return homepage of g2p Studio | g2p/__init__.py | home | joanise/g2p | 0 | python | @APP.route('/')
def home():
' \n '
return render_template('index.html', langs=LANGS) | @APP.route('/')
def home():
' \n '
return render_template('index.html', langs=LANGS)<|docstring|>Return homepage of g2p Studio<|endoftext|> |
f1a5766fc5209c161c059b10095712bc24c62655f5767012f673f4f3b9903bb5 | @SOCKETIO.on('index conversion event', namespace='/convert')
def index_convert(message):
' Convert input text and return output with indices for echart\n '
mappings = Mapping(hot_to_mappings(message['data']['mappings']), abbreviations=flatten_abbreviations(message['data']['abbreviations']), **message['data']['kwargs'])
transducer = Transducer(mappings)
(output_string, indices) = transducer(message['data']['input_string'], index=True)
(data, links) = return_echart_data(indices)
emit('index conversion response', {'output_string': output_string, 'index_data': data, 'index_links': links}) | Convert input text and return output with indices for echart | g2p/__init__.py | index_convert | joanise/g2p | 0 | python | @SOCKETIO.on('index conversion event', namespace='/convert')
def index_convert(message):
' \n '
mappings = Mapping(hot_to_mappings(message['data']['mappings']), abbreviations=flatten_abbreviations(message['data']['abbreviations']), **message['data']['kwargs'])
transducer = Transducer(mappings)
(output_string, indices) = transducer(message['data']['input_string'], index=True)
(data, links) = return_echart_data(indices)
emit('index conversion response', {'output_string': output_string, 'index_data': data, 'index_links': links}) | @SOCKETIO.on('index conversion event', namespace='/convert')
def index_convert(message):
' \n '
mappings = Mapping(hot_to_mappings(message['data']['mappings']), abbreviations=flatten_abbreviations(message['data']['abbreviations']), **message['data']['kwargs'])
transducer = Transducer(mappings)
(output_string, indices) = transducer(message['data']['input_string'], index=True)
(data, links) = return_echart_data(indices)
emit('index conversion response', {'output_string': output_string, 'index_data': data, 'index_links': links})<|docstring|>Convert input text and return output with indices for echart<|endoftext|> |
3f7e3fdadc00562c093059fecedfba1d9342fd5f8e2ebcd225c85c8e52723733 | @SOCKETIO.on('conversion event', namespace='/convert')
def convert(message):
' Convert input text and return output\n '
mappings = Mapping(hot_to_mappings(message['data']['mappings']), abbreviations=flatten_abbreviations(message['data']['abbreviations']), **message['data']['kwargs'])
transducer = Transducer(mappings)
output_string = transducer(message['data']['input_string'])
emit('conversion response', {'output_string': output_string}) | Convert input text and return output | g2p/__init__.py | convert | joanise/g2p | 0 | python | @SOCKETIO.on('conversion event', namespace='/convert')
def convert(message):
' \n '
mappings = Mapping(hot_to_mappings(message['data']['mappings']), abbreviations=flatten_abbreviations(message['data']['abbreviations']), **message['data']['kwargs'])
transducer = Transducer(mappings)
output_string = transducer(message['data']['input_string'])
emit('conversion response', {'output_string': output_string}) | @SOCKETIO.on('conversion event', namespace='/convert')
def convert(message):
' \n '
mappings = Mapping(hot_to_mappings(message['data']['mappings']), abbreviations=flatten_abbreviations(message['data']['abbreviations']), **message['data']['kwargs'])
transducer = Transducer(mappings)
output_string = transducer(message['data']['input_string'])
emit('conversion response', {'output_string': output_string})<|docstring|>Convert input text and return output<|endoftext|> |
1a77711de78864200190766f5793cc4c8a3e8c4045d2a5efa8b993a13a3cc0c1 | @SOCKETIO.on('table event', namespace='/table')
def change_table(message):
' Change the lookup table\n '
if ((message['in_lang'] == 'custom') or (message['out_lang'] == 'custom')):
mappings = Mapping(return_empty_mappings())
else:
mappings = Mapping(in_lang=message['in_lang'], out_lang=message['out_lang'])
emit('table response', {'mappings': mappings.plain_mapping(), 'abbs': expand_abbreviations(mappings.abbreviations), 'kwargs': mappings.kwargs}) | Change the lookup table | g2p/__init__.py | change_table | joanise/g2p | 0 | python | @SOCKETIO.on('table event', namespace='/table')
def change_table(message):
' \n '
if ((message['in_lang'] == 'custom') or (message['out_lang'] == 'custom')):
mappings = Mapping(return_empty_mappings())
else:
mappings = Mapping(in_lang=message['in_lang'], out_lang=message['out_lang'])
emit('table response', {'mappings': mappings.plain_mapping(), 'abbs': expand_abbreviations(mappings.abbreviations), 'kwargs': mappings.kwargs}) | @SOCKETIO.on('table event', namespace='/table')
def change_table(message):
' \n '
if ((message['in_lang'] == 'custom') or (message['out_lang'] == 'custom')):
mappings = Mapping(return_empty_mappings())
else:
mappings = Mapping(in_lang=message['in_lang'], out_lang=message['out_lang'])
emit('table response', {'mappings': mappings.plain_mapping(), 'abbs': expand_abbreviations(mappings.abbreviations), 'kwargs': mappings.kwargs})<|docstring|>Change the lookup table<|endoftext|> |
07790d49fd16aa21141253bfdc3ff95c78b9b7f3b50376d61739233eb2a00e1d | @SOCKETIO.on('connect', namespace='/connect')
def test_connect():
' Let client know disconnected\n '
emit('connection response', {'data': 'Connected'}) | Let client know disconnected | g2p/__init__.py | test_connect | joanise/g2p | 0 | python | @SOCKETIO.on('connect', namespace='/connect')
def test_connect():
' \n '
emit('connection response', {'data': 'Connected'}) | @SOCKETIO.on('connect', namespace='/connect')
def test_connect():
' \n '
emit('connection response', {'data': 'Connected'})<|docstring|>Let client know disconnected<|endoftext|> |
9295997e364b284644c3fe919f525a93c741ae2a70e6db7846e96c2028ede546 | @SOCKETIO.on('disconnect', namespace='/connect')
def test_disconnect():
' Let client know disconnected\n '
emit('connection response', {'data': 'Disconnected'}) | Let client know disconnected | g2p/__init__.py | test_disconnect | joanise/g2p | 0 | python | @SOCKETIO.on('disconnect', namespace='/connect')
def test_disconnect():
' \n '
emit('connection response', {'data': 'Disconnected'}) | @SOCKETIO.on('disconnect', namespace='/connect')
def test_disconnect():
' \n '
emit('connection response', {'data': 'Disconnected'})<|docstring|>Let client know disconnected<|endoftext|> |
de338d8a18b7c00e320a1113ce7e43bd6dbe7e1228f30cd4dc2f8761f9c93090 | def rect(t):
'Rectangle function.'
f = np.zeros_like(t)
I = (np.abs(t) < 0.5)
f[I] = 1
f[(np.abs(t) == 0.5)] = 0.5
return f | Rectangle function. | pyinverse/rect.py | rect | butala/pyinverse | 1 | python | def rect(t):
f = np.zeros_like(t)
I = (np.abs(t) < 0.5)
f[I] = 1
f[(np.abs(t) == 0.5)] = 0.5
return f | def rect(t):
f = np.zeros_like(t)
I = (np.abs(t) < 0.5)
f[I] = 1
f[(np.abs(t) == 0.5)] = 0.5
return f<|docstring|>Rectangle function.<|endoftext|> |
90e55aa9a1e9af7ba1de66d7f898e6f6014b3ba59e4381f5d7687368a7901eb6 | def srect(t, a):
'Scaled rectangle function.'
return rect((a * t)) | Scaled rectangle function. | pyinverse/rect.py | srect | butala/pyinverse | 1 | python | def srect(t, a):
return rect((a * t)) | def srect(t, a):
return rect((a * t))<|docstring|>Scaled rectangle function.<|endoftext|> |
a4436d533c80dae5dae64bc28246d822fef7a3833411250ddc164511d31d7f09 | def srect_conv_srect(t, a, b):
'Scaled rectangle convolved with scaled rectangle.'
assert ((a > 0) and (b > 0))
if (a < b):
return srect_conv_srect(t, b, a)
f = np.zeros_like(t)
I1 = (np.abs(t) < ((a + b) / ((2 * a) * b)))
I2 = (np.abs(t) > ((a - b) / ((2 * a) * b)))
I = (I1 & I2)
f[I] = (((a + b) / ((2 * a) * b)) - np.abs(t[I]))
f[(~ I2)] = (1 / a)
return f | Scaled rectangle convolved with scaled rectangle. | pyinverse/rect.py | srect_conv_srect | butala/pyinverse | 1 | python | def srect_conv_srect(t, a, b):
assert ((a > 0) and (b > 0))
if (a < b):
return srect_conv_srect(t, b, a)
f = np.zeros_like(t)
I1 = (np.abs(t) < ((a + b) / ((2 * a) * b)))
I2 = (np.abs(t) > ((a - b) / ((2 * a) * b)))
I = (I1 & I2)
f[I] = (((a + b) / ((2 * a) * b)) - np.abs(t[I]))
f[(~ I2)] = (1 / a)
return f | def srect_conv_srect(t, a, b):
assert ((a > 0) and (b > 0))
if (a < b):
return srect_conv_srect(t, b, a)
f = np.zeros_like(t)
I1 = (np.abs(t) < ((a + b) / ((2 * a) * b)))
I2 = (np.abs(t) > ((a - b) / ((2 * a) * b)))
I = (I1 & I2)
f[I] = (((a + b) / ((2 * a) * b)) - np.abs(t[I]))
f[(~ I2)] = (1 / a)
return f<|docstring|>Scaled rectangle convolved with scaled rectangle.<|endoftext|> |
c505ddb27007ac7ac3e1bee6618722f90ae042fceca31d962ccb068521d5799e | def srect_2D_proj(theta, t, a, b):
'Projection of the scaled rectangle function.'
theta = np.asarray(theta)
if (a < b):
return srect_2D_proj((theta - (np.pi / 2)), t, b, a)
P = np.empty((len(t), len(theta)))
for (k, theta_k) in enumerate((theta % (2 * np.pi))):
if (theta_k == 0):
p = (srect(t, a) / b)
elif (theta_k == (np.pi / 2)):
p = (srect(t, b) / a)
elif (theta_k == np.pi):
p = (srect((- t), a) / b)
elif (theta_k == ((3 * np.pi) / 2)):
p = (srect((- t), b) / a)
else:
if (theta_k < (np.pi / 2)):
sign = 1
elif (theta_k < np.pi):
sign = (- 1)
elif (theta_k < ((3 * np.pi) / 2)):
sign = 1
else:
sign = (- 1)
abs_cos = np.abs(np.cos(theta_k))
abs_sin = np.abs(np.sin(theta_k))
p = ((1 / (abs_cos * abs_sin)) * srect_conv_srect(t, (a / abs_cos), (b / abs_sin)))
P[(:, k)] = p
return P | Projection of the scaled rectangle function. | pyinverse/rect.py | srect_2D_proj | butala/pyinverse | 1 | python | def srect_2D_proj(theta, t, a, b):
theta = np.asarray(theta)
if (a < b):
return srect_2D_proj((theta - (np.pi / 2)), t, b, a)
P = np.empty((len(t), len(theta)))
for (k, theta_k) in enumerate((theta % (2 * np.pi))):
if (theta_k == 0):
p = (srect(t, a) / b)
elif (theta_k == (np.pi / 2)):
p = (srect(t, b) / a)
elif (theta_k == np.pi):
p = (srect((- t), a) / b)
elif (theta_k == ((3 * np.pi) / 2)):
p = (srect((- t), b) / a)
else:
if (theta_k < (np.pi / 2)):
sign = 1
elif (theta_k < np.pi):
sign = (- 1)
elif (theta_k < ((3 * np.pi) / 2)):
sign = 1
else:
sign = (- 1)
abs_cos = np.abs(np.cos(theta_k))
abs_sin = np.abs(np.sin(theta_k))
p = ((1 / (abs_cos * abs_sin)) * srect_conv_srect(t, (a / abs_cos), (b / abs_sin)))
P[(:, k)] = p
return P | def srect_2D_proj(theta, t, a, b):
theta = np.asarray(theta)
if (a < b):
return srect_2D_proj((theta - (np.pi / 2)), t, b, a)
P = np.empty((len(t), len(theta)))
for (k, theta_k) in enumerate((theta % (2 * np.pi))):
if (theta_k == 0):
p = (srect(t, a) / b)
elif (theta_k == (np.pi / 2)):
p = (srect(t, b) / a)
elif (theta_k == np.pi):
p = (srect((- t), a) / b)
elif (theta_k == ((3 * np.pi) / 2)):
p = (srect((- t), b) / a)
else:
if (theta_k < (np.pi / 2)):
sign = 1
elif (theta_k < np.pi):
sign = (- 1)
elif (theta_k < ((3 * np.pi) / 2)):
sign = 1
else:
sign = (- 1)
abs_cos = np.abs(np.cos(theta_k))
abs_sin = np.abs(np.sin(theta_k))
p = ((1 / (abs_cos * abs_sin)) * srect_conv_srect(t, (a / abs_cos), (b / abs_sin)))
P[(:, k)] = p
return P<|docstring|>Projection of the scaled rectangle function.<|endoftext|> |
bfa3f660eaa47a51e498483274d50fcd93378ac6bd89a430d2a01da03fd003bd | def srect_2D_proj_ramp(theta, t, a, b):
'Ramp filtered projection of the scaled rectangle function.'
theta = np.asarray(theta)
a = (1 / a)
b = (1 / b)
P = np.empty((len(t), len(theta)))
for (k, theta_k) in enumerate((theta % (2 * np.pi))):
if ((theta_k == 0) or (theta_k == np.pi)):
p = (((((- 2) * a) * b) / (np.pi ** 2)) / ((4 * (t ** 2)) - (a ** 2)))
elif ((theta_k == (np.pi / 2)) or (theta_k == ((3 * np.pi) / 2))):
p = (((((- 2) * a) * b) / (np.pi ** 2)) / ((4 * (t ** 2)) - (b ** 2)))
else:
p = ((1 / (((2 * (np.pi ** 2)) * np.cos(theta_k)) * np.sin(theta_k))) * np.log(np.abs((((t ** 2) - ((((a * np.cos(theta_k)) + (b * np.sin(theta_k))) / 2) ** 2)) / ((t ** 2) - ((((a * np.cos(theta_k)) - (b * np.sin(theta_k))) / 2) ** 2))))))
P[(:, k)] = p
return P | Ramp filtered projection of the scaled rectangle function. | pyinverse/rect.py | srect_2D_proj_ramp | butala/pyinverse | 1 | python | def srect_2D_proj_ramp(theta, t, a, b):
theta = np.asarray(theta)
a = (1 / a)
b = (1 / b)
P = np.empty((len(t), len(theta)))
for (k, theta_k) in enumerate((theta % (2 * np.pi))):
if ((theta_k == 0) or (theta_k == np.pi)):
p = (((((- 2) * a) * b) / (np.pi ** 2)) / ((4 * (t ** 2)) - (a ** 2)))
elif ((theta_k == (np.pi / 2)) or (theta_k == ((3 * np.pi) / 2))):
p = (((((- 2) * a) * b) / (np.pi ** 2)) / ((4 * (t ** 2)) - (b ** 2)))
else:
p = ((1 / (((2 * (np.pi ** 2)) * np.cos(theta_k)) * np.sin(theta_k))) * np.log(np.abs((((t ** 2) - ((((a * np.cos(theta_k)) + (b * np.sin(theta_k))) / 2) ** 2)) / ((t ** 2) - ((((a * np.cos(theta_k)) - (b * np.sin(theta_k))) / 2) ** 2))))))
P[(:, k)] = p
return P | def srect_2D_proj_ramp(theta, t, a, b):
theta = np.asarray(theta)
a = (1 / a)
b = (1 / b)
P = np.empty((len(t), len(theta)))
for (k, theta_k) in enumerate((theta % (2 * np.pi))):
if ((theta_k == 0) or (theta_k == np.pi)):
p = (((((- 2) * a) * b) / (np.pi ** 2)) / ((4 * (t ** 2)) - (a ** 2)))
elif ((theta_k == (np.pi / 2)) or (theta_k == ((3 * np.pi) / 2))):
p = (((((- 2) * a) * b) / (np.pi ** 2)) / ((4 * (t ** 2)) - (b ** 2)))
else:
p = ((1 / (((2 * (np.pi ** 2)) * np.cos(theta_k)) * np.sin(theta_k))) * np.log(np.abs((((t ** 2) - ((((a * np.cos(theta_k)) + (b * np.sin(theta_k))) / 2) ** 2)) / ((t ** 2) - ((((a * np.cos(theta_k)) - (b * np.sin(theta_k))) / 2) ** 2))))))
P[(:, k)] = p
return P<|docstring|>Ramp filtered projection of the scaled rectangle function.<|endoftext|> |
7b92a636e49c7e93e73bc20d12f8eedc910fe3cf18dd66e1626ea23d556a869c | def rect_conv_rect(x, a=1, b=1):
'Scaled rect convovled wtih scaled rect (CHECK IF THIS DUPLICATES srect_conv_srect).'
assert (a > 0)
assert (b > 0)
return (((step1(((x + (1 / (2 * a))) + (1 / (2 * b)))) - step1(((x - (1 / (2 * a))) + (1 / (2 * b))))) - step1(((x + (1 / (2 * a))) - (1 / (2 * b))))) + step1(((x - (1 / (2 * a))) - (1 / (2 * b))))) | Scaled rect convovled wtih scaled rect (CHECK IF THIS DUPLICATES srect_conv_srect). | pyinverse/rect.py | rect_conv_rect | butala/pyinverse | 1 | python | def rect_conv_rect(x, a=1, b=1):
assert (a > 0)
assert (b > 0)
return (((step1(((x + (1 / (2 * a))) + (1 / (2 * b)))) - step1(((x - (1 / (2 * a))) + (1 / (2 * b))))) - step1(((x + (1 / (2 * a))) - (1 / (2 * b))))) + step1(((x - (1 / (2 * a))) - (1 / (2 * b))))) | def rect_conv_rect(x, a=1, b=1):
assert (a > 0)
assert (b > 0)
return (((step1(((x + (1 / (2 * a))) + (1 / (2 * b)))) - step1(((x - (1 / (2 * a))) + (1 / (2 * b))))) - step1(((x + (1 / (2 * a))) - (1 / (2 * b))))) + step1(((x - (1 / (2 * a))) - (1 / (2 * b)))))<|docstring|>Scaled rect convovled wtih scaled rect (CHECK IF THIS DUPLICATES srect_conv_srect).<|endoftext|> |
bfaac975967a5f06523f7ad42a7431210265c4abafc429d600924f6db182c0d2 | def step(x):
'Heaviside step function u(x).'
y = np.zeros_like(x)
y[(x > 0)] = 1
return y | Heaviside step function u(x). | pyinverse/rect.py | step | butala/pyinverse | 1 | python | def step(x):
y = np.zeros_like(x)
y[(x > 0)] = 1
return y | def step(x):
y = np.zeros_like(x)
y[(x > 0)] = 1
return y<|docstring|>Heaviside step function u(x).<|endoftext|> |
1be88a74fc57578ee8d4ec9474416e600cd0f5df171bd07b150cbf44a3679a58 | def step1(x):
'Convolution of step functions.'
y = np.zeros_like(x)
y[(x > 0)] = x[(x > 0)]
return y | Convolution of step functions. | pyinverse/rect.py | step1 | butala/pyinverse | 1 | python | def step1(x):
y = np.zeros_like(x)
y[(x > 0)] = x[(x > 0)]
return y | def step1(x):
y = np.zeros_like(x)
y[(x > 0)] = x[(x > 0)]
return y<|docstring|>Convolution of step functions.<|endoftext|> |
8a824da643f6ea791e4d7a658f36bdf2dbe7f02929aae803bd7b753e8a4e99fa | def step2(x):
'Convolution of three step functions.'
y = np.zeros_like(x)
y[(x > 0)] = ((1 / 2) * (x[(x > 0)] ** 2))
return y | Convolution of three step functions. | pyinverse/rect.py | step2 | butala/pyinverse | 1 | python | def step2(x):
y = np.zeros_like(x)
y[(x > 0)] = ((1 / 2) * (x[(x > 0)] ** 2))
return y | def step2(x):
y = np.zeros_like(x)
y[(x > 0)] = ((1 / 2) * (x[(x > 0)] ** 2))
return y<|docstring|>Convolution of three step functions.<|endoftext|> |
17600e348a14c902c80ba15d140011fa1e51e080b8f15afbbb5d435ce72fc233 | def tri(x, b=1):
'Triangle function tri(bx) where tri(x) = rect(x) * rect(x).'
assert (b > 0)
return (((b * step1((x + (1 / b)))) - ((2 * b) * step1(x))) + (b * step1((x - (1 / b))))) | Triangle function tri(bx) where tri(x) = rect(x) * rect(x). | pyinverse/rect.py | tri | butala/pyinverse | 1 | python | def tri(x, b=1):
assert (b > 0)
return (((b * step1((x + (1 / b)))) - ((2 * b) * step1(x))) + (b * step1((x - (1 / b))))) | def tri(x, b=1):
assert (b > 0)
return (((b * step1((x + (1 / b)))) - ((2 * b) * step1(x))) + (b * step1((x - (1 / b)))))<|docstring|>Triangle function tri(bx) where tri(x) = rect(x) * rect(x).<|endoftext|> |
381f70b8582c9fd8e289865d68bb77a807112801170378f9493e101a95f4e5be | def rtri(x, a, b):
'Convolution of rect(ax) with tri(bx).'
assert (a > 0)
assert (b > 0)
return (b * (((((step2(((x + (1 / (2 * a))) + (1 / b))) - (2 * step2((x + (1 / (2 * a)))))) + step2(((x + (1 / (2 * a))) - (1 / b)))) - step2(((x - (1 / (2 * a))) + (1 / b)))) + (2 * step2((x - (1 / (2 * a)))))) - step2(((x - (1 / (2 * a))) - (1 / b))))) | Convolution of rect(ax) with tri(bx). | pyinverse/rect.py | rtri | butala/pyinverse | 1 | python | def rtri(x, a, b):
assert (a > 0)
assert (b > 0)
return (b * (((((step2(((x + (1 / (2 * a))) + (1 / b))) - (2 * step2((x + (1 / (2 * a)))))) + step2(((x + (1 / (2 * a))) - (1 / b)))) - step2(((x - (1 / (2 * a))) + (1 / b)))) + (2 * step2((x - (1 / (2 * a)))))) - step2(((x - (1 / (2 * a))) - (1 / b))))) | def rtri(x, a, b):
assert (a > 0)
assert (b > 0)
return (b * (((((step2(((x + (1 / (2 * a))) + (1 / b))) - (2 * step2((x + (1 / (2 * a)))))) + step2(((x + (1 / (2 * a))) - (1 / b)))) - step2(((x - (1 / (2 * a))) + (1 / b)))) + (2 * step2((x - (1 / (2 * a)))))) - step2(((x - (1 / (2 * a))) - (1 / b)))))<|docstring|>Convolution of rect(ax) with tri(bx).<|endoftext|> |
6cd9428c040be1a47abbccbfda4c8909c809789ea00b5d78bd9d874ba70b8ae6 | def square_proj_conv_rect(theta, r, a):
'Projection of square function convolved with rect(ax).'
assert (a > 0)
theta = (theta % (2 * np.pi))
if (theta in [(np.pi / 4), ((3 * np.pi) / 4), ((5 * np.pi) / 4), ((7 * np.pi) / 4)]):
return ((np.sqrt(2) * a) * rtri(r, a, (1 / (np.sqrt(2) / 2))))
elif (np.abs(theta) in [0, (np.pi / 2), np.pi, ((3 * np.pi) / 2)]):
return (a * rect_conv_rect(r, a=a))
else:
d_max = ((np.abs(np.cos(theta)) + np.abs(np.sin(theta))) / 2)
d_break = (np.abs((np.abs(np.cos(theta)) - np.abs(np.sin(theta)))) / 2)
return ((1 / np.abs((np.cos(theta) * np.sin(theta)))) * (((d_max * a) * rtri(r, a, (1 / d_max))) - ((d_break * a) * rtri(r, a, (1 / d_break))))) | Projection of square function convolved with rect(ax). | pyinverse/rect.py | square_proj_conv_rect | butala/pyinverse | 1 | python | def square_proj_conv_rect(theta, r, a):
assert (a > 0)
theta = (theta % (2 * np.pi))
if (theta in [(np.pi / 4), ((3 * np.pi) / 4), ((5 * np.pi) / 4), ((7 * np.pi) / 4)]):
return ((np.sqrt(2) * a) * rtri(r, a, (1 / (np.sqrt(2) / 2))))
elif (np.abs(theta) in [0, (np.pi / 2), np.pi, ((3 * np.pi) / 2)]):
return (a * rect_conv_rect(r, a=a))
else:
d_max = ((np.abs(np.cos(theta)) + np.abs(np.sin(theta))) / 2)
d_break = (np.abs((np.abs(np.cos(theta)) - np.abs(np.sin(theta)))) / 2)
return ((1 / np.abs((np.cos(theta) * np.sin(theta)))) * (((d_max * a) * rtri(r, a, (1 / d_max))) - ((d_break * a) * rtri(r, a, (1 / d_break))))) | def square_proj_conv_rect(theta, r, a):
assert (a > 0)
theta = (theta % (2 * np.pi))
if (theta in [(np.pi / 4), ((3 * np.pi) / 4), ((5 * np.pi) / 4), ((7 * np.pi) / 4)]):
return ((np.sqrt(2) * a) * rtri(r, a, (1 / (np.sqrt(2) / 2))))
elif (np.abs(theta) in [0, (np.pi / 2), np.pi, ((3 * np.pi) / 2)]):
return (a * rect_conv_rect(r, a=a))
else:
d_max = ((np.abs(np.cos(theta)) + np.abs(np.sin(theta))) / 2)
d_break = (np.abs((np.abs(np.cos(theta)) - np.abs(np.sin(theta)))) / 2)
return ((1 / np.abs((np.cos(theta) * np.sin(theta)))) * (((d_max * a) * rtri(r, a, (1 / d_max))) - ((d_break * a) * rtri(r, a, (1 / d_break)))))<|docstring|>Projection of square function convolved with rect(ax).<|endoftext|> |
d053bcb82965036baba056d98c2cf403ec92acafbdaabf2000d4a1e65a996304 | @pytest.fixture()
def c(app, db, location):
'A community fixture.'
_c = Community.create({})
db.session.commit()
return Community.get_record(_c.id) | A community fixture. | tests/records/test_mockrecords_api.py | c | ntarocco/invenio-communities | 3 | python | @pytest.fixture()
def c(app, db, location):
_c = Community.create({})
db.session.commit()
return Community.get_record(_c.id) | @pytest.fixture()
def c(app, db, location):
_c = Community.create({})
db.session.commit()
return Community.get_record(_c.id)<|docstring|>A community fixture.<|endoftext|> |
aaaecfaed6bc44008b9360e04ec6faf229fc12c86a7fdb8e9c71988fa5a2a770 | @pytest.fixture()
def c2(app, db, location):
'Another community fixture.'
_c = Community.create({})
db.session.commit()
return Community.get_record(_c.id) | Another community fixture. | tests/records/test_mockrecords_api.py | c2 | ntarocco/invenio-communities | 3 | python | @pytest.fixture()
def c2(app, db, location):
_c = Community.create({})
db.session.commit()
return Community.get_record(_c.id) | @pytest.fixture()
def c2(app, db, location):
_c = Community.create({})
db.session.commit()
return Community.get_record(_c.id)<|docstring|>Another community fixture.<|endoftext|> |
1de24d84d6a2ec3035f48730b9b60ad9cb8abfc300e4d18dde71e5466bb4861c | @pytest.fixture()
def record(app, db, c):
'A community fixture.'
r = MockRecord.create({})
r.communities.add(c, default=True)
r.commit()
db.session.commit()
return r | A community fixture. | tests/records/test_mockrecords_api.py | record | ntarocco/invenio-communities | 3 | python | @pytest.fixture()
def record(app, db, c):
r = MockRecord.create({})
r.communities.add(c, default=True)
r.commit()
db.session.commit()
return r | @pytest.fixture()
def record(app, db, c):
r = MockRecord.create({})
r.communities.add(c, default=True)
r.commit()
db.session.commit()
return r<|docstring|>A community fixture.<|endoftext|> |
6e66921deba021dc119117ec6384b11da8166fea5aaa8456853adb5477020c5d | def test_record_create_empty(app, db):
'Smoke test.'
record = MockRecord.create({})
db.session.commit()
assert record.schema
pytest.raises(ValidationError, MockRecord.create, {'metadata': {'title': 1}}) | Smoke test. | tests/records/test_mockrecords_api.py | test_record_create_empty | ntarocco/invenio-communities | 3 | python | def test_record_create_empty(app, db):
record = MockRecord.create({})
db.session.commit()
assert record.schema
pytest.raises(ValidationError, MockRecord.create, {'metadata': {'title': 1}}) | def test_record_create_empty(app, db):
record = MockRecord.create({})
db.session.commit()
assert record.schema
pytest.raises(ValidationError, MockRecord.create, {'metadata': {'title': 1}})<|docstring|>Smoke test.<|endoftext|> |
334d1a6643725a7942ce73c9b94b76caff6954bfc6ab78b3536af91d12a199b3 | def test_get(db, record, c):
'Loading a record should load communties and default.'
r = MockRecord.get_record(record.id)
assert (c in r.communities)
assert (r.communities.default == c) | Loading a record should load communties and default. | tests/records/test_mockrecords_api.py | test_get | ntarocco/invenio-communities | 3 | python | def test_get(db, record, c):
r = MockRecord.get_record(record.id)
assert (c in r.communities)
assert (r.communities.default == c) | def test_get(db, record, c):
r = MockRecord.get_record(record.id)
assert (c in r.communities)
assert (r.communities.default == c)<|docstring|>Loading a record should load communties and default.<|endoftext|> |
d6952b8a9b74f0a9d18f445cff9df970a64a1e0de72f6f9a59c514a78b26d5ad | def test_add(db, c):
'Test adding a record to a community.'
record = MockRecord.create({})
record.communities.add(c, default=True)
assert (record.communities.default == c)
record.commit()
assert (record['communities'] == {'default': str(c.id), 'ids': [str(c.id)]})
db.session.commit()
record = MockRecord.create({})
record.communities.add(c)
assert (record.communities.default is None)
record.commit()
assert (record['communities'] == {'ids': [str(c.id)]})
db.session.commit() | Test adding a record to a community. | tests/records/test_mockrecords_api.py | test_add | ntarocco/invenio-communities | 3 | python | def test_add(db, c):
record = MockRecord.create({})
record.communities.add(c, default=True)
assert (record.communities.default == c)
record.commit()
assert (record['communities'] == {'default': str(c.id), 'ids': [str(c.id)]})
db.session.commit()
record = MockRecord.create({})
record.communities.add(c)
assert (record.communities.default is None)
record.commit()
assert (record['communities'] == {'ids': [str(c.id)]})
db.session.commit() | def test_add(db, c):
record = MockRecord.create({})
record.communities.add(c, default=True)
assert (record.communities.default == c)
record.commit()
assert (record['communities'] == {'default': str(c.id), 'ids': [str(c.id)]})
db.session.commit()
record = MockRecord.create({})
record.communities.add(c)
assert (record.communities.default is None)
record.commit()
assert (record['communities'] == {'ids': [str(c.id)]})
db.session.commit()<|docstring|>Test adding a record to a community.<|endoftext|> |
4adcafc659e7dfbaa19a7d48d774100918ab9d8020b7dda501dc6f0eba173409 | def test_add_existing(db, c):
'Test addding same community twice.'
record = MockRecord.create({})
record.communities.add(c)
record.communities.add(c)
pytest.raises(IntegrityError, record.commit)
db.session.rollback() | Test addding same community twice. | tests/records/test_mockrecords_api.py | test_add_existing | ntarocco/invenio-communities | 3 | python | def test_add_existing(db, c):
record = MockRecord.create({})
record.communities.add(c)
record.communities.add(c)
pytest.raises(IntegrityError, record.commit)
db.session.rollback() | def test_add_existing(db, c):
record = MockRecord.create({})
record.communities.add(c)
record.communities.add(c)
pytest.raises(IntegrityError, record.commit)
db.session.rollback()<|docstring|>Test addding same community twice.<|endoftext|> |
a4c2d3ba969d222f7fbb466b24af3131d91554839c12491ada6c4fd4ffd817cc | def test_remove(db, c, record):
'Test removal of community.'
record.communities.remove(c)
assert (len(record.communities) == 0)
record.commit()
assert (record['communities'] == {})
db.session.commit()
pytest.raises(ValueError, record.communities.remove, c2) | Test removal of community. | tests/records/test_mockrecords_api.py | test_remove | ntarocco/invenio-communities | 3 | python | def test_remove(db, c, record):
record.communities.remove(c)
assert (len(record.communities) == 0)
record.commit()
assert (record['communities'] == {})
db.session.commit()
pytest.raises(ValueError, record.communities.remove, c2) | def test_remove(db, c, record):
record.communities.remove(c)
assert (len(record.communities) == 0)
record.commit()
assert (record['communities'] == {})
db.session.commit()
pytest.raises(ValueError, record.communities.remove, c2)<|docstring|>Test removal of community.<|endoftext|> |
db4a186363730283be3acfb2d1420a072674832564e355eb1e05d07e6d05017e | def Plot_SNR(var_x, sample_x, var_y, sample_y, SNRMatrix, fig=None, ax=None, display=True, return_plt=False, dl_axis=False, lb_axis=False, smooth_contours=True, cfill=True, display_cbar=True, x_axis_label=True, y_axis_label=True, x_axis_line=None, y_axis_line=None, logLevels_min=(- 1.0), logLevels_max=0.0, hspace=0.15, wspace=0.1, contour_kwargs={}, contourf_kwargs={}, xticklabels_kwargs={}, xlabels_kwargs={}, xline_kwargs={}, yticklabels_kwargs={}, ylabels_kwargs={}, yline_kwargs={}):
'Plots the SNR contours from calcSNR\n\n Parameters\n ----------\n var_x: str\n x-axis variable\n sample_x: array\n samples at which ``SNRMatrix`` was calculated corresponding to the x-axis variable\n var_y: str\n y-axis variable\n sample_y: array\n samples at which ``SNRMatrix`` was calculated corresponding to the y-axis variable\n SNRMatrix: array-like\n the matrix at which the SNR was calculated corresponding to the particular x and y-axis variable choices\n\n fig: object, optional\n matplotlib figure object on which to collate the individual plots\n ax: object, optional\n matplotlib axes object on which to plot the individual plot\n display: bool, optional\n Option to turn off display if saving multiple plots to a file\n return_plt: bool, optional\n Option to return ``fig`` and ``ax``\n dl_axis: bool, optional\n Option to turn on the right hand side labels of luminosity distance\n lb_axis: bool, optional\n Option to turn on the right hand side labels of lookback time\n smooth_contours: bool, optional\n Option to have contours appear smooth instead of tiered (depending on sample size the edges appear boxey).\n cfill: bool, optional\n Option to use filled contours or not, default is ``True``\n display_cbar: bool, optional\n Option to display the colorbar on the axes object\n x_axis_label: bool, optional\n Option to display the x axis label\n y_axis_label: bool, optional\n Option to display the y axis label\n x_axis_line: int,float, optional\n Option to display a line on the x axis if not None\n y_axis_line: int,float, optional\n Option to display a line on the y axis if not None\n logLevels_min: float, optional\n Sets the minimum log level of the colorbar, default is -1.0 which set the minimum to the log minimum of the given ``SNRMatrix``\n logLevels_max: float, optional\n Sets the maximum log level of the colorbar, default is 0.0, which sets the maximum to the log maximum value of the given ``SNRMatrix``\n hspace: float, optional\n Sets the vertical space 
between axes objects, default is 0.15\n wspace: float, optional\n Sets the horizontal space between axes objects, default is 0.1\n contour_kwargs: dict, optional\n Sets additional kwargs taken by contour in matplotlib\n contourf_kwargs: dict, optional\n Sets additional kwargs taken by contourf in matplotlib\n xticklabels_kwargs: dict, optional\n Sets additional kwargs taken by xticklabel in matplotlib\n xlabels_kwargs=: dict, optional\n Sets additional kwargs taken by xlabel in matplotlib\n xline_kwargs: dict, optional\n Sets additional kwargs taken by ax.axvline in matplotlib\n yticklabels_kwargs: dict, optional\n Sets additional kwargs taken by yticklabel in matplotlib\n ylabels_kwargs: dict, optional\n Sets additional kwargs taken by ylabel in matplotlib\n yline_kwargs: dict, optional\n Sets additional kwargs taken by ax.axhline in matplotlib\n\n '
if (fig is not None):
if (ax is not None):
pass
else:
(fig, ax) = plt.subplots()
else:
(fig, ax) = plt.subplots()
if (('colors' not in contour_kwargs.keys()) and ('cmap' not in contour_kwargs.keys())):
contour_kwargs['colors'] = 'k'
if ('linewidths' not in contour_kwargs.keys()):
contour_kwargs['linewidths'] = 2.0
if ('cmap' not in contourf_kwargs.keys()):
contourf_kwargs['cmap'] = 'viridis'
logSNR = np.log10(SNRMatrix)
if (logLevels_min == (- 1.0)):
logLevels_min = np.log10(np.array([1.0]))
if (logLevels_max == 0.0):
logLevels_max = np.ceil(np.amax(logSNR))
if (logLevels_max < logLevels_min):
raise ValueError('All SNRs are lower than 5.')
logLevels_add = np.log10(np.array([3.0, 10.0, 31.0]))
print_logLevels = np.concatenate((logLevels_min, logLevels_add, np.arange(2.0, (logLevels_max + 1.0))))
logLevels = print_logLevels
ylabel_min = min(sample_y)
ylabel_max = max(sample_y)
xlabel_min = min(sample_x)
xlabel_max = max(sample_x)
if ((xlabel_max < 0.0) or (xlabel_min < 0.0) or (var_x in ['n_p', 'T_obs'])):
xaxis_type = 'lin'
step_size = int(((xlabel_max - xlabel_min) + 1))
x_labels = np.linspace(xlabel_min, xlabel_max, step_size)
else:
x_log_range = (np.log10(xlabel_max) - np.log10(xlabel_min))
if (x_log_range >= 2.0):
xaxis_type = 'log'
step_size = int(((np.log10(xlabel_max) - np.log10(xlabel_min)) + 1))
x_labels = np.logspace(np.log10(xlabel_min), np.log10(xlabel_max), step_size)
else:
xaxis_type = 'lin'
x_scale = (10 ** round(np.log10(xlabel_min)))
x_labels = (np.arange(round((xlabel_min / x_scale)), (round((xlabel_max / x_scale)) + 1), 1) * x_scale)
if (x_labels[0] < xlabel_min):
x_labels[0] = xlabel_min
if (x_labels[(- 1)] > xlabel_max):
x_labels[(- 1)] = xlabel_max
if ((ylabel_max < 0.0) or (ylabel_min < 0.0) or (var_y in ['n_p', 'T_obs'])):
yaxis_type = 'lin'
step_size = int(((ylabel_max - ylabel_min) + 1))
y_labels = np.linspace(ylabel_min, ylabel_max, step_size)
else:
y_log_range = (np.log10(ylabel_max) - np.log10(ylabel_min))
if (y_log_range >= 2.0):
yaxis_type = 'log'
step_size = int(((np.log10(ylabel_max) - np.log10(ylabel_min)) + 1))
y_labels = np.logspace(np.log10(ylabel_min), np.log10(ylabel_max), step_size)
else:
yaxis_type = 'lin'
y_scale = (10 ** round(np.log10(ylabel_min)))
y_labels = (np.arange(round((ylabel_min / y_scale)), (round((ylabel_max / y_scale)) + 1), 1) * y_scale)
if (y_labels[0] < ylabel_min):
y_labels[0] = ylabel_min
if (y_labels[(- 1)] > ylabel_max):
y_labels[(- 1)] = ylabel_max
if ((yaxis_type == 'lin') and (xaxis_type == 'log')):
if (not cfill):
CS1 = ax.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
else:
if smooth_contours:
cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
cmap.set_under(color='white')
CS1 = ax.imshow(logSNR, extent=[np.log10(xlabel_min), np.log10(xlabel_max), ylabel_min, ylabel_max], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap)
else:
CS1 = ax.contourf(np.log10(sample_x), sample_y, logSNR, logLevels, **contourf_kwargs)
ax.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
ax.set_xlim(np.log10(xlabel_min), np.log10(xlabel_max))
ax.set_ylim(ylabel_min, ylabel_max)
elif ((yaxis_type == 'log') and (xaxis_type == 'lin')):
if (not cfill):
CS1 = ax.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
else:
if smooth_contours:
cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
cmap.set_under(color='white')
CS1 = ax.imshow(logSNR, extent=[xlabel_min, xlabel_max, np.log10(ylabel_min), np.log10(ylabel_max)], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap)
else:
CS1 = ax.contourf(sample_x, np.log10(sample_y), logSNR, logLevels, **contourf_kwargs)
ax.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
ax.set_xlim(xlabel_min, xlabel_max)
ax.set_ylim(np.log10(ylabel_min), np.log10(ylabel_max))
elif ((yaxis_type == 'lin') and (xaxis_type == 'lin')):
if (not cfill):
CS1 = ax.contour(sample_x, sample_y, logSNR, print_logLevels, **contour_kwargs)
else:
if smooth_contours:
cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
cmap.set_under(color='white')
CS1 = ax.imshow(logSNR, extent=[xlabel_min, xlabel_max, ylabel_min, ylabel_max], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap)
else:
CS1 = ax.contourf(sample_x, sample_y, logSNR, logLevels, **contourf_kwargs)
ax.contour(sample_x, sample_y, logSNR, print_logLevels, **contour_kwargs)
ax.set_xlim(xlabel_min, xlabel_max)
ax.set_ylim(ylabel_min, ylabel_max)
else:
if (not cfill):
CS1 = ax.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
else:
if smooth_contours:
cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
cmap.set_under(color='white')
CS1 = ax.imshow(logSNR, extent=[np.log10(xlabel_min), np.log10(xlabel_max), np.log10(ylabel_min), np.log10(ylabel_max)], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap, interpolation='None')
else:
CS1 = ax.contourf(np.log10(sample_x), np.log10(sample_y), logSNR, logLevels, **contourf_kwargs)
ax.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
ax.set_xlim(np.log10(xlabel_min), np.log10(xlabel_max))
ax.set_ylim(np.log10(ylabel_min), np.log10(ylabel_max))
Get_Axes_Labels(ax, 'x', var_x, xaxis_type, x_labels, x_axis_line, xlabels_kwargs, xticklabels_kwargs, xline_kwargs)
Get_Axes_Labels(ax, 'y', var_y, yaxis_type, y_labels, y_axis_line, ylabels_kwargs, yticklabels_kwargs, yline_kwargs)
if (not x_axis_label):
ax.set_xticklabels('')
ax.set_xlabel('')
if (not y_axis_label):
ax.set_yticklabels('')
ax.set_ylabel('')
if dl_axis:
if (var_y != 'z'):
raise ValueError('Sorry, we can only plot luminosity distance when redshift is on the y axis.')
ax2 = ax.twinx()
if ((yaxis_type == 'lin') and (xaxis_type == 'log')):
ax2.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
elif ((yaxis_type == 'log') and (xaxis_type == 'lin')):
ax2.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
else:
ax2.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
dists_min = cosmo.luminosity_distance(ylabel_min).to('Gpc')
dists_min = np.ceil(np.log10(dists_min.value))
dists_max = cosmo.luminosity_distance(ylabel_max).to('Gpc')
dists_max = np.ceil(np.log10(dists_max.value))
dists = np.arange(dists_min, dists_max)
dists = ((10 ** dists) * u.Gpc)
distticks = [z_at_value(cosmo.luminosity_distance, dist) for dist in dists]
ax2.set_yticks(np.log10(distticks))
ax2.set_yticklabels([(('$10^{%i}$' % np.log10(dist)) if (np.abs(int(np.log10(dist))) > 1) else '{:g}'.format(dist)) for dist in dists.value])
ax2.set_ylabel('$D_{L}$ [Gpc]')
elif lb_axis:
if (var_y != 'z'):
raise ValueError('Sorry, we can only plot lookback time when redshift is on the y axis.')
ax2 = ax.twinx()
if ((yaxis_type == 'lin') and (xaxis_type == 'log')):
ax2.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
elif ((yaxis_type == 'log') and (xaxis_type == 'lin')):
ax2.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
else:
ax2.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
ages1 = (np.array([13.5, 13, 10, 5, 1]) * u.Gyr)
ages2 = (np.array([500, 100, 10, 1]) * u.Myr)
ages2 = ages2.to('Gyr')
ages = np.hstack((ages1.value, ages2.value))
ages = (ages * u.Gyr)
ageticks = [z_at_value(cosmo.age, age) for age in ages]
ax2.set_yticks(np.log10(ageticks))
ax2.set_yticklabels(['{:g}'.format(age) for age in ages.value])
ax2.set_ylabel('$t_{\\rm cosmic}$ [Gyr]')
ax2.yaxis.set_label_coords(1.2, 0.5)
if display_cbar:
if (lb_axis or dl_axis):
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.9, 0.15, 0.025, 0.7])
if (not cfill):
norm = colors.Normalize(vmin=logLevels_min, vmax=logLevels_max)
tick_levels = np.linspace(float(logLevels_min), logLevels_max, len(print_logLevels))
cbar = mpl.colorbar.ColorbarBase(cbar_ax, ax=(ax, ax2), pad=0.01, cmap=CS1.cmap, norm=norm, boundaries=tick_levels, ticks=tick_levels, spacing='proportional')
else:
cbar = fig.colorbar(CS1, cax=cbar_ax, ax=(ax, ax2), pad=0.01)
else:
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.82, 0.15, 0.025, 0.7])
if (not cfill):
norm = colors.Normalize(vmin=logLevels_min, vmax=logLevels_max)
tick_levels = np.linspace(float(logLevels_min), logLevels_max, len(print_logLevels))
cbar = mpl.colorbar.ColorbarBase(cbar_ax, cmap=CS1.cmap, norm=norm, boundaries=tick_levels, ticks=tick_levels, spacing='proportional')
else:
cbar = fig.colorbar(CS1, cax=cbar_ax, ticks=print_logLevels)
cbar.set_label('SNR')
cbar.ax.set_yticklabels([(('$10^{%i}$' % x) if (int(x) > 1) else ('$%i$' % (10 ** x))) for x in print_logLevels], **yticklabels_kwargs)
if display:
fig.subplots_adjust(hspace=hspace, wspace=wspace)
plt.show()
if return_plt:
return (fig, ax) | Plots the SNR contours from calcSNR
Parameters
----------
var_x: str
x-axis variable
sample_x: array
samples at which ``SNRMatrix`` was calculated corresponding to the x-axis variable
var_y: str
y-axis variable
sample_y: array
samples at which ``SNRMatrix`` was calculated corresponding to the y-axis variable
SNRMatrix: array-like
the matrix at which the SNR was calculated corresponding to the particular x and y-axis variable choices
fig: object, optional
matplotlib figure object on which to collate the individual plots
ax: object, optional
matplotlib axes object on which to plot the individual plot
display: bool, optional
Option to turn off display if saving multiple plots to a file
return_plt: bool, optional
Option to return ``fig`` and ``ax``
dl_axis: bool, optional
Option to turn on the right hand side labels of luminosity distance
lb_axis: bool, optional
Option to turn on the right hand side labels of lookback time
smooth_contours: bool, optional
Option to have contours appear smooth instead of tiered (depending on sample size the edges appear boxey).
cfill: bool, optional
Option to use filled contours or not, default is ``True``
display_cbar: bool, optional
Option to display the colorbar on the axes object
x_axis_label: bool, optional
Option to display the x axis label
y_axis_label: bool, optional
Option to display the y axis label
x_axis_line: int,float, optional
Option to display a line on the x axis if not None
y_axis_line: int,float, optional
Option to display a line on the y axis if not None
logLevels_min: float, optional
Sets the minimum log level of the colorbar, default is -1.0 which set the minimum to the log minimum of the given ``SNRMatrix``
logLevels_max: float, optional
Sets the maximum log level of the colorbar, default is 0.0, which sets the maximum to the log maximum value of the given ``SNRMatrix``
hspace: float, optional
Sets the vertical space between axes objects, default is 0.15
wspace: float, optional
Sets the horizontal space between axes objects, default is 0.1
contour_kwargs: dict, optional
Sets additional kwargs taken by contour in matplotlib
contourf_kwargs: dict, optional
Sets additional kwargs taken by contourf in matplotlib
xticklabels_kwargs: dict, optional
Sets additional kwargs taken by xticklabel in matplotlib
xlabels_kwargs=: dict, optional
Sets additional kwargs taken by xlabel in matplotlib
xline_kwargs: dict, optional
Sets additional kwargs taken by ax.axvline in matplotlib
yticklabels_kwargs: dict, optional
Sets additional kwargs taken by yticklabel in matplotlib
ylabels_kwargs: dict, optional
Sets additional kwargs taken by ylabel in matplotlib
yline_kwargs: dict, optional
Sets additional kwargs taken by ax.axhline in matplotlib | gwent/snrplot.py | Plot_SNR | ark0015/GWDetectorDesignToolkit | 14 | python | def Plot_SNR(var_x, sample_x, var_y, sample_y, SNRMatrix, fig=None, ax=None, display=True, return_plt=False, dl_axis=False, lb_axis=False, smooth_contours=True, cfill=True, display_cbar=True, x_axis_label=True, y_axis_label=True, x_axis_line=None, y_axis_line=None, logLevels_min=(- 1.0), logLevels_max=0.0, hspace=0.15, wspace=0.1, contour_kwargs={}, contourf_kwargs={}, xticklabels_kwargs={}, xlabels_kwargs={}, xline_kwargs={}, yticklabels_kwargs={}, ylabels_kwargs={}, yline_kwargs={}):
'Plots the SNR contours from calcSNR\n\n Parameters\n ----------\n var_x: str\n x-axis variable\n sample_x: array\n samples at which ``SNRMatrix`` was calculated corresponding to the x-axis variable\n var_y: str\n y-axis variable\n sample_y: array\n samples at which ``SNRMatrix`` was calculated corresponding to the y-axis variable\n SNRMatrix: array-like\n the matrix at which the SNR was calculated corresponding to the particular x and y-axis variable choices\n\n fig: object, optional\n matplotlib figure object on which to collate the individual plots\n ax: object, optional\n matplotlib axes object on which to plot the individual plot\n display: bool, optional\n Option to turn off display if saving multiple plots to a file\n return_plt: bool, optional\n Option to return ``fig`` and ``ax``\n dl_axis: bool, optional\n Option to turn on the right hand side labels of luminosity distance\n lb_axis: bool, optional\n Option to turn on the right hand side labels of lookback time\n smooth_contours: bool, optional\n Option to have contours appear smooth instead of tiered (depending on sample size the edges appear boxey).\n cfill: bool, optional\n Option to use filled contours or not, default is ``True``\n display_cbar: bool, optional\n Option to display the colorbar on the axes object\n x_axis_label: bool, optional\n Option to display the x axis label\n y_axis_label: bool, optional\n Option to display the y axis label\n x_axis_line: int,float, optional\n Option to display a line on the x axis if not None\n y_axis_line: int,float, optional\n Option to display a line on the y axis if not None\n logLevels_min: float, optional\n Sets the minimum log level of the colorbar, default is -1.0 which set the minimum to the log minimum of the given ``SNRMatrix``\n logLevels_max: float, optional\n Sets the maximum log level of the colorbar, default is 0.0, which sets the maximum to the log maximum value of the given ``SNRMatrix``\n hspace: float, optional\n Sets the vertical space 
between axes objects, default is 0.15\n wspace: float, optional\n Sets the horizontal space between axes objects, default is 0.1\n contour_kwargs: dict, optional\n Sets additional kwargs taken by contour in matplotlib\n contourf_kwargs: dict, optional\n Sets additional kwargs taken by contourf in matplotlib\n xticklabels_kwargs: dict, optional\n Sets additional kwargs taken by xticklabel in matplotlib\n xlabels_kwargs=: dict, optional\n Sets additional kwargs taken by xlabel in matplotlib\n xline_kwargs: dict, optional\n Sets additional kwargs taken by ax.axvline in matplotlib\n yticklabels_kwargs: dict, optional\n Sets additional kwargs taken by yticklabel in matplotlib\n ylabels_kwargs: dict, optional\n Sets additional kwargs taken by ylabel in matplotlib\n yline_kwargs: dict, optional\n Sets additional kwargs taken by ax.axhline in matplotlib\n\n '
if (fig is not None):
if (ax is not None):
pass
else:
(fig, ax) = plt.subplots()
else:
(fig, ax) = plt.subplots()
if (('colors' not in contour_kwargs.keys()) and ('cmap' not in contour_kwargs.keys())):
contour_kwargs['colors'] = 'k'
if ('linewidths' not in contour_kwargs.keys()):
contour_kwargs['linewidths'] = 2.0
if ('cmap' not in contourf_kwargs.keys()):
contourf_kwargs['cmap'] = 'viridis'
logSNR = np.log10(SNRMatrix)
if (logLevels_min == (- 1.0)):
logLevels_min = np.log10(np.array([1.0]))
if (logLevels_max == 0.0):
logLevels_max = np.ceil(np.amax(logSNR))
if (logLevels_max < logLevels_min):
raise ValueError('All SNRs are lower than 5.')
logLevels_add = np.log10(np.array([3.0, 10.0, 31.0]))
print_logLevels = np.concatenate((logLevels_min, logLevels_add, np.arange(2.0, (logLevels_max + 1.0))))
logLevels = print_logLevels
ylabel_min = min(sample_y)
ylabel_max = max(sample_y)
xlabel_min = min(sample_x)
xlabel_max = max(sample_x)
if ((xlabel_max < 0.0) or (xlabel_min < 0.0) or (var_x in ['n_p', 'T_obs'])):
xaxis_type = 'lin'
step_size = int(((xlabel_max - xlabel_min) + 1))
x_labels = np.linspace(xlabel_min, xlabel_max, step_size)
else:
x_log_range = (np.log10(xlabel_max) - np.log10(xlabel_min))
if (x_log_range >= 2.0):
xaxis_type = 'log'
step_size = int(((np.log10(xlabel_max) - np.log10(xlabel_min)) + 1))
x_labels = np.logspace(np.log10(xlabel_min), np.log10(xlabel_max), step_size)
else:
xaxis_type = 'lin'
x_scale = (10 ** round(np.log10(xlabel_min)))
x_labels = (np.arange(round((xlabel_min / x_scale)), (round((xlabel_max / x_scale)) + 1), 1) * x_scale)
if (x_labels[0] < xlabel_min):
x_labels[0] = xlabel_min
if (x_labels[(- 1)] > xlabel_max):
x_labels[(- 1)] = xlabel_max
if ((ylabel_max < 0.0) or (ylabel_min < 0.0) or (var_y in ['n_p', 'T_obs'])):
yaxis_type = 'lin'
step_size = int(((ylabel_max - ylabel_min) + 1))
y_labels = np.linspace(ylabel_min, ylabel_max, step_size)
else:
y_log_range = (np.log10(ylabel_max) - np.log10(ylabel_min))
if (y_log_range >= 2.0):
yaxis_type = 'log'
step_size = int(((np.log10(ylabel_max) - np.log10(ylabel_min)) + 1))
y_labels = np.logspace(np.log10(ylabel_min), np.log10(ylabel_max), step_size)
else:
yaxis_type = 'lin'
y_scale = (10 ** round(np.log10(ylabel_min)))
y_labels = (np.arange(round((ylabel_min / y_scale)), (round((ylabel_max / y_scale)) + 1), 1) * y_scale)
if (y_labels[0] < ylabel_min):
y_labels[0] = ylabel_min
if (y_labels[(- 1)] > ylabel_max):
y_labels[(- 1)] = ylabel_max
if ((yaxis_type == 'lin') and (xaxis_type == 'log')):
if (not cfill):
CS1 = ax.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
else:
if smooth_contours:
cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
cmap.set_under(color='white')
CS1 = ax.imshow(logSNR, extent=[np.log10(xlabel_min), np.log10(xlabel_max), ylabel_min, ylabel_max], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap)
else:
CS1 = ax.contourf(np.log10(sample_x), sample_y, logSNR, logLevels, **contourf_kwargs)
ax.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
ax.set_xlim(np.log10(xlabel_min), np.log10(xlabel_max))
ax.set_ylim(ylabel_min, ylabel_max)
elif ((yaxis_type == 'log') and (xaxis_type == 'lin')):
if (not cfill):
CS1 = ax.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
else:
if smooth_contours:
cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
cmap.set_under(color='white')
CS1 = ax.imshow(logSNR, extent=[xlabel_min, xlabel_max, np.log10(ylabel_min), np.log10(ylabel_max)], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap)
else:
CS1 = ax.contourf(sample_x, np.log10(sample_y), logSNR, logLevels, **contourf_kwargs)
ax.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
ax.set_xlim(xlabel_min, xlabel_max)
ax.set_ylim(np.log10(ylabel_min), np.log10(ylabel_max))
elif ((yaxis_type == 'lin') and (xaxis_type == 'lin')):
if (not cfill):
CS1 = ax.contour(sample_x, sample_y, logSNR, print_logLevels, **contour_kwargs)
else:
if smooth_contours:
cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
cmap.set_under(color='white')
CS1 = ax.imshow(logSNR, extent=[xlabel_min, xlabel_max, ylabel_min, ylabel_max], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap)
else:
CS1 = ax.contourf(sample_x, sample_y, logSNR, logLevels, **contourf_kwargs)
ax.contour(sample_x, sample_y, logSNR, print_logLevels, **contour_kwargs)
ax.set_xlim(xlabel_min, xlabel_max)
ax.set_ylim(ylabel_min, ylabel_max)
else:
if (not cfill):
CS1 = ax.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
else:
if smooth_contours:
cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
cmap.set_under(color='white')
CS1 = ax.imshow(logSNR, extent=[np.log10(xlabel_min), np.log10(xlabel_max), np.log10(ylabel_min), np.log10(ylabel_max)], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap, interpolation='None')
else:
CS1 = ax.contourf(np.log10(sample_x), np.log10(sample_y), logSNR, logLevels, **contourf_kwargs)
ax.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
ax.set_xlim(np.log10(xlabel_min), np.log10(xlabel_max))
ax.set_ylim(np.log10(ylabel_min), np.log10(ylabel_max))
Get_Axes_Labels(ax, 'x', var_x, xaxis_type, x_labels, x_axis_line, xlabels_kwargs, xticklabels_kwargs, xline_kwargs)
Get_Axes_Labels(ax, 'y', var_y, yaxis_type, y_labels, y_axis_line, ylabels_kwargs, yticklabels_kwargs, yline_kwargs)
if (not x_axis_label):
ax.set_xticklabels()
ax.set_xlabel()
if (not y_axis_label):
ax.set_yticklabels()
ax.set_ylabel()
if dl_axis:
if (var_y != 'z'):
raise ValueError('Sorry, we can only plot luminosity distance when redshift is on the y axis.')
ax2 = ax.twinx()
if ((yaxis_type == 'lin') and (xaxis_type == 'log')):
ax2.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
elif ((yaxis_type == 'log') and (xaxis_type == 'lin')):
ax2.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
else:
ax2.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
dists_min = cosmo.luminosity_distance(ylabel_min).to('Gpc')
dists_min = np.ceil(np.log10(dists_min.value))
dists_max = cosmo.luminosity_distance(ylabel_max).to('Gpc')
dists_max = np.ceil(np.log10(dists_max.value))
dists = np.arange(dists_min, dists_max)
dists = ((10 ** dists) * u.Gpc)
distticks = [z_at_value(cosmo.luminosity_distance, dist) for dist in dists]
ax2.set_yticks(np.log10(distticks))
ax2.set_yticklabels([(('$10^{%i}$' % np.log10(dist)) if (np.abs(int(np.log10(dist))) > 1) else '{:g}'.format(dist)) for dist in dists.value])
ax2.set_ylabel('$D_{L}$ [Gpc]')
elif lb_axis:
if (var_y != 'z'):
raise ValueError('Sorry, we can only plot lookback time when redshift is on the y axis.')
ax2 = ax.twinx()
if ((yaxis_type == 'lin') and (xaxis_type == 'log')):
ax2.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
elif ((yaxis_type == 'log') and (xaxis_type == 'lin')):
ax2.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
else:
ax2.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
ages1 = (np.array([13.5, 13, 10, 5, 1]) * u.Gyr)
ages2 = (np.array([500, 100, 10, 1]) * u.Myr)
ages2 = ages2.to('Gyr')
ages = np.hstack((ages1.value, ages2.value))
ages = (ages * u.Gyr)
ageticks = [z_at_value(cosmo.age, age) for age in ages]
ax2.set_yticks(np.log10(ageticks))
ax2.set_yticklabels(['{:g}'.format(age) for age in ages.value])
ax2.set_ylabel('$t_{\\rm cosmic}$ [Gyr]')
ax2.yaxis.set_label_coords(1.2, 0.5)
if display_cbar:
if (lb_axis or dl_axis):
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.9, 0.15, 0.025, 0.7])
if (not cfill):
norm = colors.Normalize(vmin=logLevels_min, vmax=logLevels_max)
tick_levels = np.linspace(float(logLevels_min), logLevels_max, len(print_logLevels))
cbar = mpl.colorbar.ColorbarBase(cbar_ax, ax=(ax, ax2), pad=0.01, cmap=CS1.cmap, norm=norm, boundaries=tick_levels, ticks=tick_levels, spacing='proportional')
else:
cbar = fig.colorbar(CS1, cax=cbar_ax, ax=(ax, ax2), pad=0.01)
else:
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.82, 0.15, 0.025, 0.7])
if (not cfill):
norm = colors.Normalize(vmin=logLevels_min, vmax=logLevels_max)
tick_levels = np.linspace(float(logLevels_min), logLevels_max, len(print_logLevels))
cbar = mpl.colorbar.ColorbarBase(cbar_ax, cmap=CS1.cmap, norm=norm, boundaries=tick_levels, ticks=tick_levels, spacing='proportional')
else:
cbar = fig.colorbar(CS1, cax=cbar_ax, ticks=print_logLevels)
cbar.set_label('SNR')
cbar.ax.set_yticklabels([(('$10^{%i}$' % x) if (int(x) > 1) else ('$%i$' % (10 ** x))) for x in print_logLevels], **yticklabels_kwargs)
if display:
fig.subplots_adjust(hspace=hspace, wspace=wspace)
plt.show()
if return_plt:
return (fig, ax) | def Plot_SNR(var_x, sample_x, var_y, sample_y, SNRMatrix, fig=None, ax=None, display=True, return_plt=False, dl_axis=False, lb_axis=False, smooth_contours=True, cfill=True, display_cbar=True, x_axis_label=True, y_axis_label=True, x_axis_line=None, y_axis_line=None, logLevels_min=(- 1.0), logLevels_max=0.0, hspace=0.15, wspace=0.1, contour_kwargs={}, contourf_kwargs={}, xticklabels_kwargs={}, xlabels_kwargs={}, xline_kwargs={}, yticklabels_kwargs={}, ylabels_kwargs={}, yline_kwargs={}):
def _Twin_Contour_Axis(ax, sample_x, sample_y, logSNR, print_logLevels, xaxis_type, yaxis_type, contour_kwargs):
    """Create a twin y axis on ``ax`` and redraw the labeled SNR contour lines on it.

    Returns the new twin axes object.
    """
    ax2 = ax.twinx()
    # NOTE(review): when both axes are linear this falls through to the
    # log10/log10 branch; that mirrors the original code — confirm intended.
    if yaxis_type == 'lin' and xaxis_type == 'log':
        ax2.contour(np.log10(sample_x), sample_y, logSNR, print_logLevels, **contour_kwargs)
    elif yaxis_type == 'log' and xaxis_type == 'lin':
        ax2.contour(sample_x, np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
    else:
        ax2.contour(np.log10(sample_x), np.log10(sample_y), logSNR, print_logLevels, **contour_kwargs)
    return ax2


def Plot_SNR(var_x, sample_x, var_y, sample_y, SNRMatrix, fig=None, ax=None, display=True, return_plt=False, dl_axis=False, lb_axis=False, smooth_contours=True, cfill=True, display_cbar=True, x_axis_label=True, y_axis_label=True, x_axis_line=None, y_axis_line=None, logLevels_min=(-1.0), logLevels_max=0.0, hspace=0.15, wspace=0.1, contour_kwargs={}, contourf_kwargs={}, xticklabels_kwargs={}, xlabels_kwargs={}, xline_kwargs={}, yticklabels_kwargs={}, ylabels_kwargs={}, yline_kwargs={}):
    """Plot the SNR contours from calcSNR.

    Parameters
    ----------
    var_x: str
        x-axis variable.
    sample_x: array
        Samples at which ``SNRMatrix`` was calculated for the x-axis variable.
    var_y: str
        y-axis variable.
    sample_y: array
        Samples at which ``SNRMatrix`` was calculated for the y-axis variable.
    SNRMatrix: array-like
        Matrix of SNRs corresponding to the x and y-axis sample choices.
    fig: object, optional
        matplotlib figure object on which to collate the individual plots.
    ax: object, optional
        matplotlib axes object on which to plot the individual plot.
    display: bool, optional
        Option to turn off display if saving multiple plots to a file.
    return_plt: bool, optional
        Option to return ``fig`` and ``ax``.
    dl_axis: bool, optional
        Display right-hand-side luminosity distance labels (requires ``var_y == 'z'``).
    lb_axis: bool, optional
        Display right-hand-side lookback time labels (requires ``var_y == 'z'``).
    smooth_contours: bool, optional
        Render a smooth image instead of tiered contours.
    cfill: bool, optional
        Use filled contours, default ``True``.
    display_cbar: bool, optional
        Display the colorbar on the axes object.
    x_axis_label, y_axis_label: bool, optional
        Display the corresponding axis label and tick labels.
    x_axis_line, y_axis_line: int or float, optional
        If not ``None``, draw a line at this value on the corresponding axis.
    logLevels_min: float, optional
        Minimum log level of the colorbar; the default ``-1.0`` is a sentinel
        meaning ``log10(1)``.
    logLevels_max: float, optional
        Maximum log level of the colorbar; the default ``0.0`` is a sentinel
        meaning the log maximum of ``SNRMatrix``.
    hspace: float, optional
        Vertical space between axes objects, default 0.15.
    wspace: float, optional
        Horizontal space between axes objects, default 0.1.
    contour_kwargs, contourf_kwargs: dict, optional
        Extra kwargs forwarded to matplotlib ``contour``/``contourf``.
    xticklabels_kwargs, xlabels_kwargs, xline_kwargs: dict, optional
        Extra kwargs for the x tick labels, x label, and ``ax.axvline``.
    yticklabels_kwargs, ylabels_kwargs, yline_kwargs: dict, optional
        Extra kwargs for the y tick labels, y label, and ``ax.axhline``.
    """
    # Reuse the supplied figure/axes pair; create a fresh one if either is missing.
    if fig is None or ax is None:
        fig, ax = plt.subplots()

    # BUGFIX: work on copies so we never mutate the caller's dicts or the
    # shared mutable `={}` defaults (writing into them leaks state across calls).
    contour_kwargs = dict(contour_kwargs)
    contourf_kwargs = dict(contourf_kwargs)
    if 'colors' not in contour_kwargs and 'cmap' not in contour_kwargs:
        contour_kwargs['colors'] = 'k'
    if 'linewidths' not in contour_kwargs:
        contour_kwargs['linewidths'] = 2.0
    if 'cmap' not in contourf_kwargs:
        contourf_kwargs['cmap'] = 'viridis'

    logSNR = np.log10(SNRMatrix)
    # -1.0 / 0.0 act as sentinels: default to log10(1) and the data maximum.
    if logLevels_min == -1.0:
        logLevels_min = np.log10(np.array([1.0]))
    # Ensure a user-supplied scalar still concatenates below.
    logLevels_min = np.atleast_1d(logLevels_min)
    if logLevels_max == 0.0:
        logLevels_max = np.ceil(np.amax(logSNR))
    if logLevels_max < logLevels_min:
        # NOTE(review): message kept verbatim, though the default minimum is SNR=1.
        raise ValueError('All SNRs are lower than 5.')
    # Labeled levels: SNR = 1, 3, 10, 31, then every decade up to the maximum.
    logLevels_add = np.log10(np.array([3.0, 10.0, 31.0]))
    print_logLevels = np.concatenate((logLevels_min, logLevels_add, np.arange(2.0, logLevels_max + 1.0)))
    logLevels = print_logLevels

    ylabel_min = min(sample_y)
    ylabel_max = max(sample_y)
    xlabel_min = min(sample_x)
    xlabel_max = max(sample_x)

    # Pick a linear or log x axis and the corresponding tick positions.
    if xlabel_max < 0.0 or xlabel_min < 0.0 or var_x in ['n_p', 'T_obs']:
        xaxis_type = 'lin'
        step_size = int(xlabel_max - xlabel_min + 1)
        x_labels = np.linspace(xlabel_min, xlabel_max, step_size)
    else:
        x_log_range = np.log10(xlabel_max) - np.log10(xlabel_min)
        if x_log_range >= 2.0:
            # Two or more decades: log axis with decade ticks.
            xaxis_type = 'log'
            step_size = int(np.log10(xlabel_max) - np.log10(xlabel_min) + 1)
            x_labels = np.logspace(np.log10(xlabel_min), np.log10(xlabel_max), step_size)
        else:
            xaxis_type = 'lin'
            x_scale = 10 ** round(np.log10(xlabel_min))
            x_labels = np.arange(round(xlabel_min / x_scale), round(xlabel_max / x_scale) + 1, 1) * x_scale
            # Clamp the outermost ticks to the sampled range.
            if x_labels[0] < xlabel_min:
                x_labels[0] = xlabel_min
            if x_labels[-1] > xlabel_max:
                x_labels[-1] = xlabel_max

    # Same choice for the y axis.
    if ylabel_max < 0.0 or ylabel_min < 0.0 or var_y in ['n_p', 'T_obs']:
        yaxis_type = 'lin'
        step_size = int(ylabel_max - ylabel_min + 1)
        y_labels = np.linspace(ylabel_min, ylabel_max, step_size)
    else:
        y_log_range = np.log10(ylabel_max) - np.log10(ylabel_min)
        if y_log_range >= 2.0:
            yaxis_type = 'log'
            step_size = int(np.log10(ylabel_max) - np.log10(ylabel_min) + 1)
            y_labels = np.logspace(np.log10(ylabel_min), np.log10(ylabel_max), step_size)
        else:
            yaxis_type = 'lin'
            y_scale = 10 ** round(np.log10(ylabel_min))
            y_labels = np.arange(round(ylabel_min / y_scale), round(ylabel_max / y_scale) + 1, 1) * y_scale
            if y_labels[0] < ylabel_min:
                y_labels[0] = ylabel_min
            if y_labels[-1] > ylabel_max:
                y_labels[-1] = ylabel_max

    # Log-sampled axes are plotted in log10 space; linear axes as-is.
    plot_x = np.log10(sample_x) if xaxis_type == 'log' else sample_x
    plot_y = np.log10(sample_y) if yaxis_type == 'log' else sample_y
    xlim = (np.log10(xlabel_min), np.log10(xlabel_max)) if xaxis_type == 'log' else (xlabel_min, xlabel_max)
    ylim = (np.log10(ylabel_min), np.log10(ylabel_max)) if yaxis_type == 'log' else (ylabel_min, ylabel_max)
    # Only the log-log branch originally disabled imshow interpolation; keep that.
    imshow_kwargs = {'interpolation': 'None'} if (xaxis_type == 'log' and yaxis_type == 'log') else {}

    if not cfill:
        CS1 = ax.contour(plot_x, plot_y, logSNR, print_logLevels, **contour_kwargs)
    elif smooth_contours:
        cmap = mpl.cm.get_cmap(name=contourf_kwargs['cmap'])
        cmap.set_under(color='white')  # paint SNR below the minimum level white
        CS1 = ax.imshow(logSNR, extent=[xlim[0], xlim[1], ylim[0], ylim[1]], vmin=logLevels_min, vmax=logLevels_max, origin='lower', aspect='auto', cmap=cmap, **imshow_kwargs)
    else:
        CS1 = ax.contourf(plot_x, plot_y, logSNR, logLevels, **contourf_kwargs)
        ax.contour(plot_x, plot_y, logSNR, print_logLevels, **contour_kwargs)
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)

    Get_Axes_Labels(ax, 'x', var_x, xaxis_type, x_labels, x_axis_line, xlabels_kwargs, xticklabels_kwargs, xline_kwargs)
    Get_Axes_Labels(ax, 'y', var_y, yaxis_type, y_labels, y_axis_line, ylabels_kwargs, yticklabels_kwargs, yline_kwargs)

    # BUGFIX: these were called without arguments (ax.set_xticklabels() /
    # ax.set_xlabel()), which raises a TypeError in matplotlib; pass empty
    # labels to hide them instead.
    if not x_axis_label:
        ax.set_xticklabels([])
        ax.set_xlabel('')
    if not y_axis_label:
        ax.set_yticklabels([])
        ax.set_ylabel('')

    if dl_axis:
        if var_y != 'z':
            raise ValueError('Sorry, we can only plot luminosity distance when redshift is on the y axis.')
        ax2 = _Twin_Contour_Axis(ax, sample_x, sample_y, logSNR, print_logLevels, xaxis_type, yaxis_type, contour_kwargs)
        # Decade-spaced luminosity-distance ticks spanning the redshift range.
        dists_min = cosmo.luminosity_distance(ylabel_min).to('Gpc')
        dists_min = np.ceil(np.log10(dists_min.value))
        dists_max = cosmo.luminosity_distance(ylabel_max).to('Gpc')
        dists_max = np.ceil(np.log10(dists_max.value))
        dists = np.arange(dists_min, dists_max)
        dists = (10 ** dists) * u.Gpc
        distticks = [z_at_value(cosmo.luminosity_distance, dist) for dist in dists]
        ax2.set_yticks(np.log10(distticks))
        ax2.set_yticklabels([('$10^{%i}$' % np.log10(dist)) if np.abs(int(np.log10(dist))) > 1 else '{:g}'.format(dist) for dist in dists.value])
        ax2.set_ylabel('$D_{L}$ [Gpc]')
    elif lb_axis:
        if var_y != 'z':
            raise ValueError('Sorry, we can only plot lookback time when redshift is on the y axis.')
        ax2 = _Twin_Contour_Axis(ax, sample_x, sample_y, logSNR, print_logLevels, xaxis_type, yaxis_type, contour_kwargs)
        # Cosmic-time ticks from 13.5 Gyr down to 1 Myr, converted to redshift.
        ages1 = np.array([13.5, 13, 10, 5, 1]) * u.Gyr
        ages2 = np.array([500, 100, 10, 1]) * u.Myr
        ages2 = ages2.to('Gyr')
        ages = np.hstack((ages1.value, ages2.value))
        ages = ages * u.Gyr
        ageticks = [z_at_value(cosmo.age, age) for age in ages]
        ax2.set_yticks(np.log10(ageticks))
        ax2.set_yticklabels(['{:g}'.format(age) for age in ages.value])
        ax2.set_ylabel('$t_{\\rm cosmic}$ [Gyr]')
        ax2.yaxis.set_label_coords(1.2, 0.5)

    if display_cbar:
        if lb_axis or dl_axis:
            # Twin-axis layout: push the colorbar further right.
            fig.subplots_adjust(right=0.8)
            cbar_ax = fig.add_axes([0.9, 0.15, 0.025, 0.7])
            if not cfill:
                norm = colors.Normalize(vmin=logLevels_min, vmax=logLevels_max)
                tick_levels = np.linspace(float(logLevels_min), logLevels_max, len(print_logLevels))
                cbar = mpl.colorbar.ColorbarBase(cbar_ax, ax=(ax, ax2), pad=0.01, cmap=CS1.cmap, norm=norm, boundaries=tick_levels, ticks=tick_levels, spacing='proportional')
            else:
                cbar = fig.colorbar(CS1, cax=cbar_ax, ax=(ax, ax2), pad=0.01)
        else:
            fig.subplots_adjust(right=0.8)
            cbar_ax = fig.add_axes([0.82, 0.15, 0.025, 0.7])
            if not cfill:
                norm = colors.Normalize(vmin=logLevels_min, vmax=logLevels_max)
                tick_levels = np.linspace(float(logLevels_min), logLevels_max, len(print_logLevels))
                cbar = mpl.colorbar.ColorbarBase(cbar_ax, cmap=CS1.cmap, norm=norm, boundaries=tick_levels, ticks=tick_levels, spacing='proportional')
            else:
                cbar = fig.colorbar(CS1, cax=cbar_ax, ticks=print_logLevels)
        cbar.set_label('SNR')
        cbar.ax.set_yticklabels([('$10^{%i}$' % x) if int(x) > 1 else ('$%i$' % (10 ** x)) for x in print_logLevels], **yticklabels_kwargs)

    if display:
        fig.subplots_adjust(hspace=hspace, wspace=wspace)
        plt.show()
    if return_plt:
        return fig, ax
Parameters
----------
var_x: str
x-axis variable
sample_x: array
samples at which ``SNRMatrix`` was calculated corresponding to the x-axis variable
var_y: str
y-axis variable
sample_y: array
samples at which ``SNRMatrix`` was calculated corresponding to the y-axis variable
SNRMatrix: array-like
the matrix at which the SNR was calculated corresponding to the particular x and y-axis variable choices
fig: object, optional
matplotlib figure object on which to collate the individual plots
ax: object, optional
matplotlib axes object on which to plot the individual plot
display: bool, optional
Option to turn off display if saving multiple plots to a file
return_plt: bool, optional
Option to return ``fig`` and ``ax``
dl_axis: bool, optional
Option to turn on the right hand side labels of luminosity distance
lb_axis: bool, optional
Option to turn on the right hand side labels of lookback time
smooth_contours: bool, optional
Option to have contours appear smooth instead of tiered (depending on sample size the edges can appear boxy).
cfill: bool, optional
Option to use filled contours or not, default is ``True``
display_cbar: bool, optional
Option to display the colorbar on the axes object
x_axis_label: bool, optional
Option to display the x axis label
y_axis_label: bool, optional
Option to display the y axis label
x_axis_line: int,float, optional
Option to display a line on the x axis if not None
y_axis_line: int,float, optional
Option to display a line on the y axis if not None
logLevels_min: float, optional
Sets the minimum log level of the colorbar, default is -1.0 which set the minimum to the log minimum of the given ``SNRMatrix``
logLevels_max: float, optional
Sets the maximum log level of the colorbar, default is 0.0, which sets the maximum to the log maximum value of the given ``SNRMatrix``
hspace: float, optional
Sets the vertical space between axes objects, default is 0.15
wspace: float, optional
Sets the horizontal space between axes objects, default is 0.1
contour_kwargs: dict, optional
Sets additional kwargs taken by contour in matplotlib
contourf_kwargs: dict, optional
Sets additional kwargs taken by contourf in matplotlib
xticklabels_kwargs: dict, optional
Sets additional kwargs taken by xticklabel in matplotlib
xlabels_kwargs: dict, optional
Sets additional kwargs taken by xlabel in matplotlib
xline_kwargs: dict, optional
Sets additional kwargs taken by ax.axvline in matplotlib
yticklabels_kwargs: dict, optional
Sets additional kwargs taken by yticklabel in matplotlib
ylabels_kwargs: dict, optional
Sets additional kwargs taken by ylabel in matplotlib
yline_kwargs: dict, optional
Sets additional kwargs taken by ax.axhline in matplotlib<|endoftext|> |
def Get_Axes_Labels(ax, var_axis, var, var_scale, orig_labels, line_val, label_kwargs, tick_label_kwargs, line_kwargs):
    """Set paper-ready ticks and labels for one axis of an SNR plot.

    Parameters
    ----------
    ax: object
        The current matplotlib axes object.
    var_axis: str
        The axis to change labels and ticks, either ``'y'`` or ``'x'``.
    var: str
        The variable to label (e.g. ``'M'``, ``'q'``, ``'z'``, ``'n_p'``, ...).
    var_scale: str
        ``'log'`` or ``'lin'``; used for variables that support either scaling
        and for the generic fallback when ``var`` is not recognized.
    orig_labels: list, np.ndarray
        The original tick values for the axis; may be thinned or rescaled
        depending on the variable.
    line_val: int, float
        If not ``None``, draw a line at this (non-log10) value on ``var_axis``.
    label_kwargs: dict
        Extra kwargs applied to the axis label.
    tick_label_kwargs: dict
        Extra kwargs applied to the axis tick labels.
    line_kwargs: dict
        Extra kwargs for the ``axhline``/``axvline`` drawn at ``line_val``.

    Raises
    ------
    ValueError
        If ``var_axis`` is neither ``'x'`` nor ``'y'``.
    """
    if var_axis not in ['y', 'x']:
        # BUGFIX: message previously read "can only by x or y".
        raise ValueError('var_axis can only be x or y')
    # Properties are collected here and applied in one ax.update() call.
    ax_dict = {}
    if var == 'M':
        # Total mass: decade ticks in log10 space.
        ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
        ax_dict[var_axis + 'label'] = '$M_{\\mathrm{tot}}~[M_{\\odot}]$'
        ax_dict[var_axis + 'ticklabels'] = [('$10^{%i}$' % x) if int(x) > 1 else ('$%i$' % (10 ** x)) for x in np.log10(orig_labels)]
    elif var == 'q':
        # Mass ratio: keep every other tick.
        new_labels = orig_labels[::2]
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = '$\\mathrm{Mass~Ratio}~q$'
        ax_dict[var_axis + 'ticklabels'] = [('$%i$' % int(x)) for x in new_labels]
    elif var == 'z':
        ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
        ax_dict[var_axis + 'label'] = '$\\mathrm{Redshift}~z$'
        ax_dict[var_axis + 'ticklabels'] = [(x if int(x) < 1 else int(x)) for x in orig_labels]
    elif var in ['chi1', 'chi2', 'chii']:
        # Spins: one tick per 0.1, thinned to every other tick.
        new_labels = np.arange(round(min(orig_labels) * 10), round(max(orig_labels) * 10) + 1, 1) / 10
        new_labels = new_labels[::2]
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = '$\\mathrm{Spin}~\\chi_{i}$'
        ax_dict[var_axis + 'ticklabels'] = [('$%.1f$' % x) for x in new_labels]
    elif var == 'L':
        ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
        ax_dict[var_axis + 'label'] = 'Arm Length [m]'
        ax_dict[var_axis + 'ticklabels'] = [('$10^{%i}$' % x) if int(x) > 1 else ('$%i$' % (10 ** x)) for x in np.log10(orig_labels)]
    elif var == 'A_acc':
        ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
        ax_dict[var_axis + 'label'] = '$A_{\\mathrm{acc}} [\\mathrm{m~s^{-2}}]$'
        ax_dict[var_axis + 'ticklabels'] = [('$10^{%.0f}$' % x) for x in np.log10(orig_labels)]
    elif var == 'A_IFO':
        if var_scale == 'log':
            ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
            ax_dict[var_axis + 'label'] = '$A_{\\mathrm{IFO}}$ [m]'
            ax_dict[var_axis + 'ticklabels'] = [('$10^{%.0f}$' % x) for x in np.log10(orig_labels)]
        elif var_scale == 'lin':
            # Linear scale is displayed in picometres, stepping every third tick.
            scale = 10 ** round(np.log10(min(orig_labels)))
            new_labels = np.arange(round(min(orig_labels) / scale), round(max(orig_labels) / scale) + 1, 3) * scale
            ax_dict[var_axis + 'ticks'] = new_labels
            ax_dict[var_axis + 'label'] = '$A_{\\mathrm{IFO}}$ [pm]'
            ax_dict[var_axis + 'ticklabels'] = [('$%.1f$' % x) for x in new_labels / scale]
    elif var == 'f_acc_break_low':
        scale = 10 ** round(np.log10(min(orig_labels)))
        new_labels = np.arange(round(min(orig_labels) / scale), round(max(orig_labels) / scale) + 1, 1) * scale
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = '$f_{\\mathrm{acc,low}}$ [mHz]'
        ax_dict[var_axis + 'ticklabels'] = [('$%.1f$' % x) for x in new_labels * 1000.0]
    elif var == 'f_acc_break_high':
        scale = 10 ** round(np.log10(min(orig_labels)))
        new_labels = np.arange(round(min(orig_labels) / scale), round(max(orig_labels) / scale) + 1, 1) * scale
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = '$f_{\\mathrm{acc,high}}$ [mHz]'
        ax_dict[var_axis + 'ticklabels'] = [('$%.1f$' % x) for x in new_labels * 1000.0]
    elif var == 'f_IFO_break':
        scale = 10 ** round(np.log10(min(orig_labels)))
        new_labels = np.arange(round(min(orig_labels) / scale), round(max(orig_labels) / scale) + 1, 1) * scale
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = '$f_{\\mathrm{IFO,break}}$ [mHz]'
        ax_dict[var_axis + 'ticklabels'] = [('$%.1f$' % x) for x in new_labels * 1000.0]
    elif var == 'n_p':
        # Thin the ticks to roughly ten across the sampled range.
        sample_range = max(orig_labels) - min(orig_labels)
        sample_rate = max(2, int(sample_range / 10))
        new_labels = orig_labels[::sample_rate]
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = '$\\mathrm{Number~of~Pulsars}$'
        ax_dict[var_axis + 'ticklabels'] = [('$%i$' % int(x)) for x in new_labels]
    elif var == 'cadence':
        new_labels = np.arange(round(min(orig_labels)), round(max(orig_labels)) + 1, 5)
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = '$\\mathrm{Observation~Cadence}$ $[\\mathrm{yr}^{-1}]$'
        ax_dict[var_axis + 'ticklabels'] = [('$%i$' % int(x)) for x in new_labels]
    elif var == 'sigma':
        # TOA RMS is stored in seconds, displayed in nanoseconds.
        scale = 10 ** round(np.log10(min(orig_labels)))
        new_labels = np.arange(round(min(orig_labels) / scale), round(max(orig_labels) / scale) + 1, 1) * scale
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = 'TOA Error RMS [ns]'
        ax_dict[var_axis + 'ticklabels'] = [('$%.0f$' % x) for x in new_labels * 1000000000.0]
    elif var == 'T_obs':
        new_labels = orig_labels[::2]
        ax_dict[var_axis + 'ticks'] = new_labels
        ax_dict[var_axis + 'label'] = '${\\rm T_{obs}}$ [yr]'
        ax_dict[var_axis + 'ticklabels'] = [('$%i$' % int(x)) for x in new_labels]
    elif var == 'Infrastructure Length':
        if var_scale == 'log':
            ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
            ax_dict[var_axis + 'label'] = 'Infrastructure Length [m]'
            ax_dict[var_axis + 'ticklabels'] = [('$10^{%.0f}$' % y) if abs(int(y)) > 1 else ('$%.1f$' % (10 ** y)) for y in np.log10(orig_labels)]
        elif var_scale == 'lin':
            ax_dict[var_axis + 'ticks'] = orig_labels
            ax_dict[var_axis + 'label'] = 'Infrastructure Length [km]'
            ax_dict[var_axis + 'ticklabels'] = [('$%.1f$' % (x / 1000.0)) for x in orig_labels]
    elif var == 'Laser Power':
        if var_scale == 'log':
            ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
            ax_dict[var_axis + 'label'] = 'Laser Power [W]'
            ax_dict[var_axis + 'ticklabels'] = [('$10^{%.0f}$' % x) if abs(int(x)) > 1 else ('$%.1f$' % (10 ** x)) for x in np.log10(orig_labels)]
        elif var_scale == 'lin':
            ax_dict[var_axis + 'ticks'] = orig_labels
            ax_dict[var_axis + 'label'] = 'Laser Power [W]'
            ax_dict[var_axis + 'ticklabels'] = [('$%.1f$' % x) for x in orig_labels]
    elif var == 'Seismic Gamma':
        if var_scale == 'log':
            ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
            ax_dict[var_axis + 'label'] = 'Seismic Gamma'
            ax_dict[var_axis + 'ticklabels'] = [('$10^{%.0f}$' % y) if abs(int(y)) > 1 else ('$%.1f$' % (10 ** y)) for y in np.log10(orig_labels)]
        elif var_scale == 'lin':
            ax_dict[var_axis + 'ticks'] = orig_labels
            ax_dict[var_axis + 'label'] = 'Seismic Gamma'
            ax_dict[var_axis + 'ticklabels'] = [('$%.1f$' % y) for y in orig_labels]
    elif var == 'Materials Substrate Temp':
        if var_scale == 'lin':
            ax_dict[var_axis + 'ticks'] = orig_labels
            ax_dict[var_axis + 'label'] = 'Mirror Substrate Temp [K]'
            ax_dict[var_axis + 'ticklabels'] = [('$%.1f \\times 10^{%i}$' % (x / (10 ** int(np.log10(x))), np.log10(x))) if np.abs(int(np.log10(x))) > 1 else '{:g}'.format(x) for x in orig_labels]
        elif var_scale == 'log':
            ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
            ax_dict[var_axis + 'label'] = 'Mirror Substrate Temp [K]'
            ax_dict[var_axis + 'ticklabels'] = [('$10^{%.0f}$' % y) if abs(int(y)) > 1 else ('$%.1f$' % (10 ** y)) for y in np.log10(orig_labels)]
    # Generic fallback for unrecognized variables: label with the raw name.
    elif var_scale == 'lin':
        ax_dict[var_axis + 'ticks'] = orig_labels
        ax_dict[var_axis + 'label'] = str(var)
        ax_dict[var_axis + 'ticklabels'] = [('$%.1f \\times 10^{%i}$' % (x / (10 ** int(np.log10(x))), np.log10(x))) if np.abs(int(np.log10(x))) > 1 else '{:g}'.format(x) for x in orig_labels]
    elif var_scale == 'log':
        ax_dict[var_axis + 'ticks'] = np.log10(orig_labels)
        ax_dict[var_axis + 'label'] = str(var)
        ax_dict[var_axis + 'ticklabels'] = [('$10^{%.0f}$' % y) if abs(int(y)) > 1 else ('$%.1f$' % (10 ** y)) for y in np.log10(orig_labels)]

    if line_val is not None:
        # BUGFIX: copy before inserting defaults so the caller's dict (or a
        # shared mutable `={}` default upstream) is never mutated.
        line_kwargs = dict(line_kwargs)
        line_kwargs.setdefault('linestyle', '--')
        line_kwargs.setdefault('color', 'k')
        line_kwargs.setdefault('label', 'Proposed Value')
        if var_scale == 'log':
            if var_axis == 'y':
                ax.axhline(y=np.log10(line_val), **line_kwargs)
            elif var_axis == 'x':
                ax.axvline(x=np.log10(line_val), **line_kwargs)
        elif var_scale == 'lin':
            if var_axis == 'y':
                ax.axhline(y=line_val, **line_kwargs)
            elif var_axis == 'x':
                ax.axvline(x=line_val, **line_kwargs)

    # Apply all collected tick/label properties at once.
    ax.update(ax_dict)
    if label_kwargs:
        if var_axis == 'y':
            ax.set_ylabel(ax.get_ylabel(), **label_kwargs)
        elif var_axis == 'x':
            ax.set_xlabel(ax.get_xlabel(), **label_kwargs)
    if tick_label_kwargs:
        if var_axis == 'y':
            ax.set_yticklabels(ax.get_yticklabels(), **tick_label_kwargs)
        elif var_axis == 'x':
            ax.set_xticklabels(ax.get_xticklabels(), **tick_label_kwargs)
Parameters
----------
ax: object
The current axes object
var_axis: str
The axis to change labels and ticks, can either be ``'y'`` or ``'x'``
var: str
The variable to label
orig_labels: list,np.ndarray
The original labels for the particular axis, may be updated depending on parameter
line_val: int,float
Value of line plotted on ``var_axis`` if not None. Assumed to be non-log10 value
label_kwargs: dict
The dictionary adjusting the particular axis' label kwargs
tick_label_kwargs: dict
The dictionary adjusting the particular axis' tick label kwargs
line_kwargs: dict
The dictionary associated with the line displayed on ``var_axis`` | gwent/snrplot.py | Get_Axes_Labels | ark0015/GWDetectorDesignToolkit | 14 | python | def Get_Axes_Labels(ax, var_axis, var, var_scale, orig_labels, line_val, label_kwargs, tick_label_kwargs, line_kwargs):
"Gives paper plot labels for given axis\n\n Parameters\n ----------\n ax: object\n The current axes object\n var_axis: str\n The axis to change labels and ticks, can either be ``'y'`` or ``'x'``\n var: str\n The variable to label\n orig_labels: list,np.ndarray\n The original labels for the particular axis, may be updated depending on parameter\n line_val: int,float\n Value of line plotted on ``var_axis`` if not None. Assumed to be non-log10 value\n label_kwargs: dict\n The dictionary adjusting the particular axis' label kwargs\n tick_label_kwargs: dict\n The dictionary adjusting the particular axis' tick label kwargs\n line_kwargs: dict\n The dictionary associated with the line displayed on ``var_axis``\n\n "
if (var_axis not in ['y', 'x']):
raise ValueError('var_axis can only by x or y')
ax_dict = {}
if (var == 'M'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = '$M_{\\mathrm{tot}}~[M_{\\odot}]$'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%i}$' % x) if (int(x) > 1) else ('$%i$' % (10 ** x))) for x in np.log10(orig_labels)]
elif (var == 'q'):
new_labels = orig_labels[::2]
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$\\mathrm{Mass~Ratio}~q$'
ax_dict[(var_axis + 'ticklabels')] = [('$%i$' % int(x)) for x in new_labels]
elif (var == 'z'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = '$\\mathrm{Redshift}~z$'
ax_dict[(var_axis + 'ticklabels')] = [(x if (int(x) < 1) else int(x)) for x in orig_labels]
elif (var in ['chi1', 'chi2', 'chii']):
new_labels = (np.arange(round((min(orig_labels) * 10)), (round((max(orig_labels) * 10)) + 1), 1) / 10)
new_labels = new_labels[::2]
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$\\mathrm{Spin}~\\chi_{i}$'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in new_labels]
elif (var == 'L'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Arm Length [m]'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%i}$' % x) if (int(x) > 1) else ('$%i$' % (10 ** x))) for x in np.log10(orig_labels)]
elif (var == 'A_acc'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = '$A_{\\mathrm{acc}} [\\mathrm{m~s^{-2}}]$'
ax_dict[(var_axis + 'ticklabels')] = [('$10^{%.0f}$' % x) for x in np.log10(orig_labels)]
elif (var == 'A_IFO'):
if (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = '$A_{\\mathrm{IFO}}$ [m]'
ax_dict[(var_axis + 'ticklabels')] = [('$10^{%.0f}$' % x) for x in np.log10(orig_labels)]
elif (var_scale == 'lin'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 3) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$A_{\\mathrm{IFO}}$ [pm]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in (new_labels / scale)]
elif (var == 'f_acc_break_low'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 1) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$f_{\\mathrm{acc,low}}$ [mHz]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in (new_labels * 1000.0)]
elif (var == 'f_acc_break_high'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 1) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$f_{\\mathrm{acc,high}}$ [mHz]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in (new_labels * 1000.0)]
elif (var == 'f_IFO_break'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 1) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$f_{\\mathrm{IFO,break}}$ [mHz]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in (new_labels * 1000.0)]
elif (var == 'n_p'):
sample_range = (max(orig_labels) - min(orig_labels))
sample_rate = max(2, int((sample_range / 10)))
new_labels = orig_labels[::sample_rate]
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$\\mathrm{Number~of~Pulsars}$'
ax_dict[(var_axis + 'ticklabels')] = [('$%i$' % int(x)) for x in new_labels]
elif (var == 'cadence'):
new_labels = np.arange(round(min(orig_labels)), (round(max(orig_labels)) + 1), 5)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$\\mathrm{Observation~Cadence}$ $[\\mathrm{yr}^{-1}]$'
ax_dict[(var_axis + 'ticklabels')] = [('$%i$' % int(x)) for x in new_labels]
elif (var == 'sigma'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 1) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = 'TOA Error RMS [ns]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.0f$' % x) for x in (new_labels * 1000000000.0)]
elif (var == 'T_obs'):
new_labels = orig_labels[::2]
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '${\\rm T_{obs}}$ [yr]'
ax_dict[(var_axis + 'ticklabels')] = [('$%i$' % int(x)) for x in new_labels]
elif (var == 'Infrastructure Length'):
if (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Infrastructure Length [m]'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % y) if (abs(int(y)) > 1) else ('$%.1f$' % (10 ** y))) for y in np.log10(orig_labels)]
elif (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = 'Infrastructure Length [km]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % (x / 1000.0)) for x in orig_labels]
elif (var == 'Laser Power'):
if (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Laser Power [W]'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % x) if (abs(int(x)) > 1) else ('$%.1f$' % (10 ** x))) for x in np.log10(orig_labels)]
elif (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = 'Laser Power [W]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in orig_labels]
elif (var == 'Seismic Gamma'):
if (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Seismic Gamma'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % y) if (abs(int(y)) > 1) else ('$%.1f$' % (10 ** y))) for y in np.log10(orig_labels)]
elif (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = 'Seismic Gamma'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % y) for y in orig_labels]
elif (var == 'Materials Substrate Temp'):
if (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = 'Mirror Substrate Temp [K]'
ax_dict[(var_axis + 'ticklabels')] = [(('$%.1f \\times 10^{%i}$' % ((x / (10 ** int(np.log10(x)))), np.log10(x))) if (np.abs(int(np.log10(x))) > 1) else '{:g}'.format(x)) for x in orig_labels]
elif (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Mirror Substrate Temp [K]'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % y) if (abs(int(y)) > 1) else ('$%.1f$' % (10 ** y))) for y in np.log10(orig_labels)]
elif (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = str(var)
ax_dict[(var_axis + 'ticklabels')] = [(('$%.1f \\times 10^{%i}$' % ((x / (10 ** int(np.log10(x)))), np.log10(x))) if (np.abs(int(np.log10(x))) > 1) else '{:g}'.format(x)) for x in orig_labels]
elif (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = str(var)
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % y) if (abs(int(y)) > 1) else ('$%.1f$' % (10 ** y))) for y in np.log10(orig_labels)]
if (line_val is not None):
if ('linestyle' not in line_kwargs.keys()):
line_kwargs['linestyle'] = '--'
if ('color' not in line_kwargs.keys()):
line_kwargs['color'] = 'k'
if ('label' not in line_kwargs.keys()):
line_kwargs['label'] = 'Proposed Value'
if (var_scale == 'log'):
if (var_axis == 'y'):
ax.axhline(y=np.log10(line_val), **line_kwargs)
elif (var_axis == 'x'):
ax.axvline(x=np.log10(line_val), **line_kwargs)
elif (var_scale == 'lin'):
if (var_axis == 'y'):
ax.axhline(y=line_val, **line_kwargs)
elif (var_axis == 'x'):
ax.axvline(x=line_val, **line_kwargs)
ax.update(ax_dict)
if label_kwargs:
if (var_axis == 'y'):
ax.set_ylabel(ax.get_ylabel(), **label_kwargs)
elif (var_axis == 'x'):
ax.set_xlabel(ax.get_xlabel(), **label_kwargs)
if tick_label_kwargs:
if (var_axis == 'y'):
ax.set_yticklabels(ax.get_yticklabels(), **tick_label_kwargs)
elif (var_axis == 'x'):
ax.set_xticklabels(ax.get_xticklabels(), **tick_label_kwargs) | def Get_Axes_Labels(ax, var_axis, var, var_scale, orig_labels, line_val, label_kwargs, tick_label_kwargs, line_kwargs):
"Gives paper plot labels for given axis\n\n Parameters\n ----------\n ax: object\n The current axes object\n var_axis: str\n The axis to change labels and ticks, can either be ``'y'`` or ``'x'``\n var: str\n The variable to label\n orig_labels: list,np.ndarray\n The original labels for the particular axis, may be updated depending on parameter\n line_val: int,float\n Value of line plotted on ``var_axis`` if not None. Assumed to be non-log10 value\n label_kwargs: dict\n The dictionary adjusting the particular axis' label kwargs\n tick_label_kwargs: dict\n The dictionary adjusting the particular axis' tick label kwargs\n line_kwargs: dict\n The dictionary associated with the line displayed on ``var_axis``\n\n "
if (var_axis not in ['y', 'x']):
raise ValueError('var_axis can only by x or y')
ax_dict = {}
if (var == 'M'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = '$M_{\\mathrm{tot}}~[M_{\\odot}]$'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%i}$' % x) if (int(x) > 1) else ('$%i$' % (10 ** x))) for x in np.log10(orig_labels)]
elif (var == 'q'):
new_labels = orig_labels[::2]
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$\\mathrm{Mass~Ratio}~q$'
ax_dict[(var_axis + 'ticklabels')] = [('$%i$' % int(x)) for x in new_labels]
elif (var == 'z'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = '$\\mathrm{Redshift}~z$'
ax_dict[(var_axis + 'ticklabels')] = [(x if (int(x) < 1) else int(x)) for x in orig_labels]
elif (var in ['chi1', 'chi2', 'chii']):
new_labels = (np.arange(round((min(orig_labels) * 10)), (round((max(orig_labels) * 10)) + 1), 1) / 10)
new_labels = new_labels[::2]
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$\\mathrm{Spin}~\\chi_{i}$'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in new_labels]
elif (var == 'L'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Arm Length [m]'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%i}$' % x) if (int(x) > 1) else ('$%i$' % (10 ** x))) for x in np.log10(orig_labels)]
elif (var == 'A_acc'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = '$A_{\\mathrm{acc}} [\\mathrm{m~s^{-2}}]$'
ax_dict[(var_axis + 'ticklabels')] = [('$10^{%.0f}$' % x) for x in np.log10(orig_labels)]
elif (var == 'A_IFO'):
if (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = '$A_{\\mathrm{IFO}}$ [m]'
ax_dict[(var_axis + 'ticklabels')] = [('$10^{%.0f}$' % x) for x in np.log10(orig_labels)]
elif (var_scale == 'lin'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 3) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$A_{\\mathrm{IFO}}$ [pm]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in (new_labels / scale)]
elif (var == 'f_acc_break_low'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 1) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$f_{\\mathrm{acc,low}}$ [mHz]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in (new_labels * 1000.0)]
elif (var == 'f_acc_break_high'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 1) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$f_{\\mathrm{acc,high}}$ [mHz]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in (new_labels * 1000.0)]
elif (var == 'f_IFO_break'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 1) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$f_{\\mathrm{IFO,break}}$ [mHz]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in (new_labels * 1000.0)]
elif (var == 'n_p'):
sample_range = (max(orig_labels) - min(orig_labels))
sample_rate = max(2, int((sample_range / 10)))
new_labels = orig_labels[::sample_rate]
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$\\mathrm{Number~of~Pulsars}$'
ax_dict[(var_axis + 'ticklabels')] = [('$%i$' % int(x)) for x in new_labels]
elif (var == 'cadence'):
new_labels = np.arange(round(min(orig_labels)), (round(max(orig_labels)) + 1), 5)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '$\\mathrm{Observation~Cadence}$ $[\\mathrm{yr}^{-1}]$'
ax_dict[(var_axis + 'ticklabels')] = [('$%i$' % int(x)) for x in new_labels]
elif (var == 'sigma'):
scale = (10 ** round(np.log10(min(orig_labels))))
new_labels = (np.arange(round((min(orig_labels) / scale)), (round((max(orig_labels) / scale)) + 1), 1) * scale)
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = 'TOA Error RMS [ns]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.0f$' % x) for x in (new_labels * 1000000000.0)]
elif (var == 'T_obs'):
new_labels = orig_labels[::2]
ax_dict[(var_axis + 'ticks')] = new_labels
ax_dict[(var_axis + 'label')] = '${\\rm T_{obs}}$ [yr]'
ax_dict[(var_axis + 'ticklabels')] = [('$%i$' % int(x)) for x in new_labels]
elif (var == 'Infrastructure Length'):
if (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Infrastructure Length [m]'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % y) if (abs(int(y)) > 1) else ('$%.1f$' % (10 ** y))) for y in np.log10(orig_labels)]
elif (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = 'Infrastructure Length [km]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % (x / 1000.0)) for x in orig_labels]
elif (var == 'Laser Power'):
if (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Laser Power [W]'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % x) if (abs(int(x)) > 1) else ('$%.1f$' % (10 ** x))) for x in np.log10(orig_labels)]
elif (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = 'Laser Power [W]'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % x) for x in orig_labels]
elif (var == 'Seismic Gamma'):
if (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Seismic Gamma'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % y) if (abs(int(y)) > 1) else ('$%.1f$' % (10 ** y))) for y in np.log10(orig_labels)]
elif (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = 'Seismic Gamma'
ax_dict[(var_axis + 'ticklabels')] = [('$%.1f$' % y) for y in orig_labels]
elif (var == 'Materials Substrate Temp'):
if (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = 'Mirror Substrate Temp [K]'
ax_dict[(var_axis + 'ticklabels')] = [(('$%.1f \\times 10^{%i}$' % ((x / (10 ** int(np.log10(x)))), np.log10(x))) if (np.abs(int(np.log10(x))) > 1) else '{:g}'.format(x)) for x in orig_labels]
elif (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = 'Mirror Substrate Temp [K]'
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % y) if (abs(int(y)) > 1) else ('$%.1f$' % (10 ** y))) for y in np.log10(orig_labels)]
elif (var_scale == 'lin'):
ax_dict[(var_axis + 'ticks')] = orig_labels
ax_dict[(var_axis + 'label')] = str(var)
ax_dict[(var_axis + 'ticklabels')] = [(('$%.1f \\times 10^{%i}$' % ((x / (10 ** int(np.log10(x)))), np.log10(x))) if (np.abs(int(np.log10(x))) > 1) else '{:g}'.format(x)) for x in orig_labels]
elif (var_scale == 'log'):
ax_dict[(var_axis + 'ticks')] = np.log10(orig_labels)
ax_dict[(var_axis + 'label')] = str(var)
ax_dict[(var_axis + 'ticklabels')] = [(('$10^{%.0f}$' % y) if (abs(int(y)) > 1) else ('$%.1f$' % (10 ** y))) for y in np.log10(orig_labels)]
if (line_val is not None):
if ('linestyle' not in line_kwargs.keys()):
line_kwargs['linestyle'] = '--'
if ('color' not in line_kwargs.keys()):
line_kwargs['color'] = 'k'
if ('label' not in line_kwargs.keys()):
line_kwargs['label'] = 'Proposed Value'
if (var_scale == 'log'):
if (var_axis == 'y'):
ax.axhline(y=np.log10(line_val), **line_kwargs)
elif (var_axis == 'x'):
ax.axvline(x=np.log10(line_val), **line_kwargs)
elif (var_scale == 'lin'):
if (var_axis == 'y'):
ax.axhline(y=line_val, **line_kwargs)
elif (var_axis == 'x'):
ax.axvline(x=line_val, **line_kwargs)
ax.update(ax_dict)
if label_kwargs:
if (var_axis == 'y'):
ax.set_ylabel(ax.get_ylabel(), **label_kwargs)
elif (var_axis == 'x'):
ax.set_xlabel(ax.get_xlabel(), **label_kwargs)
if tick_label_kwargs:
if (var_axis == 'y'):
ax.set_yticklabels(ax.get_yticklabels(), **tick_label_kwargs)
elif (var_axis == 'x'):
ax.set_xticklabels(ax.get_xticklabels(), **tick_label_kwargs)<|docstring|>Gives paper plot labels for given axis
Parameters
----------
ax: object
The current axes object
var_axis: str
The axis to change labels and ticks, can either be ``'y'`` or ``'x'``
var: str
The variable to label
orig_labels: list,np.ndarray
The original labels for the particular axis, may be updated depending on parameter
line_val: int,float
Value of line plotted on ``var_axis`` if not None. Assumed to be non-log10 value
label_kwargs: dict
The dictionary adjusting the particular axis' label kwargs
tick_label_kwargs: dict
The dictionary adjusting the particular axis' tick label kwargs
line_kwargs: dict
The dictionary associated with the line displayed on ``var_axis``<|endoftext|> |
def ValidMatches(basename, cc, grep_lines):
    """Filter out 'git grep' matches whose header file is already listed.

    Args:
        basename: basename of the header file, e.g. 'foo.h'.
        cc: regex pattern matching the corresponding source file name,
            e.g. r'\bfoo\.(cc|cpp|mm)\b'.
        grep_lines: 'git grep -n' output lines, each 'file:linenr:contents'.

    Returns:
        A list of (gn_file, line_number, new_line) tuples describing where
        a header entry should be inserted.
    """
    matches = []
    for line in grep_lines:
        # Split on the first two colons only: the matched GN line itself may
        # contain ':' (the original unbounded split raised ValueError then).
        (gnfile, linenr, contents) = line.split(':', 2)
        linenr = int(linenr)
        # The candidate header entry: same line with the .cc name replaced.
        new = re.sub(cc, basename, contents)
        with open(gnfile) as f:  # close promptly; the original leaked the handle
            lines = f.read().splitlines()
        assert contents in lines[linenr - 1]
        # Skip when the header already appears on a neighboring line.
        # Guard the indices: a match on the last line must not IndexError,
        # and a match on line 1 must not wrap around to the file's end.
        if linenr < len(lines) and lines[linenr] == new:
            continue
        if linenr >= 2 and lines[linenr - 2] == new:
            continue
        print('  ', gnfile, linenr, new)
        matches.append((gnfile, linenr, new))
    return matches
def AddHeadersNextToCC(headers, skip_ambiguous=True):
    """Add header files next to the corresponding .cc files in GN files.

    When skip_ambiguous is True, headers whose .cc file is matched in several
    places are skipped; otherwise the user is asked interactively which
    matches to edit. Manual cleaning up is likely required afterwards,
    especially if not skip_ambiguous.

    Args:
        headers: iterable of header file paths (entries may carry whitespace).
        skip_ambiguous: skip headers with multiple candidate insertion points.

    Returns:
        The list of headers that could not be handled automatically.
    """
    edits = {}  # gn file -> {line number -> line to insert after it}
    unhandled = []
    for filename in headers:
        filename = filename.strip()
        if not (filename.endswith('.h') or filename.endswith('.hh')):
            continue
        basename = os.path.basename(filename)
        print(filename)
        # Regex for the source file corresponding to this header.
        cc = '\\b' + os.path.splitext(basename)[0] + '\\.(cc|cpp|mm)\\b'
        (out, returncode) = GitGrep('(/|")' + cc + '"')
        if returncode != 0 or not out:
            unhandled.append(filename)
            continue
        matches = ValidMatches(basename, cc, out.splitlines())
        if len(matches) == 0:
            continue
        if len(matches) > 1:
            print('\n[WARNING] Ambiguous matching for', filename)
            for (index, match) in enumerate(matches, 1):
                print('%d: %s' % (index, match))
            print()
            if skip_ambiguous:
                continue
            # input() replaces the Python 2-only raw_input(), which would
            # NameError under the Python 3 this script otherwise targets.
            picked = input('Pick the matches ("2,3" for multiple): ')
            try:
                matches = [matches[int(i) - 1] for i in picked.split(',')]
            except (ValueError, IndexError):
                continue
        for match in matches:
            (gnfile, linenr, new) = match
            print('  ', gnfile, linenr, new)
            edits.setdefault(gnfile, {})[linenr] = new
    for gnfile in edits:
        with open(gnfile) as f:  # avoid leaking the read handle
            lines = f.read().splitlines()
        # Insert bottom-up so earlier insertions don't shift later line numbers.
        for l in sorted(edits[gnfile].keys(), reverse=True):
            lines.insert(l, edits[gnfile][l])
        with open(gnfile, 'w') as f:
            f.write('\n'.join(lines) + '\n')
    return unhandled
def AddHeadersToSources(headers, skip_ambiguous=True):
    """Add header files to the sources list in the first GN file.

    The target GN file is the first BUILD.gn found walking up the parent
    directories of each header. This usually does the wrong thing for _test
    files if the test and the main target are in the same .gn file.
    When skip_ambiguous is True, skip if multiple sources arrays are found.

    "git cl format" afterwards is required. Manually cleaning up duplicated
    items is likely required.

    Args:
        headers: iterable of header file paths.
        skip_ambiguous: skip GN files containing several 'sources = [' arrays.
    """
    for filename in headers:
        filename = filename.strip()
        print(filename)
        dirname = os.path.dirname(filename)
        # Walk up until a BUILD.gn is found; bail out at the filesystem root
        # instead of spinning forever (os.path.dirname('/') == '/').
        while not os.path.exists(os.path.join(dirname, 'BUILD.gn')):
            parent = os.path.dirname(dirname)
            if parent == dirname:
                dirname = None
                break
            dirname = parent
        if dirname is None:
            print('[WARNING] No BUILD.gn found for', filename)
            continue
        rel = filename[len(dirname) + 1:]
        gnfile = os.path.join(dirname, 'BUILD.gn')
        with open(gnfile) as f:  # close promptly; the original leaked the handle
            lines = f.read().splitlines()
        matched = [i for (i, l) in enumerate(lines) if ' sources = [' in l]
        if skip_ambiguous and len(matched) > 1:
            print('[WARNING] Multiple sources in', gnfile)
            continue
        if len(matched) < 1:
            continue
        print('  ', gnfile, rel)
        index = matched[0]
        lines.insert(index + 1, '"%s",' % rel)
        with open(gnfile, 'w') as f:
            f.write('\n'.join(lines) + '\n')
def RemoveHeader(headers, skip_ambiguous=True):
    """Remove references to non-existing headers from GN files.

    When skip_ambiguous is True, headers referenced in multiple places are
    skipped; otherwise the user picks interactively which references to
    delete.

    Args:
        headers: iterable of header file paths that no longer exist.
        skip_ambiguous: skip headers with several matching references.

    Returns:
        The list of headers whose references were not found.
    """
    edits = {}  # gn file -> set of 1-based line numbers to delete
    unhandled = []
    for filename in headers:
        filename = filename.strip()
        if not (filename.endswith('.h') or filename.endswith('.hh')):
            continue
        basename = os.path.basename(filename)
        print(filename)
        (out, returncode) = GitGrep('(/|")' + basename + '"')
        if returncode != 0 or not out:
            unhandled.append(filename)
            print('  Not found')
            continue
        grep_lines = out.splitlines()
        matches = []
        for line in grep_lines:
            # Split on the first two colons only: the matched contents may
            # itself contain ':' (the original three-way split raised then).
            (gnfile, linenr, contents) = line.split(':', 2)
            print('  ', gnfile, linenr, contents)
            linenr = int(linenr)
            with open(gnfile) as f:  # close promptly; the original leaked it
                lines = f.read().splitlines()
            assert contents in lines[linenr - 1]
            matches.append((gnfile, linenr, contents))
        if len(matches) == 0:
            continue
        if len(matches) > 1:
            print('\n[WARNING] Ambiguous matching for', filename)
            for (index, match) in enumerate(matches, 1):
                print('%d: %s' % (index, match))
            print()
            if skip_ambiguous:
                continue
            # input() replaces the Python 2-only raw_input().
            picked = input('Pick the matches ("2,3" for multiple): ')
            try:
                matches = [matches[int(i) - 1] for i in picked.split(',')]
            except (ValueError, IndexError):
                continue
        for match in matches:
            (gnfile, linenr, contents) = match
            print('  ', gnfile, linenr, contents)
            edits.setdefault(gnfile, set()).add(linenr)
    for gnfile in edits:
        with open(gnfile) as f:
            lines = f.read().splitlines()
        # Delete bottom-up so the remaining line numbers stay valid.
        for l in sorted(edits[gnfile], reverse=True):
            lines.pop(l - 1)
        with open(gnfile, 'w') as f:
            f.write('\n'.join(lines) + '\n')
    return unhandled
def conv_block(m, num_kernels, kernel_size, strides, padding, activation, dropout, data_format, bn):
    """Building block with two convolutional layers for one U-Net level.

    Applies conv -> (optional batch norm) -> dropout -> conv ->
    (optional batch norm) to the incoming tensor.

    :param m: input model/tensor
    :param num_kernels: number of convolution filters on this level, positive integer
    :param kernel_size: size of the convolution kernel, tuple of two positive integers
    :param strides: strides values, tuple of two positive integers
    :param padding: padding used by the convolutions, 'same' or 'valid'
    :param activation: activation function applied after every convolution
    :param dropout: fraction of weights to drop, float between 0 and 1
    :param data_format: ordering of input dimensions, 'channels_first' or 'channels_last'
    :param bn: whether to insert a Batch Normalization layer after each convolution
    :return: output tensor of the block
    """
    layer = Convolution2D(num_kernels, kernel_size, strides=strides,
                          activation=activation, padding=padding,
                          data_format=data_format)(m)
    if bn:
        layer = BatchNormalization()(layer)
    layer = Dropout(dropout)(layer)
    layer = Convolution2D(num_kernels, kernel_size, strides=strides,
                          activation=activation, padding=padding,
                          data_format=data_format)(layer)
    if bn:
        layer = BatchNormalization()(layer)
    return layer
:param m: model
:param num_kernels: number of convolution filters on the particular level, positive integer
:param kernel_size: size of the convolution kernel, tuple of two positive integers
:param strides: strides values, tuple of two positive integers
:param padding: used padding by convolution, takes values: 'same' or 'valid'
:param activation: activation_function after every convolution
:param dropout: percentage of weights to be dropped, float between 0 and 1
:param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'
:param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,
False do not use Batch Normalization
:return: model | Unet/utils/unet.py | conv_block | prediction2020/unet-vessel-segmentation | 23 | python | def conv_block(m, num_kernels, kernel_size, strides, padding, activation, dropout, data_format, bn):
"\n Bulding block with convolutional layers for one level.\n\n :param m: model\n :param num_kernels: number of convolution filters on the particular level, positive integer\n :param kernel_size: size of the convolution kernel, tuple of two positive integers\n :param strides: strides values, tuple of two positive integers\n :param padding: used padding by convolution, takes values: 'same' or 'valid'\n :param activation: activation_function after every convolution\n :param dropout: percentage of weights to be dropped, float between 0 and 1\n :param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'\n :param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,\n False do not use Batch Normalization\n :return: model\n "
n = Convolution2D(num_kernels, kernel_size, strides=strides, activation=activation, padding=padding, data_format=data_format)(m)
n = (BatchNormalization()(n) if bn else n)
n = Dropout(dropout)(n)
n = Convolution2D(num_kernels, kernel_size, strides=strides, activation=activation, padding=padding, data_format=data_format)(n)
n = (BatchNormalization()(n) if bn else n)
return n | def conv_block(m, num_kernels, kernel_size, strides, padding, activation, dropout, data_format, bn):
"\n Bulding block with convolutional layers for one level.\n\n :param m: model\n :param num_kernels: number of convolution filters on the particular level, positive integer\n :param kernel_size: size of the convolution kernel, tuple of two positive integers\n :param strides: strides values, tuple of two positive integers\n :param padding: used padding by convolution, takes values: 'same' or 'valid'\n :param activation: activation_function after every convolution\n :param dropout: percentage of weights to be dropped, float between 0 and 1\n :param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'\n :param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,\n False do not use Batch Normalization\n :return: model\n "
n = Convolution2D(num_kernels, kernel_size, strides=strides, activation=activation, padding=padding, data_format=data_format)(m)
n = (BatchNormalization()(n) if bn else n)
n = Dropout(dropout)(n)
n = Convolution2D(num_kernels, kernel_size, strides=strides, activation=activation, padding=padding, data_format=data_format)(n)
n = (BatchNormalization()(n) if bn else n)
return n<|docstring|>Bulding block with convolutional layers for one level.
:param m: model
:param num_kernels: number of convolution filters on the particular level, positive integer
:param kernel_size: size of the convolution kernel, tuple of two positive integers
:param strides: strides values, tuple of two positive integers
:param padding: used padding by convolution, takes values: 'same' or 'valid'
:param activation: activation_function after every convolution
:param dropout: percentage of weights to be dropped, float between 0 and 1
:param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'
:param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,
False do not use Batch Normalization
:return: model<|endoftext|> |
896dd0225f8110f4604c61c739ecc041861fce8f8a14797ef4f9e6be380ca4b5 | def up_concat_block(m, concat_channels, pool_size, concat_axis, data_format):
"\n Bulding block with up-sampling and concatenation for one level.\n\n :param m: model\n :param concat_channels: channels from left side onf Unet to be concatenated with the right part on one level\n :param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers\n :param concat_axis: concatenation axis, concatenate over channels, positive integer\n :param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'\n :return: model "
n = UpSampling2D(size=pool_size, data_format=data_format)(m)
n = concatenate([n, concat_channels], axis=concat_axis)
return n | Bulding block with up-sampling and concatenation for one level.
:param m: model
:param concat_channels: channels from left side onf Unet to be concatenated with the right part on one level
:param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers
:param concat_axis: concatenation axis, concatenate over channels, positive integer
:param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'
:return: model | Unet/utils/unet.py | up_concat_block | prediction2020/unet-vessel-segmentation | 23 | python | def up_concat_block(m, concat_channels, pool_size, concat_axis, data_format):
"\n Bulding block with up-sampling and concatenation for one level.\n\n :param m: model\n :param concat_channels: channels from left side onf Unet to be concatenated with the right part on one level\n :param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers\n :param concat_axis: concatenation axis, concatenate over channels, positive integer\n :param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'\n :return: model "
n = UpSampling2D(size=pool_size, data_format=data_format)(m)
n = concatenate([n, concat_channels], axis=concat_axis)
return n | def up_concat_block(m, concat_channels, pool_size, concat_axis, data_format):
"\n Bulding block with up-sampling and concatenation for one level.\n\n :param m: model\n :param concat_channels: channels from left side onf Unet to be concatenated with the right part on one level\n :param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers\n :param concat_axis: concatenation axis, concatenate over channels, positive integer\n :param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'\n :return: model "
n = UpSampling2D(size=pool_size, data_format=data_format)(m)
n = concatenate([n, concat_channels], axis=concat_axis)
return n<|docstring|>Bulding block with up-sampling and concatenation for one level.
:param m: model
:param concat_channels: channels from left side onf Unet to be concatenated with the right part on one level
:param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers
:param concat_axis: concatenation axis, concatenate over channels, positive integer
:param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'
:return: model<|endoftext|> |
352ad15018741b28e05dcb3703dcc8298c6ce1e7587ed100822182528a704cb2 | def get_unet(patch_size, num_channels, activation, final_activation, optimizer, learning_rate, dropout, loss_function, metrics=None, kernel_size=(3, 3), pool_size=(2, 2), strides=(1, 1), num_kernels=None, concat_axis=3, data_format='channels_last', padding='same', bn=False):
"\n Defines the architecture of the u-net. Reconstruction of the u-net introduced in: https://arxiv.org/abs/1505.04597\n\n :param patch_size: height of the patches, positive integer\n :param num_channels: number of channels of the input images, positive integer\n :param activation: activation_function after every convolution\n :param final_activation: activation_function of the final layer\n :param optimizer: optimization algorithm for updating the weights and bias values\n :param learning_rate: learning_rate of the optimizer, float\n :param dropout: percentage of weights to be dropped, float between 0 and 1\n :param loss_function: loss function also known as cost function\n :param metrics: metrics for evaluation of the model performance\n :param kernel_size: size of the convolution kernel, tuple of two positive integers\n :param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers\n :param strides: strides values, tuple of two positive integers\n :param num_kernels: array specifying the number of convolution filters in every level, list of positive integers\n containing value for each level of the model\n :param concat_axis: concatenation axis, concatenate over channels, positive integer\n :param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'\n :param padding: used padding by convolution, takes values: 'same' or 'valid'\n :param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,\n False do not use Batch Normalization\n :return: compiled u-net model\n "
if (metrics is None):
metrics = ['accuracy']
if (num_kernels is None):
num_kernels = [64, 128, 256, 512, 1024]
inputs = Input((patch_size, patch_size, num_channels))
conv_0_down = conv_block(inputs, num_kernels[0], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_0 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_0_down)
conv_1_down = conv_block(pool_0, num_kernels[1], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_1 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_1_down)
conv_2_down = conv_block(pool_1, num_kernels[2], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_2 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_2_down)
conv_3_down = conv_block(pool_2, num_kernels[3], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_3 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_3_down)
conv_4 = conv_block(pool_3, num_kernels[4], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_3 = up_concat_block(conv_4, conv_3_down, pool_size, concat_axis, data_format)
conv_3_up = conv_block(concat_3, num_kernels[3], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_2 = up_concat_block(conv_3_up, conv_2_down, pool_size, concat_axis, data_format)
conv_2_up = conv_block(concat_2, num_kernels[2], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_1 = up_concat_block(conv_2_up, conv_1_down, pool_size, concat_axis, data_format)
conv_1_up = conv_block(concat_1, num_kernels[1], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_0 = up_concat_block(conv_1_up, conv_0_down, pool_size, concat_axis, data_format)
conv_0_up = conv_block(concat_0, num_kernels[0], kernel_size, strides, padding, activation, dropout, data_format, bn)
final_conv = Convolution2D(1, 1, strides=strides, activation=final_activation, padding=padding, data_format=data_format)(conv_0_up)
model = Model(inputs=inputs, outputs=final_conv)
model.compile(optimizer=optimizer(lr=learning_rate), loss=loss_function, metrics=metrics)
print('U-net compiled.')
model.summary()
return model | Defines the architecture of the u-net. Reconstruction of the u-net introduced in: https://arxiv.org/abs/1505.04597
:param patch_size: height of the patches, positive integer
:param num_channels: number of channels of the input images, positive integer
:param activation: activation_function after every convolution
:param final_activation: activation_function of the final layer
:param optimizer: optimization algorithm for updating the weights and bias values
:param learning_rate: learning_rate of the optimizer, float
:param dropout: percentage of weights to be dropped, float between 0 and 1
:param loss_function: loss function also known as cost function
:param metrics: metrics for evaluation of the model performance
:param kernel_size: size of the convolution kernel, tuple of two positive integers
:param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers
:param strides: strides values, tuple of two positive integers
:param num_kernels: array specifying the number of convolution filters in every level, list of positive integers
containing value for each level of the model
:param concat_axis: concatenation axis, concatenate over channels, positive integer
:param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'
:param padding: used padding by convolution, takes values: 'same' or 'valid'
:param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,
False do not use Batch Normalization
:return: compiled u-net model | Unet/utils/unet.py | get_unet | prediction2020/unet-vessel-segmentation | 23 | python | def get_unet(patch_size, num_channels, activation, final_activation, optimizer, learning_rate, dropout, loss_function, metrics=None, kernel_size=(3, 3), pool_size=(2, 2), strides=(1, 1), num_kernels=None, concat_axis=3, data_format='channels_last', padding='same', bn=False):
"\n Defines the architecture of the u-net. Reconstruction of the u-net introduced in: https://arxiv.org/abs/1505.04597\n\n :param patch_size: height of the patches, positive integer\n :param num_channels: number of channels of the input images, positive integer\n :param activation: activation_function after every convolution\n :param final_activation: activation_function of the final layer\n :param optimizer: optimization algorithm for updating the weights and bias values\n :param learning_rate: learning_rate of the optimizer, float\n :param dropout: percentage of weights to be dropped, float between 0 and 1\n :param loss_function: loss function also known as cost function\n :param metrics: metrics for evaluation of the model performance\n :param kernel_size: size of the convolution kernel, tuple of two positive integers\n :param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers\n :param strides: strides values, tuple of two positive integers\n :param num_kernels: array specifying the number of convolution filters in every level, list of positive integers\n containing value for each level of the model\n :param concat_axis: concatenation axis, concatenate over channels, positive integer\n :param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'\n :param padding: used padding by convolution, takes values: 'same' or 'valid'\n :param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,\n False do not use Batch Normalization\n :return: compiled u-net model\n "
if (metrics is None):
metrics = ['accuracy']
if (num_kernels is None):
num_kernels = [64, 128, 256, 512, 1024]
inputs = Input((patch_size, patch_size, num_channels))
conv_0_down = conv_block(inputs, num_kernels[0], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_0 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_0_down)
conv_1_down = conv_block(pool_0, num_kernels[1], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_1 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_1_down)
conv_2_down = conv_block(pool_1, num_kernels[2], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_2 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_2_down)
conv_3_down = conv_block(pool_2, num_kernels[3], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_3 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_3_down)
conv_4 = conv_block(pool_3, num_kernels[4], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_3 = up_concat_block(conv_4, conv_3_down, pool_size, concat_axis, data_format)
conv_3_up = conv_block(concat_3, num_kernels[3], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_2 = up_concat_block(conv_3_up, conv_2_down, pool_size, concat_axis, data_format)
conv_2_up = conv_block(concat_2, num_kernels[2], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_1 = up_concat_block(conv_2_up, conv_1_down, pool_size, concat_axis, data_format)
conv_1_up = conv_block(concat_1, num_kernels[1], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_0 = up_concat_block(conv_1_up, conv_0_down, pool_size, concat_axis, data_format)
conv_0_up = conv_block(concat_0, num_kernels[0], kernel_size, strides, padding, activation, dropout, data_format, bn)
final_conv = Convolution2D(1, 1, strides=strides, activation=final_activation, padding=padding, data_format=data_format)(conv_0_up)
model = Model(inputs=inputs, outputs=final_conv)
model.compile(optimizer=optimizer(lr=learning_rate), loss=loss_function, metrics=metrics)
print('U-net compiled.')
model.summary()
return model | def get_unet(patch_size, num_channels, activation, final_activation, optimizer, learning_rate, dropout, loss_function, metrics=None, kernel_size=(3, 3), pool_size=(2, 2), strides=(1, 1), num_kernels=None, concat_axis=3, data_format='channels_last', padding='same', bn=False):
"\n Defines the architecture of the u-net. Reconstruction of the u-net introduced in: https://arxiv.org/abs/1505.04597\n\n :param patch_size: height of the patches, positive integer\n :param num_channels: number of channels of the input images, positive integer\n :param activation: activation_function after every convolution\n :param final_activation: activation_function of the final layer\n :param optimizer: optimization algorithm for updating the weights and bias values\n :param learning_rate: learning_rate of the optimizer, float\n :param dropout: percentage of weights to be dropped, float between 0 and 1\n :param loss_function: loss function also known as cost function\n :param metrics: metrics for evaluation of the model performance\n :param kernel_size: size of the convolution kernel, tuple of two positive integers\n :param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers\n :param strides: strides values, tuple of two positive integers\n :param num_kernels: array specifying the number of convolution filters in every level, list of positive integers\n containing value for each level of the model\n :param concat_axis: concatenation axis, concatenate over channels, positive integer\n :param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'\n :param padding: used padding by convolution, takes values: 'same' or 'valid'\n :param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,\n False do not use Batch Normalization\n :return: compiled u-net model\n "
if (metrics is None):
metrics = ['accuracy']
if (num_kernels is None):
num_kernels = [64, 128, 256, 512, 1024]
inputs = Input((patch_size, patch_size, num_channels))
conv_0_down = conv_block(inputs, num_kernels[0], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_0 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_0_down)
conv_1_down = conv_block(pool_0, num_kernels[1], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_1 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_1_down)
conv_2_down = conv_block(pool_1, num_kernels[2], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_2 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_2_down)
conv_3_down = conv_block(pool_2, num_kernels[3], kernel_size, strides, padding, activation, dropout, data_format, bn)
pool_3 = MaxPooling2D(pool_size=pool_size, data_format=data_format)(conv_3_down)
conv_4 = conv_block(pool_3, num_kernels[4], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_3 = up_concat_block(conv_4, conv_3_down, pool_size, concat_axis, data_format)
conv_3_up = conv_block(concat_3, num_kernels[3], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_2 = up_concat_block(conv_3_up, conv_2_down, pool_size, concat_axis, data_format)
conv_2_up = conv_block(concat_2, num_kernels[2], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_1 = up_concat_block(conv_2_up, conv_1_down, pool_size, concat_axis, data_format)
conv_1_up = conv_block(concat_1, num_kernels[1], kernel_size, strides, padding, activation, dropout, data_format, bn)
concat_0 = up_concat_block(conv_1_up, conv_0_down, pool_size, concat_axis, data_format)
conv_0_up = conv_block(concat_0, num_kernels[0], kernel_size, strides, padding, activation, dropout, data_format, bn)
final_conv = Convolution2D(1, 1, strides=strides, activation=final_activation, padding=padding, data_format=data_format)(conv_0_up)
model = Model(inputs=inputs, outputs=final_conv)
model.compile(optimizer=optimizer(lr=learning_rate), loss=loss_function, metrics=metrics)
print('U-net compiled.')
model.summary()
return model<|docstring|>Defines the architecture of the u-net. Reconstruction of the u-net introduced in: https://arxiv.org/abs/1505.04597
:param patch_size: height of the patches, positive integer
:param num_channels: number of channels of the input images, positive integer
:param activation: activation_function after every convolution
:param final_activation: activation_function of the final layer
:param optimizer: optimization algorithm for updating the weights and bias values
:param learning_rate: learning_rate of the optimizer, float
:param dropout: percentage of weights to be dropped, float between 0 and 1
:param loss_function: loss function also known as cost function
:param metrics: metrics for evaluation of the model performance
:param kernel_size: size of the convolution kernel, tuple of two positive integers
:param pool_size: factors by which to downscale (vertical, horizontal), tuple of two positive integers
:param strides: strides values, tuple of two positive integers
:param num_kernels: array specifying the number of convolution filters in every level, list of positive integers
containing value for each level of the model
:param concat_axis: concatenation axis, concatenate over channels, positive integer
:param data_format: ordering of the dimensions in the inputs, takes values: 'channel_first' or 'channel_last'
:param padding: used padding by convolution, takes values: 'same' or 'valid'
:param bn: weather to use Batch Normalization layers after each convolution layer, True for use Batch Normalization,
False do not use Batch Normalization
:return: compiled u-net model<|endoftext|> |
2f12a58d24951b554fd1f9ccd332c4d8d29b1b78996ee28c07dc885580125b60 | def _cleanup(parts):
"\n Normalize up the parts matched by :obj:`parser.parser_re` to\n degrees, minutes, and seconds.\n\n >>> _cleanup({'latdir': 'south', 'longdir': 'west',\n ... 'latdeg':'60','latmin':'30',\n ... 'longdeg':'50','longmin':'40'})\n ['S', '60', '30', '00', 'W', '50', '40', '00']\n\n >>> _cleanup({'latdir': 'south', 'longdir': 'west',\n ... 'latdeg':'60','latmin':'30', 'latdecsec':'.50',\n ... 'longdeg':'50','longmin':'40','longdecsec':'.90'})\n ['S', '60', '30.50', '00', 'W', '50', '40.90', '00']\n\n "
latdir = (parts['latdir'] or parts['latdir2']).upper()[0]
longdir = (parts['longdir'] or parts['longdir2']).upper()[0]
latdeg = parts.get('latdeg')
longdeg = parts.get('longdeg')
latmin = (parts.get('latmin', '00') or '00')
longmin = (parts.get('longmin', '00') or '00')
latdecsec = parts.get('latdecsec', '')
longdecsec = parts.get('longdecsec', '')
if (latdecsec and longdecsec):
latmin += latdecsec
longmin += longdecsec
latsec = '00'
longsec = '00'
else:
latsec = (parts.get('latsec', '') or '00')
longsec = (parts.get('longsec', '') or '00')
return [latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec] | Normalize up the parts matched by :obj:`parser.parser_re` to
degrees, minutes, and seconds.
>>> _cleanup({'latdir': 'south', 'longdir': 'west',
... 'latdeg':'60','latmin':'30',
... 'longdeg':'50','longmin':'40'})
['S', '60', '30', '00', 'W', '50', '40', '00']
>>> _cleanup({'latdir': 'south', 'longdir': 'west',
... 'latdeg':'60','latmin':'30', 'latdecsec':'.50',
... 'longdeg':'50','longmin':'40','longdecsec':'.90'})
['S', '60', '30.50', '00', 'W', '50', '40.90', '00'] | geolucidate/functions.py | _cleanup | kurtraschke/geolucidate | 3 | python | def _cleanup(parts):
"\n Normalize up the parts matched by :obj:`parser.parser_re` to\n degrees, minutes, and seconds.\n\n >>> _cleanup({'latdir': 'south', 'longdir': 'west',\n ... 'latdeg':'60','latmin':'30',\n ... 'longdeg':'50','longmin':'40'})\n ['S', '60', '30', '00', 'W', '50', '40', '00']\n\n >>> _cleanup({'latdir': 'south', 'longdir': 'west',\n ... 'latdeg':'60','latmin':'30', 'latdecsec':'.50',\n ... 'longdeg':'50','longmin':'40','longdecsec':'.90'})\n ['S', '60', '30.50', '00', 'W', '50', '40.90', '00']\n\n "
latdir = (parts['latdir'] or parts['latdir2']).upper()[0]
longdir = (parts['longdir'] or parts['longdir2']).upper()[0]
latdeg = parts.get('latdeg')
longdeg = parts.get('longdeg')
latmin = (parts.get('latmin', '00') or '00')
longmin = (parts.get('longmin', '00') or '00')
latdecsec = parts.get('latdecsec', )
longdecsec = parts.get('longdecsec', )
if (latdecsec and longdecsec):
latmin += latdecsec
longmin += longdecsec
latsec = '00'
longsec = '00'
else:
latsec = (parts.get('latsec', ) or '00')
longsec = (parts.get('longsec', ) or '00')
return [latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec] | def _cleanup(parts):
"\n Normalize up the parts matched by :obj:`parser.parser_re` to\n degrees, minutes, and seconds.\n\n >>> _cleanup({'latdir': 'south', 'longdir': 'west',\n ... 'latdeg':'60','latmin':'30',\n ... 'longdeg':'50','longmin':'40'})\n ['S', '60', '30', '00', 'W', '50', '40', '00']\n\n >>> _cleanup({'latdir': 'south', 'longdir': 'west',\n ... 'latdeg':'60','latmin':'30', 'latdecsec':'.50',\n ... 'longdeg':'50','longmin':'40','longdecsec':'.90'})\n ['S', '60', '30.50', '00', 'W', '50', '40.90', '00']\n\n "
latdir = (parts['latdir'] or parts['latdir2']).upper()[0]
longdir = (parts['longdir'] or parts['longdir2']).upper()[0]
latdeg = parts.get('latdeg')
longdeg = parts.get('longdeg')
latmin = (parts.get('latmin', '00') or '00')
longmin = (parts.get('longmin', '00') or '00')
latdecsec = parts.get('latdecsec', )
longdecsec = parts.get('longdecsec', )
if (latdecsec and longdecsec):
latmin += latdecsec
longmin += longdecsec
latsec = '00'
longsec = '00'
else:
latsec = (parts.get('latsec', ) or '00')
longsec = (parts.get('longsec', ) or '00')
return [latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec]<|docstring|>Normalize up the parts matched by :obj:`parser.parser_re` to
degrees, minutes, and seconds.
>>> _cleanup({'latdir': 'south', 'longdir': 'west',
... 'latdeg':'60','latmin':'30',
... 'longdeg':'50','longmin':'40'})
['S', '60', '30', '00', 'W', '50', '40', '00']
>>> _cleanup({'latdir': 'south', 'longdir': 'west',
... 'latdeg':'60','latmin':'30', 'latdecsec':'.50',
... 'longdeg':'50','longmin':'40','longdecsec':'.90'})
['S', '60', '30.50', '00', 'W', '50', '40.90', '00']<|endoftext|> |
94e11cb530db18d434d376ebf357c1ea0d6576a94e3dc980e060b3050e727c36 | def _convert(latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec):
"\n Convert normalized degrees, minutes, and seconds to decimal degrees.\n Quantize the converted value based on the input precision and\n return a 2-tuple of strings.\n\n >>> _convert('S','50','30','30','W','50','30','30')\n ('-50.508333', '-50.508333')\n\n >>> _convert('N','50','27','55','W','127','27','65')\n ('50.459167', '-127.460833')\n\n "
if ((latsec != '00') or (longsec != '00')):
precision = Decimal('0.000001')
elif ((latmin != '00') or (longmin != '00')):
precision = Decimal('0.001')
else:
precision = Decimal('1')
latitude = Decimal(latdeg)
latmin = Decimal(latmin)
latsec = Decimal(latsec)
longitude = Decimal(longdeg)
longmin = Decimal(longmin)
longsec = Decimal(longsec)
if ((latsec > 59) or (longsec > 59)):
latitude += ((latmin + (latsec / Decimal('100'))) / Decimal('60'))
longitude += ((longmin + (longsec / Decimal('100'))) / Decimal('60'))
else:
latitude += ((latmin + (latsec / Decimal('60'))) / Decimal('60'))
longitude += ((longmin + (longsec / Decimal('60'))) / Decimal('60'))
if (latdir == 'S'):
latitude *= Decimal('-1')
if (longdir == 'W'):
longitude *= Decimal('-1')
lat_str = str(latitude.quantize(precision))
long_str = str(longitude.quantize(precision))
return (lat_str, long_str) | Convert normalized degrees, minutes, and seconds to decimal degrees.
Quantize the converted value based on the input precision and
return a 2-tuple of strings.
>>> _convert('S','50','30','30','W','50','30','30')
('-50.508333', '-50.508333')
>>> _convert('N','50','27','55','W','127','27','65')
('50.459167', '-127.460833') | geolucidate/functions.py | _convert | kurtraschke/geolucidate | 3 | python | def _convert(latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec):
"\n Convert normalized degrees, minutes, and seconds to decimal degrees.\n Quantize the converted value based on the input precision and\n return a 2-tuple of strings.\n\n >>> _convert('S','50','30','30','W','50','30','30')\n ('-50.508333', '-50.508333')\n\n >>> _convert('N','50','27','55','W','127','27','65')\n ('50.459167', '-127.460833')\n\n "
if ((latsec != '00') or (longsec != '00')):
precision = Decimal('0.000001')
elif ((latmin != '00') or (longmin != '00')):
precision = Decimal('0.001')
else:
precision = Decimal('1')
latitude = Decimal(latdeg)
latmin = Decimal(latmin)
latsec = Decimal(latsec)
longitude = Decimal(longdeg)
longmin = Decimal(longmin)
longsec = Decimal(longsec)
if ((latsec > 59) or (longsec > 59)):
latitude += ((latmin + (latsec / Decimal('100'))) / Decimal('60'))
longitude += ((longmin + (longsec / Decimal('100'))) / Decimal('60'))
else:
latitude += ((latmin + (latsec / Decimal('60'))) / Decimal('60'))
longitude += ((longmin + (longsec / Decimal('60'))) / Decimal('60'))
if (latdir == 'S'):
latitude *= Decimal('-1')
if (longdir == 'W'):
longitude *= Decimal('-1')
lat_str = str(latitude.quantize(precision))
long_str = str(longitude.quantize(precision))
return (lat_str, long_str) | def _convert(latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec):
"\n Convert normalized degrees, minutes, and seconds to decimal degrees.\n Quantize the converted value based on the input precision and\n return a 2-tuple of strings.\n\n >>> _convert('S','50','30','30','W','50','30','30')\n ('-50.508333', '-50.508333')\n\n >>> _convert('N','50','27','55','W','127','27','65')\n ('50.459167', '-127.460833')\n\n "
if ((latsec != '00') or (longsec != '00')):
precision = Decimal('0.000001')
elif ((latmin != '00') or (longmin != '00')):
precision = Decimal('0.001')
else:
precision = Decimal('1')
latitude = Decimal(latdeg)
latmin = Decimal(latmin)
latsec = Decimal(latsec)
longitude = Decimal(longdeg)
longmin = Decimal(longmin)
longsec = Decimal(longsec)
if ((latsec > 59) or (longsec > 59)):
latitude += ((latmin + (latsec / Decimal('100'))) / Decimal('60'))
longitude += ((longmin + (longsec / Decimal('100'))) / Decimal('60'))
else:
latitude += ((latmin + (latsec / Decimal('60'))) / Decimal('60'))
longitude += ((longmin + (longsec / Decimal('60'))) / Decimal('60'))
if (latdir == 'S'):
latitude *= Decimal('-1')
if (longdir == 'W'):
longitude *= Decimal('-1')
lat_str = str(latitude.quantize(precision))
long_str = str(longitude.quantize(precision))
return (lat_str, long_str)<|docstring|>Convert normalized degrees, minutes, and seconds to decimal degrees.
Quantize the converted value based on the input precision and
return a 2-tuple of strings.
>>> _convert('S','50','30','30','W','50','30','30')
('-50.508333', '-50.508333')
>>> _convert('N','50','27','55','W','127','27','65')
('50.459167', '-127.460833')<|endoftext|> |
f135c60dd2c1e9f4e5cb480ef5f2ed73ed6d2814d0fb775345555e1ec52ccaaf | def replace(string, sub_function=google_maps_link()):
'\n Replace detected coordinates with a map link, using the given substitution\n function.\n\n The substitution function will be passed a :class:`~.MapLink` instance, and\n should return a string which will be substituted by :func:`re.sub` in place\n of the detected coordinates.\n\n >>> replace("58147N/07720W")\n \'<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>\'\n\n >>> replace("5814N/07720W", google_maps_link(\'satellite\'))\n \'<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>\'\n\n >>> from geolucidate.links.bing import bing_maps_link\n >>> replace("58N/077W", bing_maps_link(\'map\'))\n \'<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>\'\n\n '
def do_replace(match):
original_string = match.group()
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
return sub_function(MapLink(original_string, latitude, longitude))
return parser_re.sub(do_replace, string) | Replace detected coordinates with a map link, using the given substitution
function.
The substitution function will be passed a :class:`~.MapLink` instance, and
should return a string which will be substituted by :func:`re.sub` in place
of the detected coordinates.
>>> replace("58147N/07720W")
'<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>'
>>> replace("5814N/07720W", google_maps_link('satellite'))
'<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>'
>>> from geolucidate.links.bing import bing_maps_link
>>> replace("58N/077W", bing_maps_link('map'))
'<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>' | geolucidate/functions.py | replace | kurtraschke/geolucidate | 3 | python | def replace(string, sub_function=google_maps_link()):
'\n Replace detected coordinates with a map link, using the given substitution\n function.\n\n The substitution function will be passed a :class:`~.MapLink` instance, and\n should return a string which will be substituted by :func:`re.sub` in place\n of the detected coordinates.\n\n >>> replace("58147N/07720W")\n \'<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>\'\n\n >>> replace("5814N/07720W", google_maps_link(\'satellite\'))\n \'<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>\'\n\n >>> from geolucidate.links.bing import bing_maps_link\n >>> replace("58N/077W", bing_maps_link(\'map\'))\n \'<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>\'\n\n '
def do_replace(match):
original_string = match.group()
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
return sub_function(MapLink(original_string, latitude, longitude))
return parser_re.sub(do_replace, string) | def replace(string, sub_function=google_maps_link()):
'\n Replace detected coordinates with a map link, using the given substitution\n function.\n\n The substitution function will be passed a :class:`~.MapLink` instance, and\n should return a string which will be substituted by :func:`re.sub` in place\n of the detected coordinates.\n\n >>> replace("58147N/07720W")\n \'<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>\'\n\n >>> replace("5814N/07720W", google_maps_link(\'satellite\'))\n \'<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>\'\n\n >>> from geolucidate.links.bing import bing_maps_link\n >>> replace("58N/077W", bing_maps_link(\'map\'))\n \'<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>\'\n\n '
def do_replace(match):
original_string = match.group()
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
return sub_function(MapLink(original_string, latitude, longitude))
return parser_re.sub(do_replace, string)<|docstring|>Replace detected coordinates with a map link, using the given substitution
function.
The substitution function will be passed a :class:`~.MapLink` instance, and
should return a string which will be substituted by :func:`re.sub` in place
of the detected coordinates.
>>> replace("58147N/07720W")
'<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>'
>>> replace("5814N/07720W", google_maps_link('satellite'))
'<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>'
>>> from geolucidate.links.bing import bing_maps_link
>>> replace("58N/077W", bing_maps_link('map'))
'<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>'<|endoftext|> |
13494fdf928acc006e03962368b679f46da40873701076c9ae95047890ce8603 | def get_replacements(string, sub_function=google_maps_link()):
'\n Return a dict whose keys are instances of :class:`re.Match` and\n whose values are the corresponding replacements. Use\n :func:`get_replacements` when the replacement cannot be performed\n through ordinary string substitution by :func:`re.sub`, as in\n :func:`replace`.\n\n\n >>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W")\n ... #doctest: +ELLIPSIS\n {<re.Match object...>: \'<a href="..." title="...">4630 NORTH 5705 WEST</a>\', <re.Match object...>: \'<a href="..." title="...">58147N/07720W</a>\'}\n\n >>> test_string = "4630 NORTH 5705 WEST 58147N/07720W"\n >>> replacements = get_replacements(test_string)\n >>> offset = 0\n >>> out = bytearray(test_string, encoding="ascii", errors="replace")\n >>> for (match, link) in replacements.items():\n ... start = match.start() + offset\n ... end = match.end() + offset\n ... out[start:end] = bytearray(link, encoding="ascii", errors="replace")\n ... offset += (len(link) - len(match.group()))\n >>> out.decode(encoding="ascii") == replace(test_string)\n True\n '
substitutions = {}
matches = parser_re.finditer(string)
for match in matches:
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
substitutions[match] = sub_function(MapLink(match.group(), latitude, longitude))
return substitutions | Return a dict whose keys are instances of :class:`re.Match` and
whose values are the corresponding replacements. Use
:func:`get_replacements` when the replacement cannot be performed
through ordinary string substitution by :func:`re.sub`, as in
:func:`replace`.
>>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W")
... #doctest: +ELLIPSIS
{<re.Match object...>: '<a href="..." title="...">4630 NORTH 5705 WEST</a>', <re.Match object...>: '<a href="..." title="...">58147N/07720W</a>'}
>>> test_string = "4630 NORTH 5705 WEST 58147N/07720W"
>>> replacements = get_replacements(test_string)
>>> offset = 0
>>> out = bytearray(test_string, encoding="ascii", errors="replace")
>>> for (match, link) in replacements.items():
... start = match.start() + offset
... end = match.end() + offset
... out[start:end] = bytearray(link, encoding="ascii", errors="replace")
... offset += (len(link) - len(match.group()))
>>> out.decode(encoding="ascii") == replace(test_string)
True | geolucidate/functions.py | get_replacements | kurtraschke/geolucidate | 3 | python | def get_replacements(string, sub_function=google_maps_link()):
'\n Return a dict whose keys are instances of :class:`re.Match` and\n whose values are the corresponding replacements. Use\n :func:`get_replacements` when the replacement cannot be performed\n through ordinary string substitution by :func:`re.sub`, as in\n :func:`replace`.\n\n\n >>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W")\n ... #doctest: +ELLIPSIS\n {<re.Match object...>: \'<a href="..." title="...">4630 NORTH 5705 WEST</a>\', <re.Match object...>: \'<a href="..." title="...">58147N/07720W</a>\'}\n\n >>> test_string = "4630 NORTH 5705 WEST 58147N/07720W"\n >>> replacements = get_replacements(test_string)\n >>> offset = 0\n >>> out = bytearray(test_string, encoding="ascii", errors="replace")\n >>> for (match, link) in replacements.items():\n ... start = match.start() + offset\n ... end = match.end() + offset\n ... out[start:end] = bytearray(link, encoding="ascii", errors="replace")\n ... offset += (len(link) - len(match.group()))\n >>> out.decode(encoding="ascii") == replace(test_string)\n True\n '
substitutions = {}
matches = parser_re.finditer(string)
for match in matches:
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
substitutions[match] = sub_function(MapLink(match.group(), latitude, longitude))
return substitutions | def get_replacements(string, sub_function=google_maps_link()):
'\n Return a dict whose keys are instances of :class:`re.Match` and\n whose values are the corresponding replacements. Use\n :func:`get_replacements` when the replacement cannot be performed\n through ordinary string substitution by :func:`re.sub`, as in\n :func:`replace`.\n\n\n >>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W")\n ... #doctest: +ELLIPSIS\n {<re.Match object...>: \'<a href="..." title="...">4630 NORTH 5705 WEST</a>\', <re.Match object...>: \'<a href="..." title="...">58147N/07720W</a>\'}\n\n >>> test_string = "4630 NORTH 5705 WEST 58147N/07720W"\n >>> replacements = get_replacements(test_string)\n >>> offset = 0\n >>> out = bytearray(test_string, encoding="ascii", errors="replace")\n >>> for (match, link) in replacements.items():\n ... start = match.start() + offset\n ... end = match.end() + offset\n ... out[start:end] = bytearray(link, encoding="ascii", errors="replace")\n ... offset += (len(link) - len(match.group()))\n >>> out.decode(encoding="ascii") == replace(test_string)\n True\n '
substitutions = {}
matches = parser_re.finditer(string)
for match in matches:
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
substitutions[match] = sub_function(MapLink(match.group(), latitude, longitude))
return substitutions<|docstring|>Return a dict whose keys are instances of :class:`re.Match` and
whose values are the corresponding replacements. Use
:func:`get_replacements` when the replacement cannot be performed
through ordinary string substitution by :func:`re.sub`, as in
:func:`replace`.
>>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W")
... #doctest: +ELLIPSIS
{<re.Match object...>: '<a href="..." title="...">4630 NORTH 5705 WEST</a>', <re.Match object...>: '<a href="..." title="...">58147N/07720W</a>'}
>>> test_string = "4630 NORTH 5705 WEST 58147N/07720W"
>>> replacements = get_replacements(test_string)
>>> offset = 0
>>> out = bytearray(test_string, encoding="ascii", errors="replace")
>>> for (match, link) in replacements.items():
... start = match.start() + offset
... end = match.end() + offset
... out[start:end] = bytearray(link, encoding="ascii", errors="replace")
... offset += (len(link) - len(match.group()))
>>> out.decode(encoding="ascii") == replace(test_string)
True<|endoftext|> |
3892b595c38fe6aaa4d3bfea2a63783355f571fa76551945da2ee5d50cebc58d | def pre_validate(self, form):
'\n 校验表单传值是否合法\n '
for (v, _) in self.choices:
if (text_type(self.data) == text_type(v)):
break
else:
raise ValueError(self.gettext('Not a valid choice')) | 校验表单传值是否合法 | app_backend/forms/__init__.py | pre_validate | zhanghe06/bearing_project | 1 | python | def pre_validate(self, form):
'\n \n '
for (v, _) in self.choices:
if (text_type(self.data) == text_type(v)):
break
else:
raise ValueError(self.gettext('Not a valid choice')) | def pre_validate(self, form):
'\n \n '
for (v, _) in self.choices:
if (text_type(self.data) == text_type(v)):
break
else:
raise ValueError(self.gettext('Not a valid choice'))<|docstring|>校验表单传值是否合法<|endoftext|> |
7a2afdd9fa63f5d7350568790e6b22ded751a99175e1fb57b30452a664617f13 | def save_file(filename: str) -> str:
'\n Saves the given file to a local directory,\n and returns the generated file name.\n '
return secure_filename(filename) | Saves the given file to a local directory,
and returns the generated file name. | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/uploads.py | save_file | aryaniyaps/flask-graphql-boilerplate | 4 | python | def save_file(filename: str) -> str:
'\n Saves the given file to a local directory,\n and returns the generated file name.\n '
return secure_filename(filename) | def save_file(filename: str) -> str:
'\n Saves the given file to a local directory,\n and returns the generated file name.\n '
return secure_filename(filename)<|docstring|>Saves the given file to a local directory,
and returns the generated file name.<|endoftext|> |
c10890e0bd9a6d99a449e5721fdb2599e41fc12d1f978e250cd34ed4bcaf48ad | def fold(H, columns=None):
'\n Fold a design to reduce confounding effects.\n \n Parameters\n ----------\n H : 2d-array\n The design matrix to be folded.\n columns : array\n Indices of of columns to fold (Default: None). If ``columns=None`` is\n used, then all columns will be folded.\n \n Returns\n -------\n Hf : 2d-array\n The folded design matrix.\n \n Examples\n --------\n ::\n \n '
H = np.array(H)
assert (len(H.shape) == 2), 'Input design matrix must be 2d.'
if (columns is None):
columns = range(H.shape[1])
Hf = H.copy()
for col in columns:
vals = np.unique(H[(:, col)])
assert (len(vals) == 2), 'Input design matrix must be 2-level factors only.'
for i in range(H.shape[0]):
Hf[(i, col)] = (vals[0] if (H[(i, col)] == vals[1]) else vals[1])
Hf = np.vstack((H, Hf))
return Hf | Fold a design to reduce confounding effects.
Parameters
----------
H : 2d-array
The design matrix to be folded.
columns : array
Indices of of columns to fold (Default: None). If ``columns=None`` is
used, then all columns will be folded.
Returns
-------
Hf : 2d-array
The folded design matrix.
Examples
--------
:: | framework/contrib/pyDOE/doe_fold.py | fold | greenwoodms06/raven | 184 | python | def fold(H, columns=None):
'\n Fold a design to reduce confounding effects.\n \n Parameters\n ----------\n H : 2d-array\n The design matrix to be folded.\n columns : array\n Indices of of columns to fold (Default: None). If ``columns=None`` is\n used, then all columns will be folded.\n \n Returns\n -------\n Hf : 2d-array\n The folded design matrix.\n \n Examples\n --------\n ::\n \n '
H = np.array(H)
assert (len(H.shape) == 2), 'Input design matrix must be 2d.'
if (columns is None):
columns = range(H.shape[1])
Hf = H.copy()
for col in columns:
vals = np.unique(H[(:, col)])
assert (len(vals) == 2), 'Input design matrix must be 2-level factors only.'
for i in range(H.shape[0]):
Hf[(i, col)] = (vals[0] if (H[(i, col)] == vals[1]) else vals[1])
Hf = np.vstack((H, Hf))
return Hf | def fold(H, columns=None):
'\n Fold a design to reduce confounding effects.\n \n Parameters\n ----------\n H : 2d-array\n The design matrix to be folded.\n columns : array\n Indices of of columns to fold (Default: None). If ``columns=None`` is\n used, then all columns will be folded.\n \n Returns\n -------\n Hf : 2d-array\n The folded design matrix.\n \n Examples\n --------\n ::\n \n '
H = np.array(H)
assert (len(H.shape) == 2), 'Input design matrix must be 2d.'
if (columns is None):
columns = range(H.shape[1])
Hf = H.copy()
for col in columns:
vals = np.unique(H[(:, col)])
assert (len(vals) == 2), 'Input design matrix must be 2-level factors only.'
for i in range(H.shape[0]):
Hf[(i, col)] = (vals[0] if (H[(i, col)] == vals[1]) else vals[1])
Hf = np.vstack((H, Hf))
return Hf<|docstring|>Fold a design to reduce confounding effects.
Parameters
----------
H : 2d-array
The design matrix to be folded.
columns : array
Indices of of columns to fold (Default: None). If ``columns=None`` is
used, then all columns will be folded.
Returns
-------
Hf : 2d-array
The folded design matrix.
Examples
--------
::<|endoftext|> |
0069471df4890192e8436d7912aed0708a1be7a95ee8b1e2a1685b6ff27d935e | def isEnabled(self):
'\n Note: this may be misleading if enable(), disable() not used\n '
return self.fEnabled | Note: this may be misleading if enable(), disable() not used | dependencies/panda/direct/particles/ParticleEffect.py | isEnabled | SuperM0use24/Project-Altis | 0 | python | def isEnabled(self):
'\n \n '
return self.fEnabled | def isEnabled(self):
'\n \n '
return self.fEnabled<|docstring|>Note: this may be misleading if enable(), disable() not used<|endoftext|> |
4d47b413db60f2a8033e96a000f546294b604ec3d472560efed2fdb141560d94 | def __init__(self, concurrency_policy=None, failed_jobs_history_limit=None, schedule=None, starting_deadline_seconds=None, successful_jobs_history_limit=None, suspend=None, timezone=None, workflow_metadata=None, workflow_spec=None):
'V1alpha1CronWorkflowSpec - a model defined in Swagger'
self._concurrency_policy = None
self._failed_jobs_history_limit = None
self._schedule = None
self._starting_deadline_seconds = None
self._successful_jobs_history_limit = None
self._suspend = None
self._timezone = None
self._workflow_metadata = None
self._workflow_spec = None
self.discriminator = None
if (concurrency_policy is not None):
self.concurrency_policy = concurrency_policy
if (failed_jobs_history_limit is not None):
self.failed_jobs_history_limit = failed_jobs_history_limit
self.schedule = schedule
if (starting_deadline_seconds is not None):
self.starting_deadline_seconds = starting_deadline_seconds
if (successful_jobs_history_limit is not None):
self.successful_jobs_history_limit = successful_jobs_history_limit
if (suspend is not None):
self.suspend = suspend
if (timezone is not None):
self.timezone = timezone
if (workflow_metadata is not None):
self.workflow_metadata = workflow_metadata
self.workflow_spec = workflow_spec | V1alpha1CronWorkflowSpec - a model defined in Swagger | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | __init__ | ButterflyNetwork/argo-client-python | 0 | python | def __init__(self, concurrency_policy=None, failed_jobs_history_limit=None, schedule=None, starting_deadline_seconds=None, successful_jobs_history_limit=None, suspend=None, timezone=None, workflow_metadata=None, workflow_spec=None):
self._concurrency_policy = None
self._failed_jobs_history_limit = None
self._schedule = None
self._starting_deadline_seconds = None
self._successful_jobs_history_limit = None
self._suspend = None
self._timezone = None
self._workflow_metadata = None
self._workflow_spec = None
self.discriminator = None
if (concurrency_policy is not None):
self.concurrency_policy = concurrency_policy
if (failed_jobs_history_limit is not None):
self.failed_jobs_history_limit = failed_jobs_history_limit
self.schedule = schedule
if (starting_deadline_seconds is not None):
self.starting_deadline_seconds = starting_deadline_seconds
if (successful_jobs_history_limit is not None):
self.successful_jobs_history_limit = successful_jobs_history_limit
if (suspend is not None):
self.suspend = suspend
if (timezone is not None):
self.timezone = timezone
if (workflow_metadata is not None):
self.workflow_metadata = workflow_metadata
self.workflow_spec = workflow_spec | def __init__(self, concurrency_policy=None, failed_jobs_history_limit=None, schedule=None, starting_deadline_seconds=None, successful_jobs_history_limit=None, suspend=None, timezone=None, workflow_metadata=None, workflow_spec=None):
self._concurrency_policy = None
self._failed_jobs_history_limit = None
self._schedule = None
self._starting_deadline_seconds = None
self._successful_jobs_history_limit = None
self._suspend = None
self._timezone = None
self._workflow_metadata = None
self._workflow_spec = None
self.discriminator = None
if (concurrency_policy is not None):
self.concurrency_policy = concurrency_policy
if (failed_jobs_history_limit is not None):
self.failed_jobs_history_limit = failed_jobs_history_limit
self.schedule = schedule
if (starting_deadline_seconds is not None):
self.starting_deadline_seconds = starting_deadline_seconds
if (successful_jobs_history_limit is not None):
self.successful_jobs_history_limit = successful_jobs_history_limit
if (suspend is not None):
self.suspend = suspend
if (timezone is not None):
self.timezone = timezone
if (workflow_metadata is not None):
self.workflow_metadata = workflow_metadata
self.workflow_spec = workflow_spec<|docstring|>V1alpha1CronWorkflowSpec - a model defined in Swagger<|endoftext|> |
4fae84bc616a243c609d9972fc9478963fdb9b7b22dd50a184d231aa7e5cb571 | @property
def concurrency_policy(self):
'Gets the concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501\n\n :return: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: str\n '
return self._concurrency_policy | Gets the concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501
ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501
:return: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: str | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | concurrency_policy | ButterflyNetwork/argo-client-python | 0 | python | @property
def concurrency_policy(self):
'Gets the concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501\n\n :return: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: str\n '
return self._concurrency_policy | @property
def concurrency_policy(self):
'Gets the concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501\n\n :return: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: str\n '
return self._concurrency_policy<|docstring|>Gets the concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501
ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501
:return: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: str<|endoftext|> |
ce573ec0686435ca826c40ccc4db12286936cdca38d97e3c3d3f149ab42d022d | @concurrency_policy.setter
def concurrency_policy(self, concurrency_policy):
'Sets the concurrency_policy of this V1alpha1CronWorkflowSpec.\n\n ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501\n\n :param concurrency_policy: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: str\n '
self._concurrency_policy = concurrency_policy | Sets the concurrency_policy of this V1alpha1CronWorkflowSpec.
ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501
:param concurrency_policy: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: str | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | concurrency_policy | ButterflyNetwork/argo-client-python | 0 | python | @concurrency_policy.setter
def concurrency_policy(self, concurrency_policy):
'Sets the concurrency_policy of this V1alpha1CronWorkflowSpec.\n\n ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501\n\n :param concurrency_policy: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: str\n '
self._concurrency_policy = concurrency_policy | @concurrency_policy.setter
def concurrency_policy(self, concurrency_policy):
'Sets the concurrency_policy of this V1alpha1CronWorkflowSpec.\n\n ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501\n\n :param concurrency_policy: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: str\n '
self._concurrency_policy = concurrency_policy<|docstring|>Sets the concurrency_policy of this V1alpha1CronWorkflowSpec.
ConcurrencyPolicy is the K8s-style concurrency policy that will be used # noqa: E501
:param concurrency_policy: The concurrency_policy of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: str<|endoftext|> |
0d4bfa221fccc85ca82c6136cae01b2a7244100b83d04560971a678670eb7e3a | @property
def failed_jobs_history_limit(self):
'Gets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :return: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._failed_jobs_history_limit | Gets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501
:return: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: int | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | failed_jobs_history_limit | ButterflyNetwork/argo-client-python | 0 | python | @property
def failed_jobs_history_limit(self):
'Gets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :return: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._failed_jobs_history_limit | @property
def failed_jobs_history_limit(self):
'Gets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :return: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._failed_jobs_history_limit<|docstring|>Gets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501
:return: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: int<|endoftext|> |
4385157b4a5d0811122a8d779187f7e4b98ceb5b08591fd87181e9823931b524 | @failed_jobs_history_limit.setter
def failed_jobs_history_limit(self, failed_jobs_history_limit):
'Sets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec.\n\n FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :param failed_jobs_history_limit: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._failed_jobs_history_limit = failed_jobs_history_limit | Sets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec.
FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501
:param failed_jobs_history_limit: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: int | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | failed_jobs_history_limit | ButterflyNetwork/argo-client-python | 0 | python | @failed_jobs_history_limit.setter
def failed_jobs_history_limit(self, failed_jobs_history_limit):
'Sets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec.\n\n FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :param failed_jobs_history_limit: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._failed_jobs_history_limit = failed_jobs_history_limit | @failed_jobs_history_limit.setter
def failed_jobs_history_limit(self, failed_jobs_history_limit):
'Sets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec.\n\n FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :param failed_jobs_history_limit: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._failed_jobs_history_limit = failed_jobs_history_limit<|docstring|>Sets the failed_jobs_history_limit of this V1alpha1CronWorkflowSpec.
FailedJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501
:param failed_jobs_history_limit: The failed_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: int<|endoftext|> |
b1c386b779a1128b1b9e71d716e39777633b71146ee1218ba52a0c8b6120dfe2 | @property
def schedule(self):
'Gets the schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n Schedule is a schedule to run the Workflow in Cron format # noqa: E501\n\n :return: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: str\n '
return self._schedule | Gets the schedule of this V1alpha1CronWorkflowSpec. # noqa: E501
Schedule is a schedule to run the Workflow in Cron format # noqa: E501
:return: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: str | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | schedule | ButterflyNetwork/argo-client-python | 0 | python | @property
def schedule(self):
'Gets the schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n Schedule is a schedule to run the Workflow in Cron format # noqa: E501\n\n :return: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: str\n '
return self._schedule | @property
def schedule(self):
'Gets the schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n Schedule is a schedule to run the Workflow in Cron format # noqa: E501\n\n :return: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: str\n '
return self._schedule<|docstring|>Gets the schedule of this V1alpha1CronWorkflowSpec. # noqa: E501
Schedule is a schedule to run the Workflow in Cron format # noqa: E501
:return: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: str<|endoftext|> |
c3d0bbdd14595925414aca92ba8cb9c013b1dab4eef752654fe4a05a901ef451 | @schedule.setter
def schedule(self, schedule):
'Sets the schedule of this V1alpha1CronWorkflowSpec.\n\n Schedule is a schedule to run the Workflow in Cron format # noqa: E501\n\n :param schedule: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: str\n '
if (schedule is None):
raise ValueError('Invalid value for `schedule`, must not be `None`')
self._schedule = schedule | Sets the schedule of this V1alpha1CronWorkflowSpec.
Schedule is a schedule to run the Workflow in Cron format # noqa: E501
:param schedule: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: str | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | schedule | ButterflyNetwork/argo-client-python | 0 | python | @schedule.setter
def schedule(self, schedule):
'Sets the schedule of this V1alpha1CronWorkflowSpec.\n\n Schedule is a schedule to run the Workflow in Cron format # noqa: E501\n\n :param schedule: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: str\n '
if (schedule is None):
raise ValueError('Invalid value for `schedule`, must not be `None`')
self._schedule = schedule | @schedule.setter
def schedule(self, schedule):
'Sets the schedule of this V1alpha1CronWorkflowSpec.\n\n Schedule is a schedule to run the Workflow in Cron format # noqa: E501\n\n :param schedule: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: str\n '
if (schedule is None):
raise ValueError('Invalid value for `schedule`, must not be `None`')
self._schedule = schedule<|docstring|>Sets the schedule of this V1alpha1CronWorkflowSpec.
Schedule is a schedule to run the Workflow in Cron format # noqa: E501
:param schedule: The schedule of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: str<|endoftext|> |
c00e250b2ebfa2f23c4b34d8028291081c14f09836e29a81c3cb26bd9db3f115 | @property
def starting_deadline_seconds(self):
'Gets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501\n\n :return: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._starting_deadline_seconds | Gets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501
StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501
:return: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: int | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | starting_deadline_seconds | ButterflyNetwork/argo-client-python | 0 | python | @property
def starting_deadline_seconds(self):
'Gets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501\n\n :return: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._starting_deadline_seconds | @property
def starting_deadline_seconds(self):
'Gets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501\n\n :return: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._starting_deadline_seconds<|docstring|>Gets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501
StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501
:return: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: int<|endoftext|> |
077aaa520b0583fb049f9c27f8e2283b806c4bfeccc9185cb2141e501ba3390d | @starting_deadline_seconds.setter
def starting_deadline_seconds(self, starting_deadline_seconds):
'Sets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec.\n\n StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501\n\n :param starting_deadline_seconds: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._starting_deadline_seconds = starting_deadline_seconds | Sets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec.
StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501
:param starting_deadline_seconds: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: int | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | starting_deadline_seconds | ButterflyNetwork/argo-client-python | 0 | python | @starting_deadline_seconds.setter
def starting_deadline_seconds(self, starting_deadline_seconds):
'Sets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec.\n\n StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501\n\n :param starting_deadline_seconds: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._starting_deadline_seconds = starting_deadline_seconds | @starting_deadline_seconds.setter
def starting_deadline_seconds(self, starting_deadline_seconds):
'Sets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec.\n\n StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501\n\n :param starting_deadline_seconds: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._starting_deadline_seconds = starting_deadline_seconds<|docstring|>Sets the starting_deadline_seconds of this V1alpha1CronWorkflowSpec.
StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed. # noqa: E501
:param starting_deadline_seconds: The starting_deadline_seconds of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: int<|endoftext|> |
947cb0b7d6a3df67a8f4125706a897056720ff949a53944ea2fc58cf559f831d | @property
def successful_jobs_history_limit(self):
'Gets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :return: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._successful_jobs_history_limit | Gets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501
:return: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: int | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | successful_jobs_history_limit | ButterflyNetwork/argo-client-python | 0 | python | @property
def successful_jobs_history_limit(self):
'Gets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :return: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._successful_jobs_history_limit | @property
def successful_jobs_history_limit(self):
'Gets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :return: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: int\n '
return self._successful_jobs_history_limit<|docstring|>Gets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501
:return: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: int<|endoftext|> |
a99781fd71fe059becd0ce3cacf02a5057f0833a65be390005bfade04d291962 | @successful_jobs_history_limit.setter
def successful_jobs_history_limit(self, successful_jobs_history_limit):
'Sets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec.\n\n SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :param successful_jobs_history_limit: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._successful_jobs_history_limit = successful_jobs_history_limit | Sets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec.
SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501
:param successful_jobs_history_limit: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: int | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | successful_jobs_history_limit | ButterflyNetwork/argo-client-python | 0 | python | @successful_jobs_history_limit.setter
def successful_jobs_history_limit(self, successful_jobs_history_limit):
'Sets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec.\n\n SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :param successful_jobs_history_limit: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._successful_jobs_history_limit = successful_jobs_history_limit | @successful_jobs_history_limit.setter
def successful_jobs_history_limit(self, successful_jobs_history_limit):
'Sets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec.\n\n SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501\n\n :param successful_jobs_history_limit: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: int\n '
self._successful_jobs_history_limit = successful_jobs_history_limit<|docstring|>Sets the successful_jobs_history_limit of this V1alpha1CronWorkflowSpec.
SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time # noqa: E501
:param successful_jobs_history_limit: The successful_jobs_history_limit of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: int<|endoftext|> |
a103f82d97355e7988d96ca76a178cce86f595c71165db93219f46943d4f4848 | @property
def suspend(self):
'Gets the suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501\n\n :return: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: bool\n '
return self._suspend | Gets the suspend of this V1alpha1CronWorkflowSpec. # noqa: E501
Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501
:return: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: bool | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | suspend | ButterflyNetwork/argo-client-python | 0 | python | @property
def suspend(self):
'Gets the suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501\n\n :return: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: bool\n '
return self._suspend | @property
def suspend(self):
'Gets the suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n\n Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501\n\n :return: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n :rtype: bool\n '
return self._suspend<|docstring|>Gets the suspend of this V1alpha1CronWorkflowSpec. # noqa: E501
Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501
:return: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501
:rtype: bool<|endoftext|> |
6dd27552770a9637c42755afd1e6592b2710263b160640343bdef9e61f6482d9 | @suspend.setter
def suspend(self, suspend):
'Sets the suspend of this V1alpha1CronWorkflowSpec.\n\n Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501\n\n :param suspend: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: bool\n '
self._suspend = suspend | Sets the suspend of this V1alpha1CronWorkflowSpec.
Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501
:param suspend: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: bool | argo/workflows/client/models/v1alpha1_cron_workflow_spec.py | suspend | ButterflyNetwork/argo-client-python | 0 | python | @suspend.setter
def suspend(self, suspend):
'Sets the suspend of this V1alpha1CronWorkflowSpec.\n\n Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501\n\n :param suspend: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: bool\n '
self._suspend = suspend | @suspend.setter
def suspend(self, suspend):
'Sets the suspend of this V1alpha1CronWorkflowSpec.\n\n Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501\n\n :param suspend: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501\n :type: bool\n '
self._suspend = suspend<|docstring|>Sets the suspend of this V1alpha1CronWorkflowSpec.
Suspend is a flag that will stop new CronWorkflows from running if set to true # noqa: E501
:param suspend: The suspend of this V1alpha1CronWorkflowSpec. # noqa: E501
:type: bool<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.