repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
dmbee/seglearn | seglearn/pipe.py | Pype.predict_log_proba | def predict_log_proba(self, X):
"""
Apply transforms, and predict_log_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes]
"""
Xt, _, _ = self._transform(X)
return self._final_estimator.predict_log_proba(Xt) | python | def predict_log_proba(self, X):
"""
Apply transforms, and predict_log_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes]
"""
Xt, _, _ = self._transform(X)
return self._final_estimator.predict_log_proba(Xt) | [
"def",
"predict_log_proba",
"(",
"self",
",",
"X",
")",
":",
"Xt",
",",
"_",
",",
"_",
"=",
"self",
".",
"_transform",
"(",
"X",
")",
"return",
"self",
".",
"_final_estimator",
".",
"predict_log_proba",
"(",
"Xt",
")"
] | Apply transforms, and predict_log_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes] | [
"Apply",
"transforms",
"and",
"predict_log_proba",
"of",
"the",
"final",
"estimator"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L327-L342 | train | 232,500 |
dmbee/seglearn | seglearn/feature_functions.py | base_features | def base_features():
''' Returns dictionary of some basic features that can be calculated for segmented time
series data '''
features = {'mean': mean,
'median': median,
'abs_energy': abs_energy,
'std': std,
'var': var,
'min': minimum,
'max': maximum,
'skew': skew,
'kurt': kurt,
'mse': mse,
'mnx': mean_crossings}
return features | python | def base_features():
''' Returns dictionary of some basic features that can be calculated for segmented time
series data '''
features = {'mean': mean,
'median': median,
'abs_energy': abs_energy,
'std': std,
'var': var,
'min': minimum,
'max': maximum,
'skew': skew,
'kurt': kurt,
'mse': mse,
'mnx': mean_crossings}
return features | [
"def",
"base_features",
"(",
")",
":",
"features",
"=",
"{",
"'mean'",
":",
"mean",
",",
"'median'",
":",
"median",
",",
"'abs_energy'",
":",
"abs_energy",
",",
"'std'",
":",
"std",
",",
"'var'",
":",
"var",
",",
"'min'",
":",
"minimum",
",",
"'max'",
":",
"maximum",
",",
"'skew'",
":",
"skew",
",",
"'kurt'",
":",
"kurt",
",",
"'mse'",
":",
"mse",
",",
"'mnx'",
":",
"mean_crossings",
"}",
"return",
"features"
] | Returns dictionary of some basic features that can be calculated for segmented time
series data | [
"Returns",
"dictionary",
"of",
"some",
"basic",
"features",
"that",
"can",
"be",
"calculated",
"for",
"segmented",
"time",
"series",
"data"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L37-L51 | train | 232,501 |
dmbee/seglearn | seglearn/feature_functions.py | all_features | def all_features():
''' Returns dictionary of all features in the module
.. note:: Some of the features (hist4, corr) are relatively expensive to compute
'''
features = {'mean': mean,
'median': median,
'gmean': gmean,
'hmean': hmean,
'vec_sum': vec_sum,
'abs_sum': abs_sum,
'abs_energy': abs_energy,
'std': std,
'var': var,
'variation': variation,
'min': minimum,
'max': maximum,
'skew': skew,
'kurt': kurt,
'mean_diff': mean_diff,
'mean_abs_diff': means_abs_diff,
'mse': mse,
'mnx': mean_crossings,
'hist4': hist(),
'corr': corr2,
'mean_abs_value': mean_abs,
'zero_crossings': zero_crossing(),
'slope_sign_changes': slope_sign_changes(),
'waveform_length': waveform_length,
'emg_var': emg_var,
'root_mean_square': root_mean_square,
'willison_amplitude': willison_amplitude()}
return features | python | def all_features():
''' Returns dictionary of all features in the module
.. note:: Some of the features (hist4, corr) are relatively expensive to compute
'''
features = {'mean': mean,
'median': median,
'gmean': gmean,
'hmean': hmean,
'vec_sum': vec_sum,
'abs_sum': abs_sum,
'abs_energy': abs_energy,
'std': std,
'var': var,
'variation': variation,
'min': minimum,
'max': maximum,
'skew': skew,
'kurt': kurt,
'mean_diff': mean_diff,
'mean_abs_diff': means_abs_diff,
'mse': mse,
'mnx': mean_crossings,
'hist4': hist(),
'corr': corr2,
'mean_abs_value': mean_abs,
'zero_crossings': zero_crossing(),
'slope_sign_changes': slope_sign_changes(),
'waveform_length': waveform_length,
'emg_var': emg_var,
'root_mean_square': root_mean_square,
'willison_amplitude': willison_amplitude()}
return features | [
"def",
"all_features",
"(",
")",
":",
"features",
"=",
"{",
"'mean'",
":",
"mean",
",",
"'median'",
":",
"median",
",",
"'gmean'",
":",
"gmean",
",",
"'hmean'",
":",
"hmean",
",",
"'vec_sum'",
":",
"vec_sum",
",",
"'abs_sum'",
":",
"abs_sum",
",",
"'abs_energy'",
":",
"abs_energy",
",",
"'std'",
":",
"std",
",",
"'var'",
":",
"var",
",",
"'variation'",
":",
"variation",
",",
"'min'",
":",
"minimum",
",",
"'max'",
":",
"maximum",
",",
"'skew'",
":",
"skew",
",",
"'kurt'",
":",
"kurt",
",",
"'mean_diff'",
":",
"mean_diff",
",",
"'mean_abs_diff'",
":",
"means_abs_diff",
",",
"'mse'",
":",
"mse",
",",
"'mnx'",
":",
"mean_crossings",
",",
"'hist4'",
":",
"hist",
"(",
")",
",",
"'corr'",
":",
"corr2",
",",
"'mean_abs_value'",
":",
"mean_abs",
",",
"'zero_crossings'",
":",
"zero_crossing",
"(",
")",
",",
"'slope_sign_changes'",
":",
"slope_sign_changes",
"(",
")",
",",
"'waveform_length'",
":",
"waveform_length",
",",
"'emg_var'",
":",
"emg_var",
",",
"'root_mean_square'",
":",
"root_mean_square",
",",
"'willison_amplitude'",
":",
"willison_amplitude",
"(",
")",
"}",
"return",
"features"
] | Returns dictionary of all features in the module
.. note:: Some of the features (hist4, corr) are relatively expensive to compute | [
"Returns",
"dictionary",
"of",
"all",
"features",
"in",
"the",
"module"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L54-L86 | train | 232,502 |
dmbee/seglearn | seglearn/feature_functions.py | emg_features | def emg_features(threshold=0):
'''Return a dictionary of popular features used for EMG time series classification.'''
return {
'mean_abs_value': mean_abs,
'zero_crossings': zero_crossing(threshold),
'slope_sign_changes': slope_sign_changes(threshold),
'waveform_length': waveform_length,
'integrated_emg': abs_sum,
'emg_var': emg_var,
'simple square integral': abs_energy,
'root_mean_square': root_mean_square,
'willison_amplitude': willison_amplitude(threshold),
} | python | def emg_features(threshold=0):
'''Return a dictionary of popular features used for EMG time series classification.'''
return {
'mean_abs_value': mean_abs,
'zero_crossings': zero_crossing(threshold),
'slope_sign_changes': slope_sign_changes(threshold),
'waveform_length': waveform_length,
'integrated_emg': abs_sum,
'emg_var': emg_var,
'simple square integral': abs_energy,
'root_mean_square': root_mean_square,
'willison_amplitude': willison_amplitude(threshold),
} | [
"def",
"emg_features",
"(",
"threshold",
"=",
"0",
")",
":",
"return",
"{",
"'mean_abs_value'",
":",
"mean_abs",
",",
"'zero_crossings'",
":",
"zero_crossing",
"(",
"threshold",
")",
",",
"'slope_sign_changes'",
":",
"slope_sign_changes",
"(",
"threshold",
")",
",",
"'waveform_length'",
":",
"waveform_length",
",",
"'integrated_emg'",
":",
"abs_sum",
",",
"'emg_var'",
":",
"emg_var",
",",
"'simple square integral'",
":",
"abs_energy",
",",
"'root_mean_square'",
":",
"root_mean_square",
",",
"'willison_amplitude'",
":",
"willison_amplitude",
"(",
"threshold",
")",
",",
"}"
] | Return a dictionary of popular features used for EMG time series classification. | [
"Return",
"a",
"dictionary",
"of",
"popular",
"features",
"used",
"for",
"EMG",
"time",
"series",
"classification",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L99-L111 | train | 232,503 |
dmbee/seglearn | seglearn/feature_functions.py | means_abs_diff | def means_abs_diff(X):
''' mean absolute temporal derivative '''
return np.mean(np.abs(np.diff(X, axis=1)), axis=1) | python | def means_abs_diff(X):
''' mean absolute temporal derivative '''
return np.mean(np.abs(np.diff(X, axis=1)), axis=1) | [
"def",
"means_abs_diff",
"(",
"X",
")",
":",
"return",
"np",
".",
"mean",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"X",
",",
"axis",
"=",
"1",
")",
")",
",",
"axis",
"=",
"1",
")"
] | mean absolute temporal derivative | [
"mean",
"absolute",
"temporal",
"derivative"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L189-L191 | train | 232,504 |
dmbee/seglearn | seglearn/feature_functions.py | mse | def mse(X):
''' computes mean spectral energy for each variable in a segmented time series '''
return np.mean(np.square(np.abs(np.fft.fft(X, axis=1))), axis=1) | python | def mse(X):
''' computes mean spectral energy for each variable in a segmented time series '''
return np.mean(np.square(np.abs(np.fft.fft(X, axis=1))), axis=1) | [
"def",
"mse",
"(",
"X",
")",
":",
"return",
"np",
".",
"mean",
"(",
"np",
".",
"square",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"fft",
".",
"fft",
"(",
"X",
",",
"axis",
"=",
"1",
")",
")",
")",
",",
"axis",
"=",
"1",
")"
] | computes mean spectral energy for each variable in a segmented time series | [
"computes",
"mean",
"spectral",
"energy",
"for",
"each",
"variable",
"in",
"a",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L194-L196 | train | 232,505 |
dmbee/seglearn | seglearn/feature_functions.py | mean_crossings | def mean_crossings(X):
''' Computes number of mean crossings for each variable in a segmented time series '''
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
mnx = np.zeros((N, D))
for i in range(D):
pos = X[:, :, i] > 0
npos = ~pos
c = (pos[:, :-1] & npos[:, 1:]) | (npos[:, :-1] & pos[:, 1:])
mnx[:, i] = np.count_nonzero(c, axis=1)
return mnx | python | def mean_crossings(X):
''' Computes number of mean crossings for each variable in a segmented time series '''
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
mnx = np.zeros((N, D))
for i in range(D):
pos = X[:, :, i] > 0
npos = ~pos
c = (pos[:, :-1] & npos[:, 1:]) | (npos[:, :-1] & pos[:, 1:])
mnx[:, i] = np.count_nonzero(c, axis=1)
return mnx | [
"def",
"mean_crossings",
"(",
"X",
")",
":",
"X",
"=",
"np",
".",
"atleast_3d",
"(",
"X",
")",
"N",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"D",
"=",
"X",
".",
"shape",
"[",
"2",
"]",
"mnx",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"D",
")",
")",
"for",
"i",
"in",
"range",
"(",
"D",
")",
":",
"pos",
"=",
"X",
"[",
":",
",",
":",
",",
"i",
"]",
">",
"0",
"npos",
"=",
"~",
"pos",
"c",
"=",
"(",
"pos",
"[",
":",
",",
":",
"-",
"1",
"]",
"&",
"npos",
"[",
":",
",",
"1",
":",
"]",
")",
"|",
"(",
"npos",
"[",
":",
",",
":",
"-",
"1",
"]",
"&",
"pos",
"[",
":",
",",
"1",
":",
"]",
")",
"mnx",
"[",
":",
",",
"i",
"]",
"=",
"np",
".",
"count_nonzero",
"(",
"c",
",",
"axis",
"=",
"1",
")",
"return",
"mnx"
] | Computes number of mean crossings for each variable in a segmented time series | [
"Computes",
"number",
"of",
"mean",
"crossings",
"for",
"each",
"variable",
"in",
"a",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L199-L210 | train | 232,506 |
dmbee/seglearn | seglearn/feature_functions.py | corr2 | def corr2(X):
''' computes correlations between all variable pairs in a segmented time series
.. note:: this feature is expensive to compute with the current implementation, and cannot be
used with univariate time series
'''
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
if D == 1:
return np.zeros(N, dtype=np.float)
trii = np.triu_indices(D, k=1)
DD = len(trii[0])
r = np.zeros((N, DD))
for i in np.arange(N):
rmat = np.corrcoef(X[i]) # get the ith window from each signal, result will be DxD
r[i] = rmat[trii]
return r | python | def corr2(X):
''' computes correlations between all variable pairs in a segmented time series
.. note:: this feature is expensive to compute with the current implementation, and cannot be
used with univariate time series
'''
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
if D == 1:
return np.zeros(N, dtype=np.float)
trii = np.triu_indices(D, k=1)
DD = len(trii[0])
r = np.zeros((N, DD))
for i in np.arange(N):
rmat = np.corrcoef(X[i]) # get the ith window from each signal, result will be DxD
r[i] = rmat[trii]
return r | [
"def",
"corr2",
"(",
"X",
")",
":",
"X",
"=",
"np",
".",
"atleast_3d",
"(",
"X",
")",
"N",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"D",
"=",
"X",
".",
"shape",
"[",
"2",
"]",
"if",
"D",
"==",
"1",
":",
"return",
"np",
".",
"zeros",
"(",
"N",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"trii",
"=",
"np",
".",
"triu_indices",
"(",
"D",
",",
"k",
"=",
"1",
")",
"DD",
"=",
"len",
"(",
"trii",
"[",
"0",
"]",
")",
"r",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"DD",
")",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N",
")",
":",
"rmat",
"=",
"np",
".",
"corrcoef",
"(",
"X",
"[",
"i",
"]",
")",
"# get the ith window from each signal, result will be DxD",
"r",
"[",
"i",
"]",
"=",
"rmat",
"[",
"trii",
"]",
"return",
"r"
] | computes correlations between all variable pairs in a segmented time series
.. note:: this feature is expensive to compute with the current implementation, and cannot be
used with univariate time series | [
"computes",
"correlations",
"between",
"all",
"variable",
"pairs",
"in",
"a",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L241-L260 | train | 232,507 |
dmbee/seglearn | seglearn/feature_functions.py | waveform_length | def waveform_length(X):
''' cumulative length of the waveform over a segment for each variable in the segmented time
series '''
return np.sum(np.abs(np.diff(X, axis=1)), axis=1) | python | def waveform_length(X):
''' cumulative length of the waveform over a segment for each variable in the segmented time
series '''
return np.sum(np.abs(np.diff(X, axis=1)), axis=1) | [
"def",
"waveform_length",
"(",
"X",
")",
":",
"return",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"X",
",",
"axis",
"=",
"1",
")",
")",
",",
"axis",
"=",
"1",
")"
] | cumulative length of the waveform over a segment for each variable in the segmented time
series | [
"cumulative",
"length",
"of",
"the",
"waveform",
"over",
"a",
"segment",
"for",
"each",
"variable",
"in",
"the",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L299-L302 | train | 232,508 |
dmbee/seglearn | seglearn/feature_functions.py | root_mean_square | def root_mean_square(X):
''' root mean square for each variable in the segmented time series '''
segment_width = X.shape[1]
return np.sqrt(np.sum(X * X, axis=1) / segment_width) | python | def root_mean_square(X):
''' root mean square for each variable in the segmented time series '''
segment_width = X.shape[1]
return np.sqrt(np.sum(X * X, axis=1) / segment_width) | [
"def",
"root_mean_square",
"(",
"X",
")",
":",
"segment_width",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"X",
"*",
"X",
",",
"axis",
"=",
"1",
")",
"/",
"segment_width",
")"
] | root mean square for each variable in the segmented time series | [
"root",
"mean",
"square",
"for",
"each",
"variable",
"in",
"the",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L305-L308 | train | 232,509 |
dmbee/seglearn | seglearn/split.py | TemporalKFold.split | def split(self, X, y):
'''
Splits time series data and target arrays, and generates splitting indices
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ]
target vector
Returns
-------
X : array-like, shape [n_series * n_splits, ]
Split time series data and contextual data
y : array-like, shape [n_series * n_splits]
Split target data
cv : list, shape [2, n_splits]
Splitting indices
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
Ns = len(Xt)
Xt_new, y_new = self._ts_slice(Xt, y)
if Xc is not None:
Xc_new = np.concatenate([Xc] * self.n_splits)
X_new = TS_Data(Xt_new, Xc_new)
else:
X_new = np.array(Xt_new)
cv = self._make_indices(Ns)
return X_new, y_new, cv | python | def split(self, X, y):
'''
Splits time series data and target arrays, and generates splitting indices
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ]
target vector
Returns
-------
X : array-like, shape [n_series * n_splits, ]
Split time series data and contextual data
y : array-like, shape [n_series * n_splits]
Split target data
cv : list, shape [2, n_splits]
Splitting indices
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
Ns = len(Xt)
Xt_new, y_new = self._ts_slice(Xt, y)
if Xc is not None:
Xc_new = np.concatenate([Xc] * self.n_splits)
X_new = TS_Data(Xt_new, Xc_new)
else:
X_new = np.array(Xt_new)
cv = self._make_indices(Ns)
return X_new, y_new, cv | [
"def",
"split",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"check_ts_data",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"Ns",
"=",
"len",
"(",
"Xt",
")",
"Xt_new",
",",
"y_new",
"=",
"self",
".",
"_ts_slice",
"(",
"Xt",
",",
"y",
")",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xc_new",
"=",
"np",
".",
"concatenate",
"(",
"[",
"Xc",
"]",
"*",
"self",
".",
"n_splits",
")",
"X_new",
"=",
"TS_Data",
"(",
"Xt_new",
",",
"Xc_new",
")",
"else",
":",
"X_new",
"=",
"np",
".",
"array",
"(",
"Xt_new",
")",
"cv",
"=",
"self",
".",
"_make_indices",
"(",
"Ns",
")",
"return",
"X_new",
",",
"y_new",
",",
"cv"
] | Splits time series data and target arrays, and generates splitting indices
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ]
target vector
Returns
-------
X : array-like, shape [n_series * n_splits, ]
Split time series data and contextual data
y : array-like, shape [n_series * n_splits]
Split target data
cv : list, shape [2, n_splits]
Splitting indices | [
"Splits",
"time",
"series",
"data",
"and",
"target",
"arrays",
"and",
"generates",
"splitting",
"indices"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/split.py#L56-L90 | train | 232,510 |
dmbee/seglearn | seglearn/split.py | TemporalKFold._ts_slice | def _ts_slice(self, Xt, y):
''' takes time series data, and splits each series into temporal folds '''
Ns = len(Xt)
Xt_new = []
for i in range(self.n_splits):
for j in range(Ns):
Njs = int(len(Xt[j]) / self.n_splits)
Xt_new.append(Xt[j][(Njs * i):(Njs * (i + 1))])
Xt_new = np.array(Xt_new)
if len(np.atleast_1d(y[0])) == len(Xt[0]):
# y is a time series
y_new = []
for i in range(self.n_splits):
for j in range(Ns):
Njs = int(len(y[j]) / self.n_splits)
y_new.append(y[j][(Njs * i):(Njs * (i + 1))])
y_new = np.array(y_new)
else:
# y is contextual to each series
y_new = np.concatenate([y for i in range(self.n_splits)])
return Xt_new, y_new | python | def _ts_slice(self, Xt, y):
''' takes time series data, and splits each series into temporal folds '''
Ns = len(Xt)
Xt_new = []
for i in range(self.n_splits):
for j in range(Ns):
Njs = int(len(Xt[j]) / self.n_splits)
Xt_new.append(Xt[j][(Njs * i):(Njs * (i + 1))])
Xt_new = np.array(Xt_new)
if len(np.atleast_1d(y[0])) == len(Xt[0]):
# y is a time series
y_new = []
for i in range(self.n_splits):
for j in range(Ns):
Njs = int(len(y[j]) / self.n_splits)
y_new.append(y[j][(Njs * i):(Njs * (i + 1))])
y_new = np.array(y_new)
else:
# y is contextual to each series
y_new = np.concatenate([y for i in range(self.n_splits)])
return Xt_new, y_new | [
"def",
"_ts_slice",
"(",
"self",
",",
"Xt",
",",
"y",
")",
":",
"Ns",
"=",
"len",
"(",
"Xt",
")",
"Xt_new",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
":",
"for",
"j",
"in",
"range",
"(",
"Ns",
")",
":",
"Njs",
"=",
"int",
"(",
"len",
"(",
"Xt",
"[",
"j",
"]",
")",
"/",
"self",
".",
"n_splits",
")",
"Xt_new",
".",
"append",
"(",
"Xt",
"[",
"j",
"]",
"[",
"(",
"Njs",
"*",
"i",
")",
":",
"(",
"Njs",
"*",
"(",
"i",
"+",
"1",
")",
")",
"]",
")",
"Xt_new",
"=",
"np",
".",
"array",
"(",
"Xt_new",
")",
"if",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"y",
"[",
"0",
"]",
")",
")",
"==",
"len",
"(",
"Xt",
"[",
"0",
"]",
")",
":",
"# y is a time series",
"y_new",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
":",
"for",
"j",
"in",
"range",
"(",
"Ns",
")",
":",
"Njs",
"=",
"int",
"(",
"len",
"(",
"y",
"[",
"j",
"]",
")",
"/",
"self",
".",
"n_splits",
")",
"y_new",
".",
"append",
"(",
"y",
"[",
"j",
"]",
"[",
"(",
"Njs",
"*",
"i",
")",
":",
"(",
"Njs",
"*",
"(",
"i",
"+",
"1",
")",
")",
"]",
")",
"y_new",
"=",
"np",
".",
"array",
"(",
"y_new",
")",
"else",
":",
"# y is contextual to each series",
"y_new",
"=",
"np",
".",
"concatenate",
"(",
"[",
"y",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
")",
"return",
"Xt_new",
",",
"y_new"
] | takes time series data, and splits each series into temporal folds | [
"takes",
"time",
"series",
"data",
"and",
"splits",
"each",
"series",
"into",
"temporal",
"folds"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/split.py#L92-L114 | train | 232,511 |
dmbee/seglearn | seglearn/split.py | TemporalKFold._make_indices | def _make_indices(self, Ns):
''' makes indices for cross validation '''
N_new = int(Ns * self.n_splits)
test = [np.full(N_new, False) for i in range(self.n_splits)]
for i in range(self.n_splits):
test[i][np.arange(Ns * i, Ns * (i + 1))] = True
train = [np.logical_not(test[i]) for i in range(self.n_splits)]
test = [np.arange(N_new)[test[i]] for i in range(self.n_splits)]
train = [np.arange(N_new)[train[i]] for i in range(self.n_splits)]
cv = list(zip(train, test))
return cv | python | def _make_indices(self, Ns):
''' makes indices for cross validation '''
N_new = int(Ns * self.n_splits)
test = [np.full(N_new, False) for i in range(self.n_splits)]
for i in range(self.n_splits):
test[i][np.arange(Ns * i, Ns * (i + 1))] = True
train = [np.logical_not(test[i]) for i in range(self.n_splits)]
test = [np.arange(N_new)[test[i]] for i in range(self.n_splits)]
train = [np.arange(N_new)[train[i]] for i in range(self.n_splits)]
cv = list(zip(train, test))
return cv | [
"def",
"_make_indices",
"(",
"self",
",",
"Ns",
")",
":",
"N_new",
"=",
"int",
"(",
"Ns",
"*",
"self",
".",
"n_splits",
")",
"test",
"=",
"[",
"np",
".",
"full",
"(",
"N_new",
",",
"False",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
":",
"test",
"[",
"i",
"]",
"[",
"np",
".",
"arange",
"(",
"Ns",
"*",
"i",
",",
"Ns",
"*",
"(",
"i",
"+",
"1",
")",
")",
"]",
"=",
"True",
"train",
"=",
"[",
"np",
".",
"logical_not",
"(",
"test",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
"test",
"=",
"[",
"np",
".",
"arange",
"(",
"N_new",
")",
"[",
"test",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
"train",
"=",
"[",
"np",
".",
"arange",
"(",
"N_new",
")",
"[",
"train",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
"cv",
"=",
"list",
"(",
"zip",
"(",
"train",
",",
"test",
")",
")",
"return",
"cv"
] | makes indices for cross validation | [
"makes",
"indices",
"for",
"cross",
"validation"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/split.py#L116-L129 | train | 232,512 |
dmbee/seglearn | seglearn/preprocessing.py | TargetRunLengthEncoder.transform | def transform(self, X, y, sample_weight=None):
'''
Transforms the time series data with run length encoding of the target variable
Note this transformation changes the number of samples in the data
If sample_weight is provided, it is transformed to align to the new target encoding
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ...]
target variable encoded as a time series
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_rle_series, ]
transformed time series data
yt : array-like, shape [n_rle_series]
target values for each series
sample_weight_new : array-like shape [n_rle_series]
sample weights
'''
check_ts_data_with_ts_target(X, y)
Xt, Xc = get_ts_data_parts(X)
N = len(Xt) # number of time series
# transformed data
yt = []
Xtt = []
swt = sample_weight
Nt = []
for i in range(N):
Xi, yi = self._transform(Xt[i], y[i])
yt+=yi
Xtt+=Xi
Nt.append(len(yi)) # number of contiguous class instances
if Xc is not None:
Xct = expand_variables_to_segments(Xc, Nt)
Xtt = TS_Data(Xtt, Xct)
if sample_weight is not None:
swt = expand_variables_to_segments(sample_weight, Nt)
return Xtt, yt, swt | python | def transform(self, X, y, sample_weight=None):
'''
Transforms the time series data with run length encoding of the target variable
Note this transformation changes the number of samples in the data
If sample_weight is provided, it is transformed to align to the new target encoding
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ...]
target variable encoded as a time series
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_rle_series, ]
transformed time series data
yt : array-like, shape [n_rle_series]
target values for each series
sample_weight_new : array-like shape [n_rle_series]
sample weights
'''
check_ts_data_with_ts_target(X, y)
Xt, Xc = get_ts_data_parts(X)
N = len(Xt) # number of time series
# transformed data
yt = []
Xtt = []
swt = sample_weight
Nt = []
for i in range(N):
Xi, yi = self._transform(Xt[i], y[i])
yt+=yi
Xtt+=Xi
Nt.append(len(yi)) # number of contiguous class instances
if Xc is not None:
Xct = expand_variables_to_segments(Xc, Nt)
Xtt = TS_Data(Xtt, Xct)
if sample_weight is not None:
swt = expand_variables_to_segments(sample_weight, Nt)
return Xtt, yt, swt | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
",",
"sample_weight",
"=",
"None",
")",
":",
"check_ts_data_with_ts_target",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"N",
"=",
"len",
"(",
"Xt",
")",
"# number of time series",
"# transformed data",
"yt",
"=",
"[",
"]",
"Xtt",
"=",
"[",
"]",
"swt",
"=",
"sample_weight",
"Nt",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"Xi",
",",
"yi",
"=",
"self",
".",
"_transform",
"(",
"Xt",
"[",
"i",
"]",
",",
"y",
"[",
"i",
"]",
")",
"yt",
"+=",
"yi",
"Xtt",
"+=",
"Xi",
"Nt",
".",
"append",
"(",
"len",
"(",
"yi",
")",
")",
"# number of contiguous class instances",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xct",
"=",
"expand_variables_to_segments",
"(",
"Xc",
",",
"Nt",
")",
"Xtt",
"=",
"TS_Data",
"(",
"Xtt",
",",
"Xct",
")",
"if",
"sample_weight",
"is",
"not",
"None",
":",
"swt",
"=",
"expand_variables_to_segments",
"(",
"sample_weight",
",",
"Nt",
")",
"return",
"Xtt",
",",
"yt",
",",
"swt"
] | Transforms the time series data with run length encoding of the target variable
Note this transformation changes the number of samples in the data
If sample_weight is provided, it is transformed to align to the new target encoding
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ...]
target variable encoded as a time series
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_rle_series, ]
transformed time series data
yt : array-like, shape [n_rle_series]
target values for each series
sample_weight_new : array-like shape [n_rle_series]
sample weights | [
"Transforms",
"the",
"time",
"series",
"data",
"with",
"run",
"length",
"encoding",
"of",
"the",
"target",
"variable",
"Note",
"this",
"transformation",
"changes",
"the",
"number",
"of",
"samples",
"in",
"the",
"data",
"If",
"sample_weight",
"is",
"provided",
"it",
"is",
"transformed",
"to",
"align",
"to",
"the",
"new",
"target",
"encoding"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/preprocessing.py#L66-L115 | train | 232,513 |
dmbee/seglearn | seglearn/preprocessing.py | TargetRunLengthEncoder._rle | def _rle(self, a):
'''
rle implementation credit to Thomas Browne from his SOF post Sept 2015
Parameters
----------
a : array, shape[n,]
input vector
Returns
-------
z : array, shape[nt,]
run lengths
p : array, shape[nt,]
start positions of each run
ar : array, shape[nt,]
values for each run
'''
ia = np.asarray(a)
n = len(ia)
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
i = np.append(np.where(y), n - 1) # must include last element posi
z = np.diff(np.append(-1, i)) # run lengths
p = np.cumsum(np.append(0, z))[:-1] # positions
return (z, p, ia[i]) | python | def _rle(self, a):
'''
rle implementation credit to Thomas Browne from his SOF post Sept 2015
Parameters
----------
a : array, shape[n,]
input vector
Returns
-------
z : array, shape[nt,]
run lengths
p : array, shape[nt,]
start positions of each run
ar : array, shape[nt,]
values for each run
'''
ia = np.asarray(a)
n = len(ia)
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
i = np.append(np.where(y), n - 1) # must include last element posi
z = np.diff(np.append(-1, i)) # run lengths
p = np.cumsum(np.append(0, z))[:-1] # positions
return (z, p, ia[i]) | [
"def",
"_rle",
"(",
"self",
",",
"a",
")",
":",
"ia",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"n",
"=",
"len",
"(",
"ia",
")",
"y",
"=",
"np",
".",
"array",
"(",
"ia",
"[",
"1",
":",
"]",
"!=",
"ia",
"[",
":",
"-",
"1",
"]",
")",
"# pairwise unequal (string safe)",
"i",
"=",
"np",
".",
"append",
"(",
"np",
".",
"where",
"(",
"y",
")",
",",
"n",
"-",
"1",
")",
"# must include last element posi",
"z",
"=",
"np",
".",
"diff",
"(",
"np",
".",
"append",
"(",
"-",
"1",
",",
"i",
")",
")",
"# run lengths",
"p",
"=",
"np",
".",
"cumsum",
"(",
"np",
".",
"append",
"(",
"0",
",",
"z",
")",
")",
"[",
":",
"-",
"1",
"]",
"# positions",
"return",
"(",
"z",
",",
"p",
",",
"ia",
"[",
"i",
"]",
")"
] | rle implementation credit to Thomas Browne from his SOF post Sept 2015
Parameters
----------
a : array, shape[n,]
input vector
Returns
-------
z : array, shape[nt,]
run lengths
p : array, shape[nt,]
start positions of each run
ar : array, shape[nt,]
values for each run | [
"rle",
"implementation",
"credit",
"to",
"Thomas",
"Browne",
"from",
"his",
"SOF",
"post",
"Sept",
"2015"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/preprocessing.py#L117-L141 | train | 232,514 |
dmbee/seglearn | seglearn/preprocessing.py | TargetRunLengthEncoder._transform | def _transform(self, X, y):
'''
Transforms single series
'''
z, p, y_rle = self._rle(y)
p = np.append(p, len(y))
big_enough = p[1:] - p[:-1] >= self.min_length
Xt = []
for i in range(len(y_rle)):
if (big_enough[i]):
Xt.append(X[p[i]:p[i+1]])
yt = y_rle[big_enough].tolist()
return Xt, yt | python | def _transform(self, X, y):
'''
Transforms single series
'''
z, p, y_rle = self._rle(y)
p = np.append(p, len(y))
big_enough = p[1:] - p[:-1] >= self.min_length
Xt = []
for i in range(len(y_rle)):
if (big_enough[i]):
Xt.append(X[p[i]:p[i+1]])
yt = y_rle[big_enough].tolist()
return Xt, yt | [
"def",
"_transform",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"z",
",",
"p",
",",
"y_rle",
"=",
"self",
".",
"_rle",
"(",
"y",
")",
"p",
"=",
"np",
".",
"append",
"(",
"p",
",",
"len",
"(",
"y",
")",
")",
"big_enough",
"=",
"p",
"[",
"1",
":",
"]",
"-",
"p",
"[",
":",
"-",
"1",
"]",
">=",
"self",
".",
"min_length",
"Xt",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"y_rle",
")",
")",
":",
"if",
"(",
"big_enough",
"[",
"i",
"]",
")",
":",
"Xt",
".",
"append",
"(",
"X",
"[",
"p",
"[",
"i",
"]",
":",
"p",
"[",
"i",
"+",
"1",
"]",
"]",
")",
"yt",
"=",
"y_rle",
"[",
"big_enough",
"]",
".",
"tolist",
"(",
")",
"return",
"Xt",
",",
"yt"
] | Transforms single series | [
"Transforms",
"single",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/preprocessing.py#L143-L157 | train | 232,515 |
dmbee/seglearn | seglearn/util.py | get_ts_data_parts | def get_ts_data_parts(X):
'''
Separates time series data object into time series variables and contextual variables
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_series, ]
Time series data
Xs : array-like, shape [n_series, n_contextd = np.colum _variables]
contextual variables
'''
if not isinstance(X, TS_Data):
return X, None
return X.ts_data, X.context_data | python | def get_ts_data_parts(X):
'''
Separates time series data object into time series variables and contextual variables
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_series, ]
Time series data
Xs : array-like, shape [n_series, n_contextd = np.colum _variables]
contextual variables
'''
if not isinstance(X, TS_Data):
return X, None
return X.ts_data, X.context_data | [
"def",
"get_ts_data_parts",
"(",
"X",
")",
":",
"if",
"not",
"isinstance",
"(",
"X",
",",
"TS_Data",
")",
":",
"return",
"X",
",",
"None",
"return",
"X",
".",
"ts_data",
",",
"X",
".",
"context_data"
] | Separates time series data object into time series variables and contextual variables
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_series, ]
Time series data
Xs : array-like, shape [n_series, n_contextd = np.colum _variables]
contextual variables | [
"Separates",
"time",
"series",
"data",
"object",
"into",
"time",
"series",
"variables",
"and",
"contextual",
"variables"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/util.py#L13-L32 | train | 232,516 |
dmbee/seglearn | seglearn/util.py | check_ts_data_with_ts_target | def check_ts_data_with_ts_target(X, y=None):
'''
Checks time series data with time series target is good. If not raises value error.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series, ...]
target data
'''
if y is not None:
Nx = len(X)
Ny = len(y)
if Nx != Ny:
raise ValueError("Number of time series different in X (%d) and y (%d)"
% (Nx, Ny))
Xt, _ = get_ts_data_parts(X)
Ntx = np.array([len(Xt[i]) for i in np.arange(Nx)])
Nty = np.array([len(np.atleast_1d(y[i])) for i in np.arange(Nx)])
if np.count_nonzero(Nty == Ntx) == Nx:
return
else:
raise ValueError("Invalid time series lengths.\n"
"Ns: ", Nx,
"Ntx: ", Ntx,
"Nty: ", Nty) | python | def check_ts_data_with_ts_target(X, y=None):
'''
Checks time series data with time series target is good. If not raises value error.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series, ...]
target data
'''
if y is not None:
Nx = len(X)
Ny = len(y)
if Nx != Ny:
raise ValueError("Number of time series different in X (%d) and y (%d)"
% (Nx, Ny))
Xt, _ = get_ts_data_parts(X)
Ntx = np.array([len(Xt[i]) for i in np.arange(Nx)])
Nty = np.array([len(np.atleast_1d(y[i])) for i in np.arange(Nx)])
if np.count_nonzero(Nty == Ntx) == Nx:
return
else:
raise ValueError("Invalid time series lengths.\n"
"Ns: ", Nx,
"Ntx: ", Ntx,
"Nty: ", Nty) | [
"def",
"check_ts_data_with_ts_target",
"(",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"y",
"is",
"not",
"None",
":",
"Nx",
"=",
"len",
"(",
"X",
")",
"Ny",
"=",
"len",
"(",
"y",
")",
"if",
"Nx",
"!=",
"Ny",
":",
"raise",
"ValueError",
"(",
"\"Number of time series different in X (%d) and y (%d)\"",
"%",
"(",
"Nx",
",",
"Ny",
")",
")",
"Xt",
",",
"_",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"Ntx",
"=",
"np",
".",
"array",
"(",
"[",
"len",
"(",
"Xt",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"Nx",
")",
"]",
")",
"Nty",
"=",
"np",
".",
"array",
"(",
"[",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"y",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"Nx",
")",
"]",
")",
"if",
"np",
".",
"count_nonzero",
"(",
"Nty",
"==",
"Ntx",
")",
"==",
"Nx",
":",
"return",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid time series lengths.\\n\"",
"\"Ns: \"",
",",
"Nx",
",",
"\"Ntx: \"",
",",
"Ntx",
",",
"\"Nty: \"",
",",
"Nty",
")"
] | Checks time series data with time series target is good. If not raises value error.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series, ...]
target data | [
"Checks",
"time",
"series",
"data",
"with",
"time",
"series",
"target",
"is",
"good",
".",
"If",
"not",
"raises",
"value",
"error",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/util.py#L67-L96 | train | 232,517 |
dmbee/seglearn | seglearn/util.py | ts_stats | def ts_stats(Xt, y, fs=1.0, class_labels=None):
'''
Generates some helpful statistics about the data X
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series]
target data
fs : float
sampling frequency
class_labels : list of strings, default None
List of target class names
Returns
-------
results : dict
| Dictionary of relevant statistics for the time series data
| results['total'] has stats for the whole data set
| results['by_class'] has stats segragated by target class
'''
check_ts_data(Xt)
Xt, Xs = get_ts_data_parts(Xt)
if Xs is not None:
S = len(np.atleast_1d(Xs[0]))
else:
S = 0
C = np.max(y) + 1 # number of classes
if class_labels is None:
class_labels = np.arange(C)
N = len(Xt)
if Xt[0].ndim > 1:
D = Xt[0].shape[1]
else:
D = 1
Ti = np.array([Xt[i].shape[0] for i in range(N)], dtype=np.float64) / fs
ic = np.array([y == i for i in range(C)])
Tic = [Ti[ic[i]] for i in range(C)]
T = np.sum(Ti)
total = {"n_series": N, "n_classes": C, "n_TS_vars": D, "n_context_vars": S, "Total_Time": T,
"Series_Time_Mean": np.mean(Ti),
"Series_Time_Std": np.std(Ti),
"Series_Time_Range": (np.min(Ti), np.max(Ti))}
by_class = {"Class_labels": class_labels,
"n_series": np.array([len(Tic[i]) for i in range(C)]),
"Total_Time": np.array([np.sum(Tic[i]) for i in range(C)]),
"Series_Time_Mean": np.array([np.mean(Tic[i]) for i in range(C)]),
"Series_Time_Std": np.array([np.std(Tic[i]) for i in range(C)]),
"Series_Time_Min": np.array([np.min(Tic[i]) for i in range(C)]),
"Series_Time_Max": np.array([np.max(Tic[i]) for i in range(C)])}
results = {'total': total,
'by_class': by_class}
return results | python | def ts_stats(Xt, y, fs=1.0, class_labels=None):
'''
Generates some helpful statistics about the data X
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series]
target data
fs : float
sampling frequency
class_labels : list of strings, default None
List of target class names
Returns
-------
results : dict
| Dictionary of relevant statistics for the time series data
| results['total'] has stats for the whole data set
| results['by_class'] has stats segragated by target class
'''
check_ts_data(Xt)
Xt, Xs = get_ts_data_parts(Xt)
if Xs is not None:
S = len(np.atleast_1d(Xs[0]))
else:
S = 0
C = np.max(y) + 1 # number of classes
if class_labels is None:
class_labels = np.arange(C)
N = len(Xt)
if Xt[0].ndim > 1:
D = Xt[0].shape[1]
else:
D = 1
Ti = np.array([Xt[i].shape[0] for i in range(N)], dtype=np.float64) / fs
ic = np.array([y == i for i in range(C)])
Tic = [Ti[ic[i]] for i in range(C)]
T = np.sum(Ti)
total = {"n_series": N, "n_classes": C, "n_TS_vars": D, "n_context_vars": S, "Total_Time": T,
"Series_Time_Mean": np.mean(Ti),
"Series_Time_Std": np.std(Ti),
"Series_Time_Range": (np.min(Ti), np.max(Ti))}
by_class = {"Class_labels": class_labels,
"n_series": np.array([len(Tic[i]) for i in range(C)]),
"Total_Time": np.array([np.sum(Tic[i]) for i in range(C)]),
"Series_Time_Mean": np.array([np.mean(Tic[i]) for i in range(C)]),
"Series_Time_Std": np.array([np.std(Tic[i]) for i in range(C)]),
"Series_Time_Min": np.array([np.min(Tic[i]) for i in range(C)]),
"Series_Time_Max": np.array([np.max(Tic[i]) for i in range(C)])}
results = {'total': total,
'by_class': by_class}
return results | [
"def",
"ts_stats",
"(",
"Xt",
",",
"y",
",",
"fs",
"=",
"1.0",
",",
"class_labels",
"=",
"None",
")",
":",
"check_ts_data",
"(",
"Xt",
")",
"Xt",
",",
"Xs",
"=",
"get_ts_data_parts",
"(",
"Xt",
")",
"if",
"Xs",
"is",
"not",
"None",
":",
"S",
"=",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"Xs",
"[",
"0",
"]",
")",
")",
"else",
":",
"S",
"=",
"0",
"C",
"=",
"np",
".",
"max",
"(",
"y",
")",
"+",
"1",
"# number of classes",
"if",
"class_labels",
"is",
"None",
":",
"class_labels",
"=",
"np",
".",
"arange",
"(",
"C",
")",
"N",
"=",
"len",
"(",
"Xt",
")",
"if",
"Xt",
"[",
"0",
"]",
".",
"ndim",
">",
"1",
":",
"D",
"=",
"Xt",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
"else",
":",
"D",
"=",
"1",
"Ti",
"=",
"np",
".",
"array",
"(",
"[",
"Xt",
"[",
"i",
"]",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"N",
")",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"/",
"fs",
"ic",
"=",
"np",
".",
"array",
"(",
"[",
"y",
"==",
"i",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
"Tic",
"=",
"[",
"Ti",
"[",
"ic",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
"T",
"=",
"np",
".",
"sum",
"(",
"Ti",
")",
"total",
"=",
"{",
"\"n_series\"",
":",
"N",
",",
"\"n_classes\"",
":",
"C",
",",
"\"n_TS_vars\"",
":",
"D",
",",
"\"n_context_vars\"",
":",
"S",
",",
"\"Total_Time\"",
":",
"T",
",",
"\"Series_Time_Mean\"",
":",
"np",
".",
"mean",
"(",
"Ti",
")",
",",
"\"Series_Time_Std\"",
":",
"np",
".",
"std",
"(",
"Ti",
")",
",",
"\"Series_Time_Range\"",
":",
"(",
"np",
".",
"min",
"(",
"Ti",
")",
",",
"np",
".",
"max",
"(",
"Ti",
")",
")",
"}",
"by_class",
"=",
"{",
"\"Class_labels\"",
":",
"class_labels",
",",
"\"n_series\"",
":",
"np",
".",
"array",
"(",
"[",
"len",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Total_Time\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"sum",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Series_Time_Mean\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"mean",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Series_Time_Std\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"std",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Series_Time_Min\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"min",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Series_Time_Max\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"max",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
"}",
"results",
"=",
"{",
"'total'",
":",
"total",
",",
"'by_class'",
":",
"by_class",
"}",
"return",
"results"
] | Generates some helpful statistics about the data X
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series]
target data
fs : float
sampling frequency
class_labels : list of strings, default None
List of target class names
Returns
-------
results : dict
| Dictionary of relevant statistics for the time series data
| results['total'] has stats for the whole data set
| results['by_class'] has stats segragated by target class | [
"Generates",
"some",
"helpful",
"statistics",
"about",
"the",
"data",
"X"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/util.py#L99-L163 | train | 232,518 |
dmbee/seglearn | seglearn/datasets.py | load_watch | def load_watch():
'''
Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was
recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a
smartwatch. It is a multivariate time series.
The study can be found here: https://arxiv.org/abs/1802.01489
Returns
-------
data : dict
data['X'] : list, length 140
| inertial sensor data, each element with shape [n_samples, 6]
| sampled at 50 Hz
data['y'] : array, length 140
target vector (exercise type)
data['side'] : array, length 140
the extremity side, 1 = right, 0 = left
data['subject'] : array, length 140
the subject (participant) number
data['X_labels'] : str list, length 6
ordered labels for the sensor data variables
data['y_labels'] :str list, length 7
ordered labels for the target (exercise type)
Examples
--------
>>> from seglearn.datasets import load_watch
>>> data = load_watch()
>>> print(data.keys())
'''
module_path = dirname(__file__)
data = np.load(module_path + "/data/watch_dataset.npy").item()
return data | python | def load_watch():
'''
Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was
recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a
smartwatch. It is a multivariate time series.
The study can be found here: https://arxiv.org/abs/1802.01489
Returns
-------
data : dict
data['X'] : list, length 140
| inertial sensor data, each element with shape [n_samples, 6]
| sampled at 50 Hz
data['y'] : array, length 140
target vector (exercise type)
data['side'] : array, length 140
the extremity side, 1 = right, 0 = left
data['subject'] : array, length 140
the subject (participant) number
data['X_labels'] : str list, length 6
ordered labels for the sensor data variables
data['y_labels'] :str list, length 7
ordered labels for the target (exercise type)
Examples
--------
>>> from seglearn.datasets import load_watch
>>> data = load_watch()
>>> print(data.keys())
'''
module_path = dirname(__file__)
data = np.load(module_path + "/data/watch_dataset.npy").item()
return data | [
"def",
"load_watch",
"(",
")",
":",
"module_path",
"=",
"dirname",
"(",
"__file__",
")",
"data",
"=",
"np",
".",
"load",
"(",
"module_path",
"+",
"\"/data/watch_dataset.npy\"",
")",
".",
"item",
"(",
")",
"return",
"data"
] | Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was
recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a
smartwatch. It is a multivariate time series.
The study can be found here: https://arxiv.org/abs/1802.01489
Returns
-------
data : dict
data['X'] : list, length 140
| inertial sensor data, each element with shape [n_samples, 6]
| sampled at 50 Hz
data['y'] : array, length 140
target vector (exercise type)
data['side'] : array, length 140
the extremity side, 1 = right, 0 = left
data['subject'] : array, length 140
the subject (participant) number
data['X_labels'] : str list, length 6
ordered labels for the sensor data variables
data['y_labels'] :str list, length 7
ordered labels for the target (exercise type)
Examples
--------
>>> from seglearn.datasets import load_watch
>>> data = load_watch()
>>> print(data.keys()) | [
"Loads",
"some",
"of",
"the",
"6",
"-",
"axis",
"inertial",
"sensor",
"data",
"from",
"my",
"smartwatch",
"project",
".",
"The",
"sensor",
"data",
"was",
"recorded",
"as",
"study",
"subjects",
"performed",
"sets",
"of",
"20",
"shoulder",
"exercise",
"repetitions",
"while",
"wearing",
"a",
"smartwatch",
".",
"It",
"is",
"a",
"multivariate",
"time",
"series",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/datasets.py#L13-L46 | train | 232,519 |
dmbee/seglearn | seglearn/transform.py | shuffle_data | def shuffle_data(X, y=None, sample_weight=None):
''' Shuffles indices X, y, and sample_weight together'''
if len(X) > 1:
ind = np.arange(len(X), dtype=np.int)
np.random.shuffle(ind)
Xt = X[ind]
yt = y
swt = sample_weight
if yt is not None:
yt = yt[ind]
if swt is not None:
swt = swt[ind]
return Xt, yt, swt
else:
return X, y, sample_weight | python | def shuffle_data(X, y=None, sample_weight=None):
''' Shuffles indices X, y, and sample_weight together'''
if len(X) > 1:
ind = np.arange(len(X), dtype=np.int)
np.random.shuffle(ind)
Xt = X[ind]
yt = y
swt = sample_weight
if yt is not None:
yt = yt[ind]
if swt is not None:
swt = swt[ind]
return Xt, yt, swt
else:
return X, y, sample_weight | [
"def",
"shuffle_data",
"(",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"if",
"len",
"(",
"X",
")",
">",
"1",
":",
"ind",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"X",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"ind",
")",
"Xt",
"=",
"X",
"[",
"ind",
"]",
"yt",
"=",
"y",
"swt",
"=",
"sample_weight",
"if",
"yt",
"is",
"not",
"None",
":",
"yt",
"=",
"yt",
"[",
"ind",
"]",
"if",
"swt",
"is",
"not",
"None",
":",
"swt",
"=",
"swt",
"[",
"ind",
"]",
"return",
"Xt",
",",
"yt",
",",
"swt",
"else",
":",
"return",
"X",
",",
"y",
",",
"sample_weight"
] | Shuffles indices X, y, and sample_weight together | [
"Shuffles",
"indices",
"X",
"y",
"and",
"sample_weight",
"together"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L70-L86 | train | 232,520 |
dmbee/seglearn | seglearn/transform.py | expand_variables_to_segments | def expand_variables_to_segments(v, Nt):
''' expands contextual variables v, by repeating each instance as specified in Nt '''
N_v = len(np.atleast_1d(v[0]))
return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))]) | python | def expand_variables_to_segments(v, Nt):
''' expands contextual variables v, by repeating each instance as specified in Nt '''
N_v = len(np.atleast_1d(v[0]))
return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))]) | [
"def",
"expand_variables_to_segments",
"(",
"v",
",",
"Nt",
")",
":",
"N_v",
"=",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"v",
"[",
"0",
"]",
")",
")",
"return",
"np",
".",
"concatenate",
"(",
"[",
"np",
".",
"full",
"(",
"(",
"Nt",
"[",
"i",
"]",
",",
"N_v",
")",
",",
"v",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"v",
")",
")",
"]",
")"
] | expands contextual variables v, by repeating each instance as specified in Nt | [
"expands",
"contextual",
"variables",
"v",
"by",
"repeating",
"each",
"instance",
"as",
"specified",
"in",
"Nt"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L549-L552 | train | 232,521 |
dmbee/seglearn | seglearn/transform.py | sliding_window | def sliding_window(time_series, width, step, order='F'):
'''
Segments univariate time series with sliding window
Parameters
----------
time_series : array like shape [n_samples]
time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
w : array like shape [n_segments, width]
resampled time series segments
'''
w = np.hstack([time_series[i:1 + i - width or None:step] for i in range(0, width)])
result = w.reshape((int(len(w) / width), width), order='F')
if order == 'F':
return result
else:
return np.ascontiguousarray(result) | python | def sliding_window(time_series, width, step, order='F'):
'''
Segments univariate time series with sliding window
Parameters
----------
time_series : array like shape [n_samples]
time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
w : array like shape [n_segments, width]
resampled time series segments
'''
w = np.hstack([time_series[i:1 + i - width or None:step] for i in range(0, width)])
result = w.reshape((int(len(w) / width), width), order='F')
if order == 'F':
return result
else:
return np.ascontiguousarray(result) | [
"def",
"sliding_window",
"(",
"time_series",
",",
"width",
",",
"step",
",",
"order",
"=",
"'F'",
")",
":",
"w",
"=",
"np",
".",
"hstack",
"(",
"[",
"time_series",
"[",
"i",
":",
"1",
"+",
"i",
"-",
"width",
"or",
"None",
":",
"step",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"width",
")",
"]",
")",
"result",
"=",
"w",
".",
"reshape",
"(",
"(",
"int",
"(",
"len",
"(",
"w",
")",
"/",
"width",
")",
",",
"width",
")",
",",
"order",
"=",
"'F'",
")",
"if",
"order",
"==",
"'F'",
":",
"return",
"result",
"else",
":",
"return",
"np",
".",
"ascontiguousarray",
"(",
"result",
")"
] | Segments univariate time series with sliding window
Parameters
----------
time_series : array like shape [n_samples]
time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
w : array like shape [n_segments, width]
resampled time series segments | [
"Segments",
"univariate",
"time",
"series",
"with",
"sliding",
"window"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L555-L578 | train | 232,522 |
dmbee/seglearn | seglearn/transform.py | sliding_tensor | def sliding_tensor(mv_time_series, width, step, order='F'):
'''
segments multivariate time series with sliding window
Parameters
----------
mv_time_series : array like shape [n_samples, n_variables]
multivariate time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
data : array like shape [n_segments, width, n_variables]
segmented multivariate time series data
'''
D = mv_time_series.shape[1]
data = [sliding_window(mv_time_series[:, j], width, step, order) for j in range(D)]
return np.stack(data, axis=2) | python | def sliding_tensor(mv_time_series, width, step, order='F'):
'''
segments multivariate time series with sliding window
Parameters
----------
mv_time_series : array like shape [n_samples, n_variables]
multivariate time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
data : array like shape [n_segments, width, n_variables]
segmented multivariate time series data
'''
D = mv_time_series.shape[1]
data = [sliding_window(mv_time_series[:, j], width, step, order) for j in range(D)]
return np.stack(data, axis=2) | [
"def",
"sliding_tensor",
"(",
"mv_time_series",
",",
"width",
",",
"step",
",",
"order",
"=",
"'F'",
")",
":",
"D",
"=",
"mv_time_series",
".",
"shape",
"[",
"1",
"]",
"data",
"=",
"[",
"sliding_window",
"(",
"mv_time_series",
"[",
":",
",",
"j",
"]",
",",
"width",
",",
"step",
",",
"order",
")",
"for",
"j",
"in",
"range",
"(",
"D",
")",
"]",
"return",
"np",
".",
"stack",
"(",
"data",
",",
"axis",
"=",
"2",
")"
] | segments multivariate time series with sliding window
Parameters
----------
mv_time_series : array like shape [n_samples, n_variables]
multivariate time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
data : array like shape [n_segments, width, n_variables]
segmented multivariate time series data | [
"segments",
"multivariate",
"time",
"series",
"with",
"sliding",
"window"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L581-L601 | train | 232,523 |
dmbee/seglearn | seglearn/transform.py | SegmentXY.transform | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into segments
Note this transformation changes the number of samples in the data
If y is provided, it is segmented and transformed to align to the new samples as per
``y_func``
Currently sample weights always returned as None
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_segments, ]
transformed time series data
yt : array-like, shape [n_segments]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
N = len(Xt) # number of time series
if Xt[0].ndim > 1:
Xt = np.array([sliding_tensor(Xt[i], self.width, self._step, self.order)
for i in np.arange(N)])
else:
Xt = np.array([sliding_window(Xt[i], self.width, self._step, self.order)
for i in np.arange(N)])
Nt = [len(Xt[i]) for i in np.arange(len(Xt))]
Xt = np.concatenate(Xt)
if Xc is not None:
Xc = expand_variables_to_segments(Xc, Nt)
Xt = TS_Data(Xt, Xc)
if yt is not None:
yt = np.array([sliding_window(yt[i], self.width, self._step, self.order)
for i in np.arange(N)])
yt = np.concatenate(yt)
yt = self.y_func(yt)
if self.shuffle is True:
check_random_state(self.random_state)
Xt, yt, _ = shuffle_data(Xt, yt)
return Xt, yt, None | python | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into segments
Note this transformation changes the number of samples in the data
If y is provided, it is segmented and transformed to align to the new samples as per
``y_func``
Currently sample weights always returned as None
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_segments, ]
transformed time series data
yt : array-like, shape [n_segments]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
N = len(Xt) # number of time series
if Xt[0].ndim > 1:
Xt = np.array([sliding_tensor(Xt[i], self.width, self._step, self.order)
for i in np.arange(N)])
else:
Xt = np.array([sliding_window(Xt[i], self.width, self._step, self.order)
for i in np.arange(N)])
Nt = [len(Xt[i]) for i in np.arange(len(Xt))]
Xt = np.concatenate(Xt)
if Xc is not None:
Xc = expand_variables_to_segments(Xc, Nt)
Xt = TS_Data(Xt, Xc)
if yt is not None:
yt = np.array([sliding_window(yt[i], self.width, self._step, self.order)
for i in np.arange(N)])
yt = np.concatenate(yt)
yt = self.y_func(yt)
if self.shuffle is True:
check_random_state(self.random_state)
Xt, yt, _ = shuffle_data(Xt, yt)
return Xt, yt, None | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"check_ts_data",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"yt",
"=",
"y",
"N",
"=",
"len",
"(",
"Xt",
")",
"# number of time series",
"if",
"Xt",
"[",
"0",
"]",
".",
"ndim",
">",
"1",
":",
"Xt",
"=",
"np",
".",
"array",
"(",
"[",
"sliding_tensor",
"(",
"Xt",
"[",
"i",
"]",
",",
"self",
".",
"width",
",",
"self",
".",
"_step",
",",
"self",
".",
"order",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N",
")",
"]",
")",
"else",
":",
"Xt",
"=",
"np",
".",
"array",
"(",
"[",
"sliding_window",
"(",
"Xt",
"[",
"i",
"]",
",",
"self",
".",
"width",
",",
"self",
".",
"_step",
",",
"self",
".",
"order",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N",
")",
"]",
")",
"Nt",
"=",
"[",
"len",
"(",
"Xt",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"Xt",
")",
")",
"]",
"Xt",
"=",
"np",
".",
"concatenate",
"(",
"Xt",
")",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xc",
"=",
"expand_variables_to_segments",
"(",
"Xc",
",",
"Nt",
")",
"Xt",
"=",
"TS_Data",
"(",
"Xt",
",",
"Xc",
")",
"if",
"yt",
"is",
"not",
"None",
":",
"yt",
"=",
"np",
".",
"array",
"(",
"[",
"sliding_window",
"(",
"yt",
"[",
"i",
"]",
",",
"self",
".",
"width",
",",
"self",
".",
"_step",
",",
"self",
".",
"order",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N",
")",
"]",
")",
"yt",
"=",
"np",
".",
"concatenate",
"(",
"yt",
")",
"yt",
"=",
"self",
".",
"y_func",
"(",
"yt",
")",
"if",
"self",
".",
"shuffle",
"is",
"True",
":",
"check_random_state",
"(",
"self",
".",
"random_state",
")",
"Xt",
",",
"yt",
",",
"_",
"=",
"shuffle_data",
"(",
"Xt",
",",
"yt",
")",
"return",
"Xt",
",",
"yt",
",",
"None"
] | Transforms the time series data into segments
Note this transformation changes the number of samples in the data
If y is provided, it is segmented and transformed to align to the new samples as per
``y_func``
Currently sample weights always returned as None
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_segments, ]
transformed time series data
yt : array-like, shape [n_segments]
expanded target vector
sample_weight_new : None | [
"Transforms",
"the",
"time",
"series",
"data",
"into",
"segments",
"Note",
"this",
"transformation",
"changes",
"the",
"number",
"of",
"samples",
"in",
"the",
"data",
"If",
"y",
"is",
"provided",
"it",
"is",
"segmented",
"and",
"transformed",
"to",
"align",
"to",
"the",
"new",
"samples",
"as",
"per",
"y_func",
"Currently",
"sample",
"weights",
"always",
"returned",
"as",
"None"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L329-L385 | train | 232,524 |
dmbee/seglearn | seglearn/transform.py | PadTrunc.transform | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into fixed length segments using padding and or truncation
If y is a time series and passed, it will be transformed as well
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
Xt = self._mv_resize(Xt)
if Xc is not None:
Xt = TS_Data(Xt, Xc)
if yt is not None and len(np.atleast_1d(yt[0])) > 1:
# y is a time series
yt = self._mv_resize(yt)
swt = None
elif yt is not None:
# todo: is this needed?
yt = np.array(yt)
return Xt, yt, swt | python | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into fixed length segments using padding and or truncation
If y is a time series and passed, it will be transformed as well
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
Xt = self._mv_resize(Xt)
if Xc is not None:
Xt = TS_Data(Xt, Xc)
if yt is not None and len(np.atleast_1d(yt[0])) > 1:
# y is a time series
yt = self._mv_resize(yt)
swt = None
elif yt is not None:
# todo: is this needed?
yt = np.array(yt)
return Xt, yt, swt | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"check_ts_data",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"yt",
"=",
"y",
"swt",
"=",
"sample_weight",
"Xt",
"=",
"self",
".",
"_mv_resize",
"(",
"Xt",
")",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xt",
"=",
"TS_Data",
"(",
"Xt",
",",
"Xc",
")",
"if",
"yt",
"is",
"not",
"None",
"and",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"yt",
"[",
"0",
"]",
")",
")",
">",
"1",
":",
"# y is a time series",
"yt",
"=",
"self",
".",
"_mv_resize",
"(",
"yt",
")",
"swt",
"=",
"None",
"elif",
"yt",
"is",
"not",
"None",
":",
"# todo: is this needed?",
"yt",
"=",
"np",
".",
"array",
"(",
"yt",
")",
"return",
"Xt",
",",
"yt",
",",
"swt"
] | Transforms the time series data into fixed length segments using padding and or truncation
If y is a time series and passed, it will be transformed as well
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : None | [
"Transforms",
"the",
"time",
"series",
"data",
"into",
"fixed",
"length",
"segments",
"using",
"padding",
"and",
"or",
"truncation",
"If",
"y",
"is",
"a",
"time",
"series",
"and",
"passed",
"it",
"will",
"be",
"transformed",
"as",
"well"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L655-L696 | train | 232,525 |
dmbee/seglearn | seglearn/transform.py | InterpLongToWide._check_data | def _check_data(self, X):
'''
Checks that unique identifiers vaf_types are consistent between time series.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
'''
if len(X) > 1:
sval = np.unique(X[0][:, 1])
if np.all([np.all(np.unique(X[i][:, 1]) == sval) for i in range(1, len(X))]):
pass
else:
raise ValueError("Unique identifier var_types not consistent between time series") | python | def _check_data(self, X):
'''
Checks that unique identifiers vaf_types are consistent between time series.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
'''
if len(X) > 1:
sval = np.unique(X[0][:, 1])
if np.all([np.all(np.unique(X[i][:, 1]) == sval) for i in range(1, len(X))]):
pass
else:
raise ValueError("Unique identifier var_types not consistent between time series") | [
"def",
"_check_data",
"(",
"self",
",",
"X",
")",
":",
"if",
"len",
"(",
"X",
")",
">",
"1",
":",
"sval",
"=",
"np",
".",
"unique",
"(",
"X",
"[",
"0",
"]",
"[",
":",
",",
"1",
"]",
")",
"if",
"np",
".",
"all",
"(",
"[",
"np",
".",
"all",
"(",
"np",
".",
"unique",
"(",
"X",
"[",
"i",
"]",
"[",
":",
",",
"1",
"]",
")",
"==",
"sval",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"X",
")",
")",
"]",
")",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unique identifier var_types not consistent between time series\"",
")"
] | Checks that unique identifiers vaf_types are consistent between time series.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data | [
"Checks",
"that",
"unique",
"identifiers",
"vaf_types",
"are",
"consistent",
"between",
"time",
"series",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L900-L915 | train | 232,526 |
dmbee/seglearn | seglearn/transform.py | FeatureRep._check_features | def _check_features(self, features, Xti):
'''
tests output of each feature against a segmented time series X
Parameters
----------
features : dict
feature function dictionary
Xti : array-like, shape [n_samples, segment_width, n_variables]
segmented time series (instance)
Returns
-------
ftr_sizes : dict
number of features output by each feature function
'''
N = Xti.shape[0]
N_fts = len(features)
fshapes = np.zeros((N_fts, 2), dtype=np.int)
keys = [key for key in features]
for i in np.arange(N_fts):
fshapes[i] = np.row_stack(features[keys[i]](Xti)).shape
# make sure each feature returns an array shape [N, ]
if not np.all(fshapes[:, 0] == N):
raise ValueError("feature function returned array with invalid length, ",
np.array(features.keys())[fshapes[:, 0] != N])
return {keys[i]: fshapes[i, 1] for i in range(N_fts)} | python | def _check_features(self, features, Xti):
'''
tests output of each feature against a segmented time series X
Parameters
----------
features : dict
feature function dictionary
Xti : array-like, shape [n_samples, segment_width, n_variables]
segmented time series (instance)
Returns
-------
ftr_sizes : dict
number of features output by each feature function
'''
N = Xti.shape[0]
N_fts = len(features)
fshapes = np.zeros((N_fts, 2), dtype=np.int)
keys = [key for key in features]
for i in np.arange(N_fts):
fshapes[i] = np.row_stack(features[keys[i]](Xti)).shape
# make sure each feature returns an array shape [N, ]
if not np.all(fshapes[:, 0] == N):
raise ValueError("feature function returned array with invalid length, ",
np.array(features.keys())[fshapes[:, 0] != N])
return {keys[i]: fshapes[i, 1] for i in range(N_fts)} | [
"def",
"_check_features",
"(",
"self",
",",
"features",
",",
"Xti",
")",
":",
"N",
"=",
"Xti",
".",
"shape",
"[",
"0",
"]",
"N_fts",
"=",
"len",
"(",
"features",
")",
"fshapes",
"=",
"np",
".",
"zeros",
"(",
"(",
"N_fts",
",",
"2",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"keys",
"=",
"[",
"key",
"for",
"key",
"in",
"features",
"]",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N_fts",
")",
":",
"fshapes",
"[",
"i",
"]",
"=",
"np",
".",
"row_stack",
"(",
"features",
"[",
"keys",
"[",
"i",
"]",
"]",
"(",
"Xti",
")",
")",
".",
"shape",
"# make sure each feature returns an array shape [N, ]",
"if",
"not",
"np",
".",
"all",
"(",
"fshapes",
"[",
":",
",",
"0",
"]",
"==",
"N",
")",
":",
"raise",
"ValueError",
"(",
"\"feature function returned array with invalid length, \"",
",",
"np",
".",
"array",
"(",
"features",
".",
"keys",
"(",
")",
")",
"[",
"fshapes",
"[",
":",
",",
"0",
"]",
"!=",
"N",
"]",
")",
"return",
"{",
"keys",
"[",
"i",
"]",
":",
"fshapes",
"[",
"i",
",",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"N_fts",
")",
"}"
] | tests output of each feature against a segmented time series X
Parameters
----------
features : dict
feature function dictionary
Xti : array-like, shape [n_samples, segment_width, n_variables]
segmented time series (instance)
Returns
-------
ftr_sizes : dict
number of features output by each feature function | [
"tests",
"output",
"of",
"each",
"feature",
"against",
"a",
"segmented",
"time",
"series",
"X"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1137-L1165 | train | 232,527 |
dmbee/seglearn | seglearn/transform.py | FeatureRep._generate_feature_labels | def _generate_feature_labels(self, X):
'''
Generates string feature labels
'''
Xt, Xc = get_ts_data_parts(X)
ftr_sizes = self._check_features(self.features, Xt[0:3])
f_labels = []
# calculated features
for key in ftr_sizes:
for i in range(ftr_sizes[key]):
f_labels += [key + '_' + str(i)]
# contextual features
if Xc is not None:
Ns = len(np.atleast_1d(Xc[0]))
s_labels = ["context_" + str(i) for i in range(Ns)]
f_labels += s_labels
return f_labels | python | def _generate_feature_labels(self, X):
'''
Generates string feature labels
'''
Xt, Xc = get_ts_data_parts(X)
ftr_sizes = self._check_features(self.features, Xt[0:3])
f_labels = []
# calculated features
for key in ftr_sizes:
for i in range(ftr_sizes[key]):
f_labels += [key + '_' + str(i)]
# contextual features
if Xc is not None:
Ns = len(np.atleast_1d(Xc[0]))
s_labels = ["context_" + str(i) for i in range(Ns)]
f_labels += s_labels
return f_labels | [
"def",
"_generate_feature_labels",
"(",
"self",
",",
"X",
")",
":",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"ftr_sizes",
"=",
"self",
".",
"_check_features",
"(",
"self",
".",
"features",
",",
"Xt",
"[",
"0",
":",
"3",
"]",
")",
"f_labels",
"=",
"[",
"]",
"# calculated features",
"for",
"key",
"in",
"ftr_sizes",
":",
"for",
"i",
"in",
"range",
"(",
"ftr_sizes",
"[",
"key",
"]",
")",
":",
"f_labels",
"+=",
"[",
"key",
"+",
"'_'",
"+",
"str",
"(",
"i",
")",
"]",
"# contextual features",
"if",
"Xc",
"is",
"not",
"None",
":",
"Ns",
"=",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"Xc",
"[",
"0",
"]",
")",
")",
"s_labels",
"=",
"[",
"\"context_\"",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"Ns",
")",
"]",
"f_labels",
"+=",
"s_labels",
"return",
"f_labels"
] | Generates string feature labels | [
"Generates",
"string",
"feature",
"labels"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1167-L1187 | train | 232,528 |
dmbee/seglearn | seglearn/transform.py | FeatureRepMix._retrieve_indices | def _retrieve_indices(cols):
'''
Retrieve a list of indices corresponding to the provided column specification.
'''
if isinstance(cols, int):
return [cols]
elif isinstance(cols, slice):
start = cols.start if cols.start else 0
stop = cols.stop
step = cols.step if cols.step else 1
return list(range(start, stop, step))
elif isinstance(cols, list) and cols:
if isinstance(cols[0], bool):
return np.flatnonzero(np.asarray(cols))
elif isinstance(cols[0], int):
return cols
else:
raise TypeError('No valid column specifier. Only a scalar, list or slice of all'
'integers or a boolean mask are allowed.') | python | def _retrieve_indices(cols):
'''
Retrieve a list of indices corresponding to the provided column specification.
'''
if isinstance(cols, int):
return [cols]
elif isinstance(cols, slice):
start = cols.start if cols.start else 0
stop = cols.stop
step = cols.step if cols.step else 1
return list(range(start, stop, step))
elif isinstance(cols, list) and cols:
if isinstance(cols[0], bool):
return np.flatnonzero(np.asarray(cols))
elif isinstance(cols[0], int):
return cols
else:
raise TypeError('No valid column specifier. Only a scalar, list or slice of all'
'integers or a boolean mask are allowed.') | [
"def",
"_retrieve_indices",
"(",
"cols",
")",
":",
"if",
"isinstance",
"(",
"cols",
",",
"int",
")",
":",
"return",
"[",
"cols",
"]",
"elif",
"isinstance",
"(",
"cols",
",",
"slice",
")",
":",
"start",
"=",
"cols",
".",
"start",
"if",
"cols",
".",
"start",
"else",
"0",
"stop",
"=",
"cols",
".",
"stop",
"step",
"=",
"cols",
".",
"step",
"if",
"cols",
".",
"step",
"else",
"1",
"return",
"list",
"(",
"range",
"(",
"start",
",",
"stop",
",",
"step",
")",
")",
"elif",
"isinstance",
"(",
"cols",
",",
"list",
")",
"and",
"cols",
":",
"if",
"isinstance",
"(",
"cols",
"[",
"0",
"]",
",",
"bool",
")",
":",
"return",
"np",
".",
"flatnonzero",
"(",
"np",
".",
"asarray",
"(",
"cols",
")",
")",
"elif",
"isinstance",
"(",
"cols",
"[",
"0",
"]",
",",
"int",
")",
":",
"return",
"cols",
"else",
":",
"raise",
"TypeError",
"(",
"'No valid column specifier. Only a scalar, list or slice of all'",
"'integers or a boolean mask are allowed.'",
")"
] | Retrieve a list of indices corresponding to the provided column specification. | [
"Retrieve",
"a",
"list",
"of",
"indices",
"corresponding",
"to",
"the",
"provided",
"column",
"specification",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1301-L1319 | train | 232,529 |
dmbee/seglearn | seglearn/transform.py | FeatureRepMix._validate | def _validate(self):
'''
Internal function to validate the transformer before applying all internal transformers.
'''
if self.f_labels is None:
raise NotFittedError('FeatureRepMix')
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate transformers
for trans in transformers:
if not isinstance(trans, FeatureRep):
raise TypeError("All transformers must be an instance of FeatureRep."
" '%s' (type %s) doesn't." % (trans, type(trans))) | python | def _validate(self):
'''
Internal function to validate the transformer before applying all internal transformers.
'''
if self.f_labels is None:
raise NotFittedError('FeatureRepMix')
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate transformers
for trans in transformers:
if not isinstance(trans, FeatureRep):
raise TypeError("All transformers must be an instance of FeatureRep."
" '%s' (type %s) doesn't." % (trans, type(trans))) | [
"def",
"_validate",
"(",
"self",
")",
":",
"if",
"self",
".",
"f_labels",
"is",
"None",
":",
"raise",
"NotFittedError",
"(",
"'FeatureRepMix'",
")",
"if",
"not",
"self",
".",
"transformers",
":",
"return",
"names",
",",
"transformers",
",",
"_",
"=",
"zip",
"(",
"*",
"self",
".",
"transformers",
")",
"# validate names",
"self",
".",
"_validate_names",
"(",
"names",
")",
"# validate transformers",
"for",
"trans",
"in",
"transformers",
":",
"if",
"not",
"isinstance",
"(",
"trans",
",",
"FeatureRep",
")",
":",
"raise",
"TypeError",
"(",
"\"All transformers must be an instance of FeatureRep.\"",
"\" '%s' (type %s) doesn't.\"",
"%",
"(",
"trans",
",",
"type",
"(",
"trans",
")",
")",
")"
] | Internal function to validate the transformer before applying all internal transformers. | [
"Internal",
"function",
"to",
"validate",
"the",
"transformer",
"before",
"applying",
"all",
"internal",
"transformers",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1355-L1374 | train | 232,530 |
dmbee/seglearn | seglearn/transform.py | FunctionTransformer.transform | def transform(self, X):
'''
Transforms the time series data based on the provided function. Note this transformation
must not change the number of samples in the data.
Parameters
----------
X : array-like, shape [n_samples, ...]
time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_samples, ...]
transformed time series data
'''
if self.func is None:
return X
else:
Xt, Xc = get_ts_data_parts(X)
n_samples = len(Xt)
Xt = self.func(Xt, **self.func_kwargs)
if len(Xt) != n_samples:
raise ValueError("FunctionTransformer changes sample number (not supported).")
if Xc is not None:
Xt = TS_Data(Xt, Xc)
return Xt | python | def transform(self, X):
'''
Transforms the time series data based on the provided function. Note this transformation
must not change the number of samples in the data.
Parameters
----------
X : array-like, shape [n_samples, ...]
time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_samples, ...]
transformed time series data
'''
if self.func is None:
return X
else:
Xt, Xc = get_ts_data_parts(X)
n_samples = len(Xt)
Xt = self.func(Xt, **self.func_kwargs)
if len(Xt) != n_samples:
raise ValueError("FunctionTransformer changes sample number (not supported).")
if Xc is not None:
Xt = TS_Data(Xt, Xc)
return Xt | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"if",
"self",
".",
"func",
"is",
"None",
":",
"return",
"X",
"else",
":",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"n_samples",
"=",
"len",
"(",
"Xt",
")",
"Xt",
"=",
"self",
".",
"func",
"(",
"Xt",
",",
"*",
"*",
"self",
".",
"func_kwargs",
")",
"if",
"len",
"(",
"Xt",
")",
"!=",
"n_samples",
":",
"raise",
"ValueError",
"(",
"\"FunctionTransformer changes sample number (not supported).\"",
")",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xt",
"=",
"TS_Data",
"(",
"Xt",
",",
"Xc",
")",
"return",
"Xt"
] | Transforms the time series data based on the provided function. Note this transformation
must not change the number of samples in the data.
Parameters
----------
X : array-like, shape [n_samples, ...]
time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_samples, ...]
transformed time series data | [
"Transforms",
"the",
"time",
"series",
"data",
"based",
"on",
"the",
"provided",
"function",
".",
"Note",
"this",
"transformation",
"must",
"not",
"change",
"the",
"number",
"of",
"samples",
"in",
"the",
"data",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1465-L1491 | train | 232,531 |
SAP/PyHDB | pyhdb/protocol/segments.py | RequestSegment.build_payload | def build_payload(self, payload):
"""Build payload of all parts and write them into the payload buffer"""
remaining_size = self.MAX_SEGMENT_PAYLOAD_SIZE
for part in self.parts:
part_payload = part.pack(remaining_size)
payload.write(part_payload)
remaining_size -= len(part_payload) | python | def build_payload(self, payload):
"""Build payload of all parts and write them into the payload buffer"""
remaining_size = self.MAX_SEGMENT_PAYLOAD_SIZE
for part in self.parts:
part_payload = part.pack(remaining_size)
payload.write(part_payload)
remaining_size -= len(part_payload) | [
"def",
"build_payload",
"(",
"self",
",",
"payload",
")",
":",
"remaining_size",
"=",
"self",
".",
"MAX_SEGMENT_PAYLOAD_SIZE",
"for",
"part",
"in",
"self",
".",
"parts",
":",
"part_payload",
"=",
"part",
".",
"pack",
"(",
"remaining_size",
")",
"payload",
".",
"write",
"(",
"part_payload",
")",
"remaining_size",
"-=",
"len",
"(",
"part_payload",
")"
] | Build payload of all parts and write them into the payload buffer | [
"Build",
"payload",
"of",
"all",
"parts",
"and",
"write",
"them",
"into",
"the",
"payload",
"buffer"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/segments.py#L75-L82 | train | 232,532 |
SAP/PyHDB | pyhdb/protocol/types.py | escape | def escape(value):
"""
Escape a single value.
"""
if isinstance(value, (tuple, list)):
return "(" + ", ".join([escape(arg) for arg in value]) + ")"
else:
typ = by_python_type.get(value.__class__)
if typ is None:
raise InterfaceError(
"Unsupported python input: %s (%s)" % (value, value.__class__)
)
return typ.to_sql(value) | python | def escape(value):
"""
Escape a single value.
"""
if isinstance(value, (tuple, list)):
return "(" + ", ".join([escape(arg) for arg in value]) + ")"
else:
typ = by_python_type.get(value.__class__)
if typ is None:
raise InterfaceError(
"Unsupported python input: %s (%s)" % (value, value.__class__)
)
return typ.to_sql(value) | [
"def",
"escape",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"\"(\"",
"+",
"\", \"",
".",
"join",
"(",
"[",
"escape",
"(",
"arg",
")",
"for",
"arg",
"in",
"value",
"]",
")",
"+",
"\")\"",
"else",
":",
"typ",
"=",
"by_python_type",
".",
"get",
"(",
"value",
".",
"__class__",
")",
"if",
"typ",
"is",
"None",
":",
"raise",
"InterfaceError",
"(",
"\"Unsupported python input: %s (%s)\"",
"%",
"(",
"value",
",",
"value",
".",
"__class__",
")",
")",
"return",
"typ",
".",
"to_sql",
"(",
"value",
")"
] | Escape a single value. | [
"Escape",
"a",
"single",
"value",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L555-L569 | train | 232,533 |
SAP/PyHDB | pyhdb/protocol/types.py | escape_values | def escape_values(values):
"""
Escape multiple values from a list, tuple or dict.
"""
if isinstance(values, (tuple, list)):
return tuple([escape(value) for value in values])
elif isinstance(values, dict):
return dict([
(key, escape(value)) for (key, value) in values.items()
])
else:
raise InterfaceError("escape_values expects list, tuple or dict") | python | def escape_values(values):
"""
Escape multiple values from a list, tuple or dict.
"""
if isinstance(values, (tuple, list)):
return tuple([escape(value) for value in values])
elif isinstance(values, dict):
return dict([
(key, escape(value)) for (key, value) in values.items()
])
else:
raise InterfaceError("escape_values expects list, tuple or dict") | [
"def",
"escape_values",
"(",
"values",
")",
":",
"if",
"isinstance",
"(",
"values",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"tuple",
"(",
"[",
"escape",
"(",
"value",
")",
"for",
"value",
"in",
"values",
"]",
")",
"elif",
"isinstance",
"(",
"values",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"key",
",",
"escape",
"(",
"value",
")",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"values",
".",
"items",
"(",
")",
"]",
")",
"else",
":",
"raise",
"InterfaceError",
"(",
"\"escape_values expects list, tuple or dict\"",
")"
] | Escape multiple values from a list, tuple or dict. | [
"Escape",
"multiple",
"values",
"from",
"a",
"list",
"tuple",
"or",
"dict",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L572-L583 | train | 232,534 |
SAP/PyHDB | pyhdb/protocol/types.py | Date.prepare | def prepare(cls, value):
"""Pack datetime value into proper binary format"""
pfield = struct.pack('b', cls.type_code)
if isinstance(value, string_types):
value = datetime.datetime.strptime(value, "%Y-%m-%d")
year = value.year | 0x8000 # for some unknown reasons year has to be bit-or'ed with 0x8000
month = value.month - 1 # for some unknown reasons HANA counts months starting from zero
pfield += cls._struct.pack(year, month, value.day)
return pfield | python | def prepare(cls, value):
"""Pack datetime value into proper binary format"""
pfield = struct.pack('b', cls.type_code)
if isinstance(value, string_types):
value = datetime.datetime.strptime(value, "%Y-%m-%d")
year = value.year | 0x8000 # for some unknown reasons year has to be bit-or'ed with 0x8000
month = value.month - 1 # for some unknown reasons HANA counts months starting from zero
pfield += cls._struct.pack(year, month, value.day)
return pfield | [
"def",
"prepare",
"(",
"cls",
",",
"value",
")",
":",
"pfield",
"=",
"struct",
".",
"pack",
"(",
"'b'",
",",
"cls",
".",
"type_code",
")",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
":",
"value",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"value",
",",
"\"%Y-%m-%d\"",
")",
"year",
"=",
"value",
".",
"year",
"|",
"0x8000",
"# for some unknown reasons year has to be bit-or'ed with 0x8000",
"month",
"=",
"value",
".",
"month",
"-",
"1",
"# for some unknown reasons HANA counts months starting from zero",
"pfield",
"+=",
"cls",
".",
"_struct",
".",
"pack",
"(",
"year",
",",
"month",
",",
"value",
".",
"day",
")",
"return",
"pfield"
] | Pack datetime value into proper binary format | [
"Pack",
"datetime",
"value",
"into",
"proper",
"binary",
"format"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L368-L376 | train | 232,535 |
SAP/PyHDB | pyhdb/protocol/types.py | Time.prepare | def prepare(cls, value):
"""Pack time value into proper binary format"""
pfield = struct.pack('b', cls.type_code)
if isinstance(value, string_types):
if "." in value:
value = datetime.datetime.strptime(value, "%H:%M:%S.%f")
else:
value = datetime.datetime.strptime(value, "%H:%M:%S")
millisecond = value.second * 1000 + value.microsecond // 1000
hour = value.hour | 0x80 # for some unknown reasons hour has to be bit-or'ed with 0x80
pfield += cls._struct.pack(hour, value.minute, millisecond)
return pfield | python | def prepare(cls, value):
"""Pack time value into proper binary format"""
pfield = struct.pack('b', cls.type_code)
if isinstance(value, string_types):
if "." in value:
value = datetime.datetime.strptime(value, "%H:%M:%S.%f")
else:
value = datetime.datetime.strptime(value, "%H:%M:%S")
millisecond = value.second * 1000 + value.microsecond // 1000
hour = value.hour | 0x80 # for some unknown reasons hour has to be bit-or'ed with 0x80
pfield += cls._struct.pack(hour, value.minute, millisecond)
return pfield | [
"def",
"prepare",
"(",
"cls",
",",
"value",
")",
":",
"pfield",
"=",
"struct",
".",
"pack",
"(",
"'b'",
",",
"cls",
".",
"type_code",
")",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
":",
"if",
"\".\"",
"in",
"value",
":",
"value",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"value",
",",
"\"%H:%M:%S.%f\"",
")",
"else",
":",
"value",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"value",
",",
"\"%H:%M:%S\"",
")",
"millisecond",
"=",
"value",
".",
"second",
"*",
"1000",
"+",
"value",
".",
"microsecond",
"//",
"1000",
"hour",
"=",
"value",
".",
"hour",
"|",
"0x80",
"# for some unknown reasons hour has to be bit-or'ed with 0x80",
"pfield",
"+=",
"cls",
".",
"_struct",
".",
"pack",
"(",
"hour",
",",
"value",
".",
"minute",
",",
"millisecond",
")",
"return",
"pfield"
] | Pack time value into proper binary format | [
"Pack",
"time",
"value",
"into",
"proper",
"binary",
"format"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L439-L450 | train | 232,536 |
SAP/PyHDB | pyhdb/protocol/types.py | MixinLobType.prepare | def prepare(cls, value, length=0, position=0, is_last_data=True):
"""Prepare Lob header.
Note that the actual lob data is NOT written here but appended after the parameter block for each row!
"""
hstruct = WriteLobHeader.header_struct
lob_option_dataincluded = WriteLobHeader.LOB_OPTION_DATAINCLUDED if length > 0 else 0
lob_option_lastdata = WriteLobHeader.LOB_OPTION_LASTDATA if is_last_data else 0
options = lob_option_dataincluded | lob_option_lastdata
pfield = hstruct.pack(cls.type_code, options, length, position)
return pfield | python | def prepare(cls, value, length=0, position=0, is_last_data=True):
"""Prepare Lob header.
Note that the actual lob data is NOT written here but appended after the parameter block for each row!
"""
hstruct = WriteLobHeader.header_struct
lob_option_dataincluded = WriteLobHeader.LOB_OPTION_DATAINCLUDED if length > 0 else 0
lob_option_lastdata = WriteLobHeader.LOB_OPTION_LASTDATA if is_last_data else 0
options = lob_option_dataincluded | lob_option_lastdata
pfield = hstruct.pack(cls.type_code, options, length, position)
return pfield | [
"def",
"prepare",
"(",
"cls",
",",
"value",
",",
"length",
"=",
"0",
",",
"position",
"=",
"0",
",",
"is_last_data",
"=",
"True",
")",
":",
"hstruct",
"=",
"WriteLobHeader",
".",
"header_struct",
"lob_option_dataincluded",
"=",
"WriteLobHeader",
".",
"LOB_OPTION_DATAINCLUDED",
"if",
"length",
">",
"0",
"else",
"0",
"lob_option_lastdata",
"=",
"WriteLobHeader",
".",
"LOB_OPTION_LASTDATA",
"if",
"is_last_data",
"else",
"0",
"options",
"=",
"lob_option_dataincluded",
"|",
"lob_option_lastdata",
"pfield",
"=",
"hstruct",
".",
"pack",
"(",
"cls",
".",
"type_code",
",",
"options",
",",
"length",
",",
"position",
")",
"return",
"pfield"
] | Prepare Lob header.
Note that the actual lob data is NOT written here but appended after the parameter block for each row! | [
"Prepare",
"Lob",
"header",
".",
"Note",
"that",
"the",
"actual",
"lob",
"data",
"is",
"NOT",
"written",
"here",
"but",
"appended",
"after",
"the",
"parameter",
"block",
"for",
"each",
"row!"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L501-L510 | train | 232,537 |
SAP/PyHDB | pyhdb/protocol/lobs.py | Lob.seek | def seek(self, offset, whence=SEEK_SET):
"""Seek pointer in lob data buffer to requested position.
Might trigger further loading of data from the database if the pointer is beyond currently read data.
"""
# A nice trick is to (ab)use BytesIO.seek() to go to the desired position for easier calculation.
# This will not add any data to the buffer however - very convenient!
self.data.seek(offset, whence)
new_pos = self.data.tell()
missing_bytes_to_read = new_pos - self._current_lob_length
if missing_bytes_to_read > 0:
# Trying to seek beyond currently available LOB data, so need to load some more first.
# We are smart here: (at least trying...):
# If a user sets a certain file position s/he probably wants to read data from
# there. So already read some extra data to avoid yet another immediate
# reading step. Try with EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK additional items (bytes/chars).
# jump to the end of the current buffer and read the new data:
self.data.seek(0, SEEK_END)
self.read(missing_bytes_to_read + self.EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK)
# reposition file pointer a originally desired position:
self.data.seek(new_pos)
return new_pos | python | def seek(self, offset, whence=SEEK_SET):
"""Seek pointer in lob data buffer to requested position.
Might trigger further loading of data from the database if the pointer is beyond currently read data.
"""
# A nice trick is to (ab)use BytesIO.seek() to go to the desired position for easier calculation.
# This will not add any data to the buffer however - very convenient!
self.data.seek(offset, whence)
new_pos = self.data.tell()
missing_bytes_to_read = new_pos - self._current_lob_length
if missing_bytes_to_read > 0:
# Trying to seek beyond currently available LOB data, so need to load some more first.
# We are smart here: (at least trying...):
# If a user sets a certain file position s/he probably wants to read data from
# there. So already read some extra data to avoid yet another immediate
# reading step. Try with EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK additional items (bytes/chars).
# jump to the end of the current buffer and read the new data:
self.data.seek(0, SEEK_END)
self.read(missing_bytes_to_read + self.EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK)
# reposition file pointer a originally desired position:
self.data.seek(new_pos)
return new_pos | [
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"whence",
"=",
"SEEK_SET",
")",
":",
"# A nice trick is to (ab)use BytesIO.seek() to go to the desired position for easier calculation.",
"# This will not add any data to the buffer however - very convenient!",
"self",
".",
"data",
".",
"seek",
"(",
"offset",
",",
"whence",
")",
"new_pos",
"=",
"self",
".",
"data",
".",
"tell",
"(",
")",
"missing_bytes_to_read",
"=",
"new_pos",
"-",
"self",
".",
"_current_lob_length",
"if",
"missing_bytes_to_read",
">",
"0",
":",
"# Trying to seek beyond currently available LOB data, so need to load some more first.",
"# We are smart here: (at least trying...):",
"# If a user sets a certain file position s/he probably wants to read data from",
"# there. So already read some extra data to avoid yet another immediate",
"# reading step. Try with EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK additional items (bytes/chars).",
"# jump to the end of the current buffer and read the new data:",
"self",
".",
"data",
".",
"seek",
"(",
"0",
",",
"SEEK_END",
")",
"self",
".",
"read",
"(",
"missing_bytes_to_read",
"+",
"self",
".",
"EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK",
")",
"# reposition file pointer a originally desired position:",
"self",
".",
"data",
".",
"seek",
"(",
"new_pos",
")",
"return",
"new_pos"
] | Seek pointer in lob data buffer to requested position.
Might trigger further loading of data from the database if the pointer is beyond currently read data. | [
"Seek",
"pointer",
"in",
"lob",
"data",
"buffer",
"to",
"requested",
"position",
".",
"Might",
"trigger",
"further",
"loading",
"of",
"data",
"from",
"the",
"database",
"if",
"the",
"pointer",
"is",
"beyond",
"currently",
"read",
"data",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/lobs.py#L110-L132 | train | 232,538 |
SAP/PyHDB | pyhdb/protocol/lobs.py | Lob._read_missing_lob_data_from_db | def _read_missing_lob_data_from_db(self, readoffset, readlength):
"""Read LOB request part from database"""
logger.debug('Reading missing lob data from db. Offset: %d, readlength: %d' % (readoffset, readlength))
lob_data = self._make_read_lob_request(readoffset, readlength)
# make sure we really got as many items (not bytes!) as requested:
enc_lob_data = self._decode_lob_data(lob_data)
assert readlength == len(enc_lob_data), 'expected: %d, received; %d' % (readlength, len(enc_lob_data))
# jump to end of data, and append new and properly decoded data to it:
# import pdb;pdb.set_trace()
self.data.seek(0, SEEK_END)
self.data.write(enc_lob_data)
self._current_lob_length = len(self.data.getvalue()) | python | def _read_missing_lob_data_from_db(self, readoffset, readlength):
"""Read LOB request part from database"""
logger.debug('Reading missing lob data from db. Offset: %d, readlength: %d' % (readoffset, readlength))
lob_data = self._make_read_lob_request(readoffset, readlength)
# make sure we really got as many items (not bytes!) as requested:
enc_lob_data = self._decode_lob_data(lob_data)
assert readlength == len(enc_lob_data), 'expected: %d, received; %d' % (readlength, len(enc_lob_data))
# jump to end of data, and append new and properly decoded data to it:
# import pdb;pdb.set_trace()
self.data.seek(0, SEEK_END)
self.data.write(enc_lob_data)
self._current_lob_length = len(self.data.getvalue()) | [
"def",
"_read_missing_lob_data_from_db",
"(",
"self",
",",
"readoffset",
",",
"readlength",
")",
":",
"logger",
".",
"debug",
"(",
"'Reading missing lob data from db. Offset: %d, readlength: %d'",
"%",
"(",
"readoffset",
",",
"readlength",
")",
")",
"lob_data",
"=",
"self",
".",
"_make_read_lob_request",
"(",
"readoffset",
",",
"readlength",
")",
"# make sure we really got as many items (not bytes!) as requested:",
"enc_lob_data",
"=",
"self",
".",
"_decode_lob_data",
"(",
"lob_data",
")",
"assert",
"readlength",
"==",
"len",
"(",
"enc_lob_data",
")",
",",
"'expected: %d, received; %d'",
"%",
"(",
"readlength",
",",
"len",
"(",
"enc_lob_data",
")",
")",
"# jump to end of data, and append new and properly decoded data to it:",
"# import pdb;pdb.set_trace()",
"self",
".",
"data",
".",
"seek",
"(",
"0",
",",
"SEEK_END",
")",
"self",
".",
"data",
".",
"write",
"(",
"enc_lob_data",
")",
"self",
".",
"_current_lob_length",
"=",
"len",
"(",
"self",
".",
"data",
".",
"getvalue",
"(",
")",
")"
] | Read LOB request part from database | [
"Read",
"LOB",
"request",
"part",
"from",
"database"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/lobs.py#L152-L165 | train | 232,539 |
SAP/PyHDB | pyhdb/protocol/lobs.py | Clob._init_io_container | def _init_io_container(self, init_value):
"""Initialize container to hold lob data.
Here either a cStringIO or a io.StringIO class is used depending on the Python version.
For CLobs ensure that an initial unicode value only contains valid ascii chars.
"""
if isinstance(init_value, CLOB_STRING_IO_CLASSES):
# already a valid StringIO instance, just use it as it is
v = init_value
else:
# works for strings and unicodes. However unicodes must only contain valid ascii chars!
if PY3:
# a io.StringIO also accepts any unicode characters, but we must be sure that only
# ascii chars are contained. In PY2 we use a cStringIO class which complains by itself
# if it catches this case, so in PY2 no extra check needs to be performed here.
init_value.encode('ascii') # this is just a check, result not needed!
v = CLOB_STRING_IO(init_value)
return v | python | def _init_io_container(self, init_value):
"""Initialize container to hold lob data.
Here either a cStringIO or a io.StringIO class is used depending on the Python version.
For CLobs ensure that an initial unicode value only contains valid ascii chars.
"""
if isinstance(init_value, CLOB_STRING_IO_CLASSES):
# already a valid StringIO instance, just use it as it is
v = init_value
else:
# works for strings and unicodes. However unicodes must only contain valid ascii chars!
if PY3:
# a io.StringIO also accepts any unicode characters, but we must be sure that only
# ascii chars are contained. In PY2 we use a cStringIO class which complains by itself
# if it catches this case, so in PY2 no extra check needs to be performed here.
init_value.encode('ascii') # this is just a check, result not needed!
v = CLOB_STRING_IO(init_value)
return v | [
"def",
"_init_io_container",
"(",
"self",
",",
"init_value",
")",
":",
"if",
"isinstance",
"(",
"init_value",
",",
"CLOB_STRING_IO_CLASSES",
")",
":",
"# already a valid StringIO instance, just use it as it is",
"v",
"=",
"init_value",
"else",
":",
"# works for strings and unicodes. However unicodes must only contain valid ascii chars!",
"if",
"PY3",
":",
"# a io.StringIO also accepts any unicode characters, but we must be sure that only",
"# ascii chars are contained. In PY2 we use a cStringIO class which complains by itself",
"# if it catches this case, so in PY2 no extra check needs to be performed here.",
"init_value",
".",
"encode",
"(",
"'ascii'",
")",
"# this is just a check, result not needed!",
"v",
"=",
"CLOB_STRING_IO",
"(",
"init_value",
")",
"return",
"v"
] | Initialize container to hold lob data.
Here either a cStringIO or a io.StringIO class is used depending on the Python version.
For CLobs ensure that an initial unicode value only contains valid ascii chars. | [
"Initialize",
"container",
"to",
"hold",
"lob",
"data",
".",
"Here",
"either",
"a",
"cStringIO",
"or",
"a",
"io",
".",
"StringIO",
"class",
"is",
"used",
"depending",
"on",
"the",
"Python",
"version",
".",
"For",
"CLobs",
"ensure",
"that",
"an",
"initial",
"unicode",
"value",
"only",
"contains",
"valid",
"ascii",
"chars",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/lobs.py#L238-L254 | train | 232,540 |
SAP/PyHDB | pyhdb/cursor.py | Cursor._handle_upsert | def _handle_upsert(self, parts, unwritten_lobs=()):
"""Handle reply messages from INSERT or UPDATE statements"""
self.description = None
self._received_last_resultset_part = True # set to 'True' so that cursor.fetch*() returns just empty list
for part in parts:
if part.kind == part_kinds.ROWSAFFECTED:
self.rowcount = part.values[0]
elif part.kind in (part_kinds.TRANSACTIONFLAGS, part_kinds.STATEMENTCONTEXT, part_kinds.PARAMETERMETADATA):
pass
elif part.kind == part_kinds.WRITELOBREPLY:
# This part occurrs after lobs have been submitted not at all or only partially during an insert.
# In this case the parameter part of the Request message contains a list called 'unwritten_lobs'
# with LobBuffer instances.
# Those instances are in the same order as 'locator_ids' received in the reply message. These IDs
# are then used to deliver the missing LOB data to the server via WRITE_LOB_REQUESTs.
for lob_buffer, lob_locator_id in izip(unwritten_lobs, part.locator_ids):
# store locator_id in every lob buffer instance for later reference:
lob_buffer.locator_id = lob_locator_id
self._perform_lob_write_requests(unwritten_lobs)
else:
raise InterfaceError("Prepared insert statement response, unexpected part kind %d." % part.kind)
self._executed = True | python | def _handle_upsert(self, parts, unwritten_lobs=()):
"""Handle reply messages from INSERT or UPDATE statements"""
self.description = None
self._received_last_resultset_part = True # set to 'True' so that cursor.fetch*() returns just empty list
for part in parts:
if part.kind == part_kinds.ROWSAFFECTED:
self.rowcount = part.values[0]
elif part.kind in (part_kinds.TRANSACTIONFLAGS, part_kinds.STATEMENTCONTEXT, part_kinds.PARAMETERMETADATA):
pass
elif part.kind == part_kinds.WRITELOBREPLY:
# This part occurrs after lobs have been submitted not at all or only partially during an insert.
# In this case the parameter part of the Request message contains a list called 'unwritten_lobs'
# with LobBuffer instances.
# Those instances are in the same order as 'locator_ids' received in the reply message. These IDs
# are then used to deliver the missing LOB data to the server via WRITE_LOB_REQUESTs.
for lob_buffer, lob_locator_id in izip(unwritten_lobs, part.locator_ids):
# store locator_id in every lob buffer instance for later reference:
lob_buffer.locator_id = lob_locator_id
self._perform_lob_write_requests(unwritten_lobs)
else:
raise InterfaceError("Prepared insert statement response, unexpected part kind %d." % part.kind)
self._executed = True | [
"def",
"_handle_upsert",
"(",
"self",
",",
"parts",
",",
"unwritten_lobs",
"=",
"(",
")",
")",
":",
"self",
".",
"description",
"=",
"None",
"self",
".",
"_received_last_resultset_part",
"=",
"True",
"# set to 'True' so that cursor.fetch*() returns just empty list",
"for",
"part",
"in",
"parts",
":",
"if",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"ROWSAFFECTED",
":",
"self",
".",
"rowcount",
"=",
"part",
".",
"values",
"[",
"0",
"]",
"elif",
"part",
".",
"kind",
"in",
"(",
"part_kinds",
".",
"TRANSACTIONFLAGS",
",",
"part_kinds",
".",
"STATEMENTCONTEXT",
",",
"part_kinds",
".",
"PARAMETERMETADATA",
")",
":",
"pass",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"WRITELOBREPLY",
":",
"# This part occurrs after lobs have been submitted not at all or only partially during an insert.",
"# In this case the parameter part of the Request message contains a list called 'unwritten_lobs'",
"# with LobBuffer instances.",
"# Those instances are in the same order as 'locator_ids' received in the reply message. These IDs",
"# are then used to deliver the missing LOB data to the server via WRITE_LOB_REQUESTs.",
"for",
"lob_buffer",
",",
"lob_locator_id",
"in",
"izip",
"(",
"unwritten_lobs",
",",
"part",
".",
"locator_ids",
")",
":",
"# store locator_id in every lob buffer instance for later reference:",
"lob_buffer",
".",
"locator_id",
"=",
"lob_locator_id",
"self",
".",
"_perform_lob_write_requests",
"(",
"unwritten_lobs",
")",
"else",
":",
"raise",
"InterfaceError",
"(",
"\"Prepared insert statement response, unexpected part kind %d.\"",
"%",
"part",
".",
"kind",
")",
"self",
".",
"_executed",
"=",
"True"
] | Handle reply messages from INSERT or UPDATE statements | [
"Handle",
"reply",
"messages",
"from",
"INSERT",
"or",
"UPDATE",
"statements"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/cursor.py#L288-L310 | train | 232,541 |
SAP/PyHDB | pyhdb/cursor.py | Cursor._handle_select | def _handle_select(self, parts, result_metadata=None):
"""Handle reply messages from SELECT statements"""
self.rowcount = -1
if result_metadata is not None:
# Select was prepared and we can use the already received metadata
self.description, self._column_types = self._handle_result_metadata(result_metadata)
for part in parts:
if part.kind == part_kinds.RESULTSETID:
self._resultset_id = part.value
elif part.kind == part_kinds.RESULTSETMETADATA:
self.description, self._column_types = self._handle_result_metadata(part)
elif part.kind == part_kinds.RESULTSET:
self._buffer = part.unpack_rows(self._column_types, self.connection)
self._received_last_resultset_part = part.attribute & 1
self._executed = True
elif part.kind in (part_kinds.STATEMENTCONTEXT, part_kinds.TRANSACTIONFLAGS, part_kinds.PARAMETERMETADATA):
pass
else:
raise InterfaceError("Prepared select statement response, unexpected part kind %d." % part.kind) | python | def _handle_select(self, parts, result_metadata=None):
"""Handle reply messages from SELECT statements"""
self.rowcount = -1
if result_metadata is not None:
# Select was prepared and we can use the already received metadata
self.description, self._column_types = self._handle_result_metadata(result_metadata)
for part in parts:
if part.kind == part_kinds.RESULTSETID:
self._resultset_id = part.value
elif part.kind == part_kinds.RESULTSETMETADATA:
self.description, self._column_types = self._handle_result_metadata(part)
elif part.kind == part_kinds.RESULTSET:
self._buffer = part.unpack_rows(self._column_types, self.connection)
self._received_last_resultset_part = part.attribute & 1
self._executed = True
elif part.kind in (part_kinds.STATEMENTCONTEXT, part_kinds.TRANSACTIONFLAGS, part_kinds.PARAMETERMETADATA):
pass
else:
raise InterfaceError("Prepared select statement response, unexpected part kind %d." % part.kind) | [
"def",
"_handle_select",
"(",
"self",
",",
"parts",
",",
"result_metadata",
"=",
"None",
")",
":",
"self",
".",
"rowcount",
"=",
"-",
"1",
"if",
"result_metadata",
"is",
"not",
"None",
":",
"# Select was prepared and we can use the already received metadata",
"self",
".",
"description",
",",
"self",
".",
"_column_types",
"=",
"self",
".",
"_handle_result_metadata",
"(",
"result_metadata",
")",
"for",
"part",
"in",
"parts",
":",
"if",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"RESULTSETID",
":",
"self",
".",
"_resultset_id",
"=",
"part",
".",
"value",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"RESULTSETMETADATA",
":",
"self",
".",
"description",
",",
"self",
".",
"_column_types",
"=",
"self",
".",
"_handle_result_metadata",
"(",
"part",
")",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"RESULTSET",
":",
"self",
".",
"_buffer",
"=",
"part",
".",
"unpack_rows",
"(",
"self",
".",
"_column_types",
",",
"self",
".",
"connection",
")",
"self",
".",
"_received_last_resultset_part",
"=",
"part",
".",
"attribute",
"&",
"1",
"self",
".",
"_executed",
"=",
"True",
"elif",
"part",
".",
"kind",
"in",
"(",
"part_kinds",
".",
"STATEMENTCONTEXT",
",",
"part_kinds",
".",
"TRANSACTIONFLAGS",
",",
"part_kinds",
".",
"PARAMETERMETADATA",
")",
":",
"pass",
"else",
":",
"raise",
"InterfaceError",
"(",
"\"Prepared select statement response, unexpected part kind %d.\"",
"%",
"part",
".",
"kind",
")"
] | Handle reply messages from SELECT statements | [
"Handle",
"reply",
"messages",
"from",
"SELECT",
"statements"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/cursor.py#L328-L347 | train | 232,542 |
SAP/PyHDB | pyhdb/cursor.py | Cursor._handle_dbproc_call | def _handle_dbproc_call(self, parts, parameters_metadata):
"""Handle reply messages from STORED PROCEDURE statements"""
for part in parts:
if part.kind == part_kinds.ROWSAFFECTED:
self.rowcount = part.values[0]
elif part.kind == part_kinds.TRANSACTIONFLAGS:
pass
elif part.kind == part_kinds.STATEMENTCONTEXT:
pass
elif part.kind == part_kinds.OUTPUTPARAMETERS:
self._buffer = part.unpack_rows(parameters_metadata, self.connection)
self._received_last_resultset_part = True
self._executed = True
elif part.kind == part_kinds.RESULTSETMETADATA:
self.description, self._column_types = self._handle_result_metadata(part)
elif part.kind == part_kinds.RESULTSETID:
self._resultset_id = part.value
elif part.kind == part_kinds.RESULTSET:
self._buffer = part.unpack_rows(self._column_types, self.connection)
self._received_last_resultset_part = part.attribute & 1
self._executed = True
else:
raise InterfaceError("Stored procedure call, unexpected part kind %d." % part.kind)
self._executed = True | python | def _handle_dbproc_call(self, parts, parameters_metadata):
"""Handle reply messages from STORED PROCEDURE statements"""
for part in parts:
if part.kind == part_kinds.ROWSAFFECTED:
self.rowcount = part.values[0]
elif part.kind == part_kinds.TRANSACTIONFLAGS:
pass
elif part.kind == part_kinds.STATEMENTCONTEXT:
pass
elif part.kind == part_kinds.OUTPUTPARAMETERS:
self._buffer = part.unpack_rows(parameters_metadata, self.connection)
self._received_last_resultset_part = True
self._executed = True
elif part.kind == part_kinds.RESULTSETMETADATA:
self.description, self._column_types = self._handle_result_metadata(part)
elif part.kind == part_kinds.RESULTSETID:
self._resultset_id = part.value
elif part.kind == part_kinds.RESULTSET:
self._buffer = part.unpack_rows(self._column_types, self.connection)
self._received_last_resultset_part = part.attribute & 1
self._executed = True
else:
raise InterfaceError("Stored procedure call, unexpected part kind %d." % part.kind)
self._executed = True | [
"def",
"_handle_dbproc_call",
"(",
"self",
",",
"parts",
",",
"parameters_metadata",
")",
":",
"for",
"part",
"in",
"parts",
":",
"if",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"ROWSAFFECTED",
":",
"self",
".",
"rowcount",
"=",
"part",
".",
"values",
"[",
"0",
"]",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"TRANSACTIONFLAGS",
":",
"pass",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"STATEMENTCONTEXT",
":",
"pass",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"OUTPUTPARAMETERS",
":",
"self",
".",
"_buffer",
"=",
"part",
".",
"unpack_rows",
"(",
"parameters_metadata",
",",
"self",
".",
"connection",
")",
"self",
".",
"_received_last_resultset_part",
"=",
"True",
"self",
".",
"_executed",
"=",
"True",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"RESULTSETMETADATA",
":",
"self",
".",
"description",
",",
"self",
".",
"_column_types",
"=",
"self",
".",
"_handle_result_metadata",
"(",
"part",
")",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"RESULTSETID",
":",
"self",
".",
"_resultset_id",
"=",
"part",
".",
"value",
"elif",
"part",
".",
"kind",
"==",
"part_kinds",
".",
"RESULTSET",
":",
"self",
".",
"_buffer",
"=",
"part",
".",
"unpack_rows",
"(",
"self",
".",
"_column_types",
",",
"self",
".",
"connection",
")",
"self",
".",
"_received_last_resultset_part",
"=",
"part",
".",
"attribute",
"&",
"1",
"self",
".",
"_executed",
"=",
"True",
"else",
":",
"raise",
"InterfaceError",
"(",
"\"Stored procedure call, unexpected part kind %d.\"",
"%",
"part",
".",
"kind",
")",
"self",
".",
"_executed",
"=",
"True"
] | Handle reply messages from STORED PROCEDURE statements | [
"Handle",
"reply",
"messages",
"from",
"STORED",
"PROCEDURE",
"statements"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/cursor.py#L349-L372 | train | 232,543 |
SAP/PyHDB | pyhdb/lib/stringlib.py | allhexlify | def allhexlify(data):
"""Hexlify given data into a string representation with hex values for all chars
Input like
'ab\x04ce'
becomes
'\x61\x62\x04\x63\x65'
"""
hx = binascii.hexlify(data)
return b''.join([b'\\x' + o for o in re.findall(b'..', hx)]) | python | def allhexlify(data):
"""Hexlify given data into a string representation with hex values for all chars
Input like
'ab\x04ce'
becomes
'\x61\x62\x04\x63\x65'
"""
hx = binascii.hexlify(data)
return b''.join([b'\\x' + o for o in re.findall(b'..', hx)]) | [
"def",
"allhexlify",
"(",
"data",
")",
":",
"hx",
"=",
"binascii",
".",
"hexlify",
"(",
"data",
")",
"return",
"b''",
".",
"join",
"(",
"[",
"b'\\\\x'",
"+",
"o",
"for",
"o",
"in",
"re",
".",
"findall",
"(",
"b'..'",
",",
"hx",
")",
"]",
")"
] | Hexlify given data into a string representation with hex values for all chars
Input like
'ab\x04ce'
becomes
'\x61\x62\x04\x63\x65' | [
"Hexlify",
"given",
"data",
"into",
"a",
"string",
"representation",
"with",
"hex",
"values",
"for",
"all",
"chars",
"Input",
"like",
"ab",
"\\",
"x04ce",
"becomes",
"\\",
"x61",
"\\",
"x62",
"\\",
"x04",
"\\",
"x63",
"\\",
"x65"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/lib/stringlib.py#L19-L27 | train | 232,544 |
SAP/PyHDB | pyhdb/protocol/parts.py | Part.pack | def pack(self, remaining_size):
"""Pack data of part into binary format"""
arguments_count, payload = self.pack_data(remaining_size - self.header_size)
payload_length = len(payload)
# align payload length to multiple of 8
if payload_length % 8 != 0:
payload += b"\x00" * (8 - payload_length % 8)
self.header = PartHeader(self.kind, self.attribute, arguments_count, self.bigargumentcount,
payload_length, remaining_size)
hdr = self.header_struct.pack(*self.header)
if pyhdb.tracing:
self.trace_header = humanhexlify(hdr, 30)
self.trace_payload = humanhexlify(payload, 30)
return hdr + payload | python | def pack(self, remaining_size):
"""Pack data of part into binary format"""
arguments_count, payload = self.pack_data(remaining_size - self.header_size)
payload_length = len(payload)
# align payload length to multiple of 8
if payload_length % 8 != 0:
payload += b"\x00" * (8 - payload_length % 8)
self.header = PartHeader(self.kind, self.attribute, arguments_count, self.bigargumentcount,
payload_length, remaining_size)
hdr = self.header_struct.pack(*self.header)
if pyhdb.tracing:
self.trace_header = humanhexlify(hdr, 30)
self.trace_payload = humanhexlify(payload, 30)
return hdr + payload | [
"def",
"pack",
"(",
"self",
",",
"remaining_size",
")",
":",
"arguments_count",
",",
"payload",
"=",
"self",
".",
"pack_data",
"(",
"remaining_size",
"-",
"self",
".",
"header_size",
")",
"payload_length",
"=",
"len",
"(",
"payload",
")",
"# align payload length to multiple of 8",
"if",
"payload_length",
"%",
"8",
"!=",
"0",
":",
"payload",
"+=",
"b\"\\x00\"",
"*",
"(",
"8",
"-",
"payload_length",
"%",
"8",
")",
"self",
".",
"header",
"=",
"PartHeader",
"(",
"self",
".",
"kind",
",",
"self",
".",
"attribute",
",",
"arguments_count",
",",
"self",
".",
"bigargumentcount",
",",
"payload_length",
",",
"remaining_size",
")",
"hdr",
"=",
"self",
".",
"header_struct",
".",
"pack",
"(",
"*",
"self",
".",
"header",
")",
"if",
"pyhdb",
".",
"tracing",
":",
"self",
".",
"trace_header",
"=",
"humanhexlify",
"(",
"hdr",
",",
"30",
")",
"self",
".",
"trace_payload",
"=",
"humanhexlify",
"(",
"payload",
",",
"30",
")",
"return",
"hdr",
"+",
"payload"
] | Pack data of part into binary format | [
"Pack",
"data",
"of",
"part",
"into",
"binary",
"format"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/parts.py#L101-L116 | train | 232,545 |
SAP/PyHDB | pyhdb/protocol/parts.py | Part.unpack_from | def unpack_from(cls, payload, expected_parts):
"""Unpack parts from payload"""
for num_part in iter_range(expected_parts):
hdr = payload.read(cls.header_size)
try:
part_header = PartHeader(*cls.header_struct.unpack(hdr))
except struct.error:
raise InterfaceError("No valid part header")
if part_header.payload_size % 8 != 0:
part_payload_size = part_header.payload_size + 8 - (part_header.payload_size % 8)
else:
part_payload_size = part_header.payload_size
pl = payload.read(part_payload_size)
part_payload = io.BytesIO(pl)
try:
_PartClass = PART_MAPPING[part_header.part_kind]
except KeyError:
raise InterfaceError("Unknown part kind %s" % part_header.part_kind)
debug('%s (%d/%d): %s', _PartClass.__name__, num_part+1, expected_parts, str(part_header))
debug('Read %d bytes payload for part %d', part_payload_size, num_part + 1)
init_arguments = _PartClass.unpack_data(part_header.argument_count, part_payload)
debug('Part data: %s', init_arguments)
part = _PartClass(*init_arguments)
part.header = part_header
part.attribute = part_header.part_attributes
part.source = 'server'
if pyhdb.tracing:
part.trace_header = humanhexlify(hdr[:part_header.payload_size])
part.trace_payload = humanhexlify(pl, 30)
yield part | python | def unpack_from(cls, payload, expected_parts):
"""Unpack parts from payload"""
for num_part in iter_range(expected_parts):
hdr = payload.read(cls.header_size)
try:
part_header = PartHeader(*cls.header_struct.unpack(hdr))
except struct.error:
raise InterfaceError("No valid part header")
if part_header.payload_size % 8 != 0:
part_payload_size = part_header.payload_size + 8 - (part_header.payload_size % 8)
else:
part_payload_size = part_header.payload_size
pl = payload.read(part_payload_size)
part_payload = io.BytesIO(pl)
try:
_PartClass = PART_MAPPING[part_header.part_kind]
except KeyError:
raise InterfaceError("Unknown part kind %s" % part_header.part_kind)
debug('%s (%d/%d): %s', _PartClass.__name__, num_part+1, expected_parts, str(part_header))
debug('Read %d bytes payload for part %d', part_payload_size, num_part + 1)
init_arguments = _PartClass.unpack_data(part_header.argument_count, part_payload)
debug('Part data: %s', init_arguments)
part = _PartClass(*init_arguments)
part.header = part_header
part.attribute = part_header.part_attributes
part.source = 'server'
if pyhdb.tracing:
part.trace_header = humanhexlify(hdr[:part_header.payload_size])
part.trace_payload = humanhexlify(pl, 30)
yield part | [
"def",
"unpack_from",
"(",
"cls",
",",
"payload",
",",
"expected_parts",
")",
":",
"for",
"num_part",
"in",
"iter_range",
"(",
"expected_parts",
")",
":",
"hdr",
"=",
"payload",
".",
"read",
"(",
"cls",
".",
"header_size",
")",
"try",
":",
"part_header",
"=",
"PartHeader",
"(",
"*",
"cls",
".",
"header_struct",
".",
"unpack",
"(",
"hdr",
")",
")",
"except",
"struct",
".",
"error",
":",
"raise",
"InterfaceError",
"(",
"\"No valid part header\"",
")",
"if",
"part_header",
".",
"payload_size",
"%",
"8",
"!=",
"0",
":",
"part_payload_size",
"=",
"part_header",
".",
"payload_size",
"+",
"8",
"-",
"(",
"part_header",
".",
"payload_size",
"%",
"8",
")",
"else",
":",
"part_payload_size",
"=",
"part_header",
".",
"payload_size",
"pl",
"=",
"payload",
".",
"read",
"(",
"part_payload_size",
")",
"part_payload",
"=",
"io",
".",
"BytesIO",
"(",
"pl",
")",
"try",
":",
"_PartClass",
"=",
"PART_MAPPING",
"[",
"part_header",
".",
"part_kind",
"]",
"except",
"KeyError",
":",
"raise",
"InterfaceError",
"(",
"\"Unknown part kind %s\"",
"%",
"part_header",
".",
"part_kind",
")",
"debug",
"(",
"'%s (%d/%d): %s'",
",",
"_PartClass",
".",
"__name__",
",",
"num_part",
"+",
"1",
",",
"expected_parts",
",",
"str",
"(",
"part_header",
")",
")",
"debug",
"(",
"'Read %d bytes payload for part %d'",
",",
"part_payload_size",
",",
"num_part",
"+",
"1",
")",
"init_arguments",
"=",
"_PartClass",
".",
"unpack_data",
"(",
"part_header",
".",
"argument_count",
",",
"part_payload",
")",
"debug",
"(",
"'Part data: %s'",
",",
"init_arguments",
")",
"part",
"=",
"_PartClass",
"(",
"*",
"init_arguments",
")",
"part",
".",
"header",
"=",
"part_header",
"part",
".",
"attribute",
"=",
"part_header",
".",
"part_attributes",
"part",
".",
"source",
"=",
"'server'",
"if",
"pyhdb",
".",
"tracing",
":",
"part",
".",
"trace_header",
"=",
"humanhexlify",
"(",
"hdr",
"[",
":",
"part_header",
".",
"payload_size",
"]",
")",
"part",
".",
"trace_payload",
"=",
"humanhexlify",
"(",
"pl",
",",
"30",
")",
"yield",
"part"
] | Unpack parts from payload | [
"Unpack",
"parts",
"from",
"payload"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/parts.py#L122-L155 | train | 232,546 |
SAP/PyHDB | pyhdb/protocol/parts.py | ReadLobRequest.pack_data | def pack_data(self, remaining_size):
"""Pack data. readoffset has to be increased by one, seems like HANA starts from 1, not zero."""
payload = self.part_struct.pack(self.locator_id, self.readoffset + 1, self.readlength, b' ')
return 4, payload | python | def pack_data(self, remaining_size):
"""Pack data. readoffset has to be increased by one, seems like HANA starts from 1, not zero."""
payload = self.part_struct.pack(self.locator_id, self.readoffset + 1, self.readlength, b' ')
return 4, payload | [
"def",
"pack_data",
"(",
"self",
",",
"remaining_size",
")",
":",
"payload",
"=",
"self",
".",
"part_struct",
".",
"pack",
"(",
"self",
".",
"locator_id",
",",
"self",
".",
"readoffset",
"+",
"1",
",",
"self",
".",
"readlength",
",",
"b' '",
")",
"return",
"4",
",",
"payload"
] | Pack data. readoffset has to be increased by one, seems like HANA starts from 1, not zero. | [
"Pack",
"data",
".",
"readoffset",
"has",
"to",
"be",
"increased",
"by",
"one",
"seems",
"like",
"HANA",
"starts",
"from",
"1",
"not",
"zero",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/parts.py#L338-L341 | train | 232,547 |
SAP/PyHDB | pyhdb/protocol/message.py | RequestMessage.build_payload | def build_payload(self, payload):
""" Build payload of message. """
for segment in self.segments:
segment.pack(payload, commit=self.autocommit) | python | def build_payload(self, payload):
""" Build payload of message. """
for segment in self.segments:
segment.pack(payload, commit=self.autocommit) | [
"def",
"build_payload",
"(",
"self",
",",
"payload",
")",
":",
"for",
"segment",
"in",
"self",
".",
"segments",
":",
"segment",
".",
"pack",
"(",
"payload",
",",
"commit",
"=",
"self",
".",
"autocommit",
")"
] | Build payload of message. | [
"Build",
"payload",
"of",
"message",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/message.py#L42-L45 | train | 232,548 |
SAP/PyHDB | pyhdb/protocol/message.py | RequestMessage.pack | def pack(self):
""" Pack message to binary stream. """
payload = io.BytesIO()
# Advance num bytes equal to header size - the header is written later
# after the payload of all segments and parts has been written:
payload.seek(self.header_size, io.SEEK_CUR)
# Write out payload of segments and parts:
self.build_payload(payload)
packet_length = len(payload.getvalue()) - self.header_size
self.header = MessageHeader(self.session_id, self.packet_count, packet_length, constants.MAX_SEGMENT_SIZE,
num_segments=len(self.segments), packet_options=0)
packed_header = self.header_struct.pack(*self.header)
# Go back to begining of payload for writing message header:
payload.seek(0)
payload.write(packed_header)
payload.seek(0, io.SEEK_END)
trace(self)
return payload | python | def pack(self):
""" Pack message to binary stream. """
payload = io.BytesIO()
# Advance num bytes equal to header size - the header is written later
# after the payload of all segments and parts has been written:
payload.seek(self.header_size, io.SEEK_CUR)
# Write out payload of segments and parts:
self.build_payload(payload)
packet_length = len(payload.getvalue()) - self.header_size
self.header = MessageHeader(self.session_id, self.packet_count, packet_length, constants.MAX_SEGMENT_SIZE,
num_segments=len(self.segments), packet_options=0)
packed_header = self.header_struct.pack(*self.header)
# Go back to begining of payload for writing message header:
payload.seek(0)
payload.write(packed_header)
payload.seek(0, io.SEEK_END)
trace(self)
return payload | [
"def",
"pack",
"(",
"self",
")",
":",
"payload",
"=",
"io",
".",
"BytesIO",
"(",
")",
"# Advance num bytes equal to header size - the header is written later",
"# after the payload of all segments and parts has been written:",
"payload",
".",
"seek",
"(",
"self",
".",
"header_size",
",",
"io",
".",
"SEEK_CUR",
")",
"# Write out payload of segments and parts:",
"self",
".",
"build_payload",
"(",
"payload",
")",
"packet_length",
"=",
"len",
"(",
"payload",
".",
"getvalue",
"(",
")",
")",
"-",
"self",
".",
"header_size",
"self",
".",
"header",
"=",
"MessageHeader",
"(",
"self",
".",
"session_id",
",",
"self",
".",
"packet_count",
",",
"packet_length",
",",
"constants",
".",
"MAX_SEGMENT_SIZE",
",",
"num_segments",
"=",
"len",
"(",
"self",
".",
"segments",
")",
",",
"packet_options",
"=",
"0",
")",
"packed_header",
"=",
"self",
".",
"header_struct",
".",
"pack",
"(",
"*",
"self",
".",
"header",
")",
"# Go back to begining of payload for writing message header:",
"payload",
".",
"seek",
"(",
"0",
")",
"payload",
".",
"write",
"(",
"packed_header",
")",
"payload",
".",
"seek",
"(",
"0",
",",
"io",
".",
"SEEK_END",
")",
"trace",
"(",
"self",
")",
"return",
"payload"
] | Pack message to binary stream. | [
"Pack",
"message",
"to",
"binary",
"stream",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/message.py#L47-L69 | train | 232,549 |
serge-sans-paille/pythran | pythran/syntax.py | check_specs | def check_specs(specs, renamings, types):
'''
Does nothing but raising PythranSyntaxError if specs
are incompatible with the actual code
'''
from pythran.types.tog import unify, clone, tr
from pythran.types.tog import Function, TypeVariable, InferenceError
functions = {renamings.get(k, k): v for k, v in specs.functions.items()}
for fname, signatures in functions.items():
ftype = types[fname]
for signature in signatures:
sig_type = Function([tr(p) for p in signature], TypeVariable())
try:
unify(clone(sig_type), clone(ftype))
except InferenceError:
raise PythranSyntaxError(
"Specification for `{}` does not match inferred type:\n"
"expected `{}`\n"
"got `Callable[[{}], ...]`".format(
fname,
ftype,
", ".join(map(str, sig_type.types[:-1])))
) | python | def check_specs(specs, renamings, types):
'''
Does nothing but raising PythranSyntaxError if specs
are incompatible with the actual code
'''
from pythran.types.tog import unify, clone, tr
from pythran.types.tog import Function, TypeVariable, InferenceError
functions = {renamings.get(k, k): v for k, v in specs.functions.items()}
for fname, signatures in functions.items():
ftype = types[fname]
for signature in signatures:
sig_type = Function([tr(p) for p in signature], TypeVariable())
try:
unify(clone(sig_type), clone(ftype))
except InferenceError:
raise PythranSyntaxError(
"Specification for `{}` does not match inferred type:\n"
"expected `{}`\n"
"got `Callable[[{}], ...]`".format(
fname,
ftype,
", ".join(map(str, sig_type.types[:-1])))
) | [
"def",
"check_specs",
"(",
"specs",
",",
"renamings",
",",
"types",
")",
":",
"from",
"pythran",
".",
"types",
".",
"tog",
"import",
"unify",
",",
"clone",
",",
"tr",
"from",
"pythran",
".",
"types",
".",
"tog",
"import",
"Function",
",",
"TypeVariable",
",",
"InferenceError",
"functions",
"=",
"{",
"renamings",
".",
"get",
"(",
"k",
",",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"specs",
".",
"functions",
".",
"items",
"(",
")",
"}",
"for",
"fname",
",",
"signatures",
"in",
"functions",
".",
"items",
"(",
")",
":",
"ftype",
"=",
"types",
"[",
"fname",
"]",
"for",
"signature",
"in",
"signatures",
":",
"sig_type",
"=",
"Function",
"(",
"[",
"tr",
"(",
"p",
")",
"for",
"p",
"in",
"signature",
"]",
",",
"TypeVariable",
"(",
")",
")",
"try",
":",
"unify",
"(",
"clone",
"(",
"sig_type",
")",
",",
"clone",
"(",
"ftype",
")",
")",
"except",
"InferenceError",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Specification for `{}` does not match inferred type:\\n\"",
"\"expected `{}`\\n\"",
"\"got `Callable[[{}], ...]`\"",
".",
"format",
"(",
"fname",
",",
"ftype",
",",
"\", \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"sig_type",
".",
"types",
"[",
":",
"-",
"1",
"]",
")",
")",
")",
")"
] | Does nothing but raising PythranSyntaxError if specs
are incompatible with the actual code | [
"Does",
"nothing",
"but",
"raising",
"PythranSyntaxError",
"if",
"specs",
"are",
"incompatible",
"with",
"the",
"actual",
"code"
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/syntax.py#L190-L213 | train | 232,550 |
serge-sans-paille/pythran | pythran/syntax.py | check_exports | def check_exports(mod, specs, renamings):
'''
Does nothing but raising PythranSyntaxError if specs
references an undefined global
'''
functions = {renamings.get(k, k): v for k, v in specs.functions.items()}
mod_functions = {node.name: node for node in mod.body
if isinstance(node, ast.FunctionDef)}
for fname, signatures in functions.items():
try:
fnode = mod_functions[fname]
except KeyError:
raise PythranSyntaxError(
"Invalid spec: exporting undefined function `{}`"
.format(fname))
for signature in signatures:
args_count = len(fnode.args.args)
if len(signature) > args_count:
raise PythranSyntaxError(
"Too many arguments when exporting `{}`"
.format(fname))
elif len(signature) < args_count - len(fnode.args.defaults):
raise PythranSyntaxError(
"Not enough arguments when exporting `{}`"
.format(fname)) | python | def check_exports(mod, specs, renamings):
'''
Does nothing but raising PythranSyntaxError if specs
references an undefined global
'''
functions = {renamings.get(k, k): v for k, v in specs.functions.items()}
mod_functions = {node.name: node for node in mod.body
if isinstance(node, ast.FunctionDef)}
for fname, signatures in functions.items():
try:
fnode = mod_functions[fname]
except KeyError:
raise PythranSyntaxError(
"Invalid spec: exporting undefined function `{}`"
.format(fname))
for signature in signatures:
args_count = len(fnode.args.args)
if len(signature) > args_count:
raise PythranSyntaxError(
"Too many arguments when exporting `{}`"
.format(fname))
elif len(signature) < args_count - len(fnode.args.defaults):
raise PythranSyntaxError(
"Not enough arguments when exporting `{}`"
.format(fname)) | [
"def",
"check_exports",
"(",
"mod",
",",
"specs",
",",
"renamings",
")",
":",
"functions",
"=",
"{",
"renamings",
".",
"get",
"(",
"k",
",",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"specs",
".",
"functions",
".",
"items",
"(",
")",
"}",
"mod_functions",
"=",
"{",
"node",
".",
"name",
":",
"node",
"for",
"node",
"in",
"mod",
".",
"body",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"FunctionDef",
")",
"}",
"for",
"fname",
",",
"signatures",
"in",
"functions",
".",
"items",
"(",
")",
":",
"try",
":",
"fnode",
"=",
"mod_functions",
"[",
"fname",
"]",
"except",
"KeyError",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Invalid spec: exporting undefined function `{}`\"",
".",
"format",
"(",
"fname",
")",
")",
"for",
"signature",
"in",
"signatures",
":",
"args_count",
"=",
"len",
"(",
"fnode",
".",
"args",
".",
"args",
")",
"if",
"len",
"(",
"signature",
")",
">",
"args_count",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Too many arguments when exporting `{}`\"",
".",
"format",
"(",
"fname",
")",
")",
"elif",
"len",
"(",
"signature",
")",
"<",
"args_count",
"-",
"len",
"(",
"fnode",
".",
"args",
".",
"defaults",
")",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Not enough arguments when exporting `{}`\"",
".",
"format",
"(",
"fname",
")",
")"
] | Does nothing but raising PythranSyntaxError if specs
references an undefined global | [
"Does",
"nothing",
"but",
"raising",
"PythranSyntaxError",
"if",
"specs",
"references",
"an",
"undefined",
"global"
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/syntax.py#L216-L242 | train | 232,551 |
serge-sans-paille/pythran | pythran/syntax.py | SyntaxChecker.visit_Import | def visit_Import(self, node):
""" Check if imported module exists in MODULES. """
for alias in node.names:
current_module = MODULES
# Recursive check for submodules
for path in alias.name.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(alias.name),
node)
else:
current_module = current_module[path] | python | def visit_Import(self, node):
""" Check if imported module exists in MODULES. """
for alias in node.names:
current_module = MODULES
# Recursive check for submodules
for path in alias.name.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(alias.name),
node)
else:
current_module = current_module[path] | [
"def",
"visit_Import",
"(",
"self",
",",
"node",
")",
":",
"for",
"alias",
"in",
"node",
".",
"names",
":",
"current_module",
"=",
"MODULES",
"# Recursive check for submodules",
"for",
"path",
"in",
"alias",
".",
"name",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"path",
"not",
"in",
"current_module",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Module '{0}' unknown.\"",
".",
"format",
"(",
"alias",
".",
"name",
")",
",",
"node",
")",
"else",
":",
"current_module",
"=",
"current_module",
"[",
"path",
"]"
] | Check if imported module exists in MODULES. | [
"Check",
"if",
"imported",
"module",
"exists",
"in",
"MODULES",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/syntax.py#L129-L140 | train | 232,552 |
serge-sans-paille/pythran | pythran/syntax.py | SyntaxChecker.visit_ImportFrom | def visit_ImportFrom(self, node):
"""
Check validity of imported functions.
Check:
- no level specific value are provided.
- a module is provided
- module/submodule exists in MODULES
- imported function exists in the given module/submodule
"""
if node.level:
raise PythranSyntaxError("Relative import not supported", node)
if not node.module:
raise PythranSyntaxError("import from without module", node)
module = node.module
current_module = MODULES
# Check if module exists
for path in module.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(module),
node)
else:
current_module = current_module[path]
# Check if imported functions exist
for alias in node.names:
if alias.name == '*':
continue
elif alias.name not in current_module:
raise PythranSyntaxError(
"identifier '{0}' not found in module '{1}'".format(
alias.name,
module),
node) | python | def visit_ImportFrom(self, node):
"""
Check validity of imported functions.
Check:
- no level specific value are provided.
- a module is provided
- module/submodule exists in MODULES
- imported function exists in the given module/submodule
"""
if node.level:
raise PythranSyntaxError("Relative import not supported", node)
if not node.module:
raise PythranSyntaxError("import from without module", node)
module = node.module
current_module = MODULES
# Check if module exists
for path in module.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(module),
node)
else:
current_module = current_module[path]
# Check if imported functions exist
for alias in node.names:
if alias.name == '*':
continue
elif alias.name not in current_module:
raise PythranSyntaxError(
"identifier '{0}' not found in module '{1}'".format(
alias.name,
module),
node) | [
"def",
"visit_ImportFrom",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
".",
"level",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Relative import not supported\"",
",",
"node",
")",
"if",
"not",
"node",
".",
"module",
":",
"raise",
"PythranSyntaxError",
"(",
"\"import from without module\"",
",",
"node",
")",
"module",
"=",
"node",
".",
"module",
"current_module",
"=",
"MODULES",
"# Check if module exists",
"for",
"path",
"in",
"module",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"path",
"not",
"in",
"current_module",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Module '{0}' unknown.\"",
".",
"format",
"(",
"module",
")",
",",
"node",
")",
"else",
":",
"current_module",
"=",
"current_module",
"[",
"path",
"]",
"# Check if imported functions exist",
"for",
"alias",
"in",
"node",
".",
"names",
":",
"if",
"alias",
".",
"name",
"==",
"'*'",
":",
"continue",
"elif",
"alias",
".",
"name",
"not",
"in",
"current_module",
":",
"raise",
"PythranSyntaxError",
"(",
"\"identifier '{0}' not found in module '{1}'\"",
".",
"format",
"(",
"alias",
".",
"name",
",",
"module",
")",
",",
"node",
")"
] | Check validity of imported functions.
Check:
- no level specific value are provided.
- a module is provided
- module/submodule exists in MODULES
- imported function exists in the given module/submodule | [
"Check",
"validity",
"of",
"imported",
"functions",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/syntax.py#L142-L176 | train | 232,553 |
serge-sans-paille/pythran | pythran/passmanager.py | uncamel | def uncamel(name):
"""Transform CamelCase naming convention into C-ish convention."""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | python | def uncamel(name):
"""Transform CamelCase naming convention into C-ish convention."""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | [
"def",
"uncamel",
"(",
"name",
")",
":",
"s1",
"=",
"re",
".",
"sub",
"(",
"'(.)([A-Z][a-z]+)'",
",",
"r'\\1_\\2'",
",",
"name",
")",
"return",
"re",
".",
"sub",
"(",
"'([a-z0-9])([A-Z])'",
",",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")"
] | Transform CamelCase naming convention into C-ish convention. | [
"Transform",
"CamelCase",
"naming",
"convention",
"into",
"C",
"-",
"ish",
"convention",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L19-L22 | train | 232,554 |
serge-sans-paille/pythran | pythran/passmanager.py | ContextManager.verify_dependencies | def verify_dependencies(self):
"""
Checks no analysis are called before a transformation,
as the transformation could invalidate the analysis.
"""
for i in range(1, len(self.deps)):
assert(not (isinstance(self.deps[i], Transformation) and
isinstance(self.deps[i - 1], Analysis))
), "invalid dep order for %s" % self | python | def verify_dependencies(self):
"""
Checks no analysis are called before a transformation,
as the transformation could invalidate the analysis.
"""
for i in range(1, len(self.deps)):
assert(not (isinstance(self.deps[i], Transformation) and
isinstance(self.deps[i - 1], Analysis))
), "invalid dep order for %s" % self | [
"def",
"verify_dependencies",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"deps",
")",
")",
":",
"assert",
"(",
"not",
"(",
"isinstance",
"(",
"self",
".",
"deps",
"[",
"i",
"]",
",",
"Transformation",
")",
"and",
"isinstance",
"(",
"self",
".",
"deps",
"[",
"i",
"-",
"1",
"]",
",",
"Analysis",
")",
")",
")",
",",
"\"invalid dep order for %s\"",
"%",
"self"
] | Checks no analysis are called before a transformation,
as the transformation could invalidate the analysis. | [
"Checks",
"no",
"analysis",
"are",
"called",
"before",
"a",
"transformation",
"as",
"the",
"transformation",
"could",
"invalidate",
"the",
"analysis",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L58-L67 | train | 232,555 |
serge-sans-paille/pythran | pythran/passmanager.py | ContextManager.prepare | def prepare(self, node):
'''Gather analysis result required by this analysis'''
if isinstance(node, ast.Module):
self.ctx.module = node
elif isinstance(node, ast.FunctionDef):
self.ctx.function = node
for D in self.deps:
d = D()
d.attach(self.passmanager, self.ctx)
result = d.run(node)
setattr(self, uncamel(D.__name__), result) | python | def prepare(self, node):
'''Gather analysis result required by this analysis'''
if isinstance(node, ast.Module):
self.ctx.module = node
elif isinstance(node, ast.FunctionDef):
self.ctx.function = node
for D in self.deps:
d = D()
d.attach(self.passmanager, self.ctx)
result = d.run(node)
setattr(self, uncamel(D.__name__), result) | [
"def",
"prepare",
"(",
"self",
",",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Module",
")",
":",
"self",
".",
"ctx",
".",
"module",
"=",
"node",
"elif",
"isinstance",
"(",
"node",
",",
"ast",
".",
"FunctionDef",
")",
":",
"self",
".",
"ctx",
".",
"function",
"=",
"node",
"for",
"D",
"in",
"self",
".",
"deps",
":",
"d",
"=",
"D",
"(",
")",
"d",
".",
"attach",
"(",
"self",
".",
"passmanager",
",",
"self",
".",
"ctx",
")",
"result",
"=",
"d",
".",
"run",
"(",
"node",
")",
"setattr",
"(",
"self",
",",
"uncamel",
"(",
"D",
".",
"__name__",
")",
",",
"result",
")"
] | Gather analysis result required by this analysis | [
"Gather",
"analysis",
"result",
"required",
"by",
"this",
"analysis"
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L80-L91 | train | 232,556 |
serge-sans-paille/pythran | pythran/passmanager.py | Transformation.run | def run(self, node):
""" Apply transformation and dependencies and fix new node location."""
n = super(Transformation, self).run(node)
if self.update:
ast.fix_missing_locations(n)
self.passmanager._cache.clear()
return n | python | def run(self, node):
""" Apply transformation and dependencies and fix new node location."""
n = super(Transformation, self).run(node)
if self.update:
ast.fix_missing_locations(n)
self.passmanager._cache.clear()
return n | [
"def",
"run",
"(",
"self",
",",
"node",
")",
":",
"n",
"=",
"super",
"(",
"Transformation",
",",
"self",
")",
".",
"run",
"(",
"node",
")",
"if",
"self",
".",
"update",
":",
"ast",
".",
"fix_missing_locations",
"(",
"n",
")",
"self",
".",
"passmanager",
".",
"_cache",
".",
"clear",
"(",
")",
"return",
"n"
] | Apply transformation and dependencies and fix new node location. | [
"Apply",
"transformation",
"and",
"dependencies",
"and",
"fix",
"new",
"node",
"location",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L183-L189 | train | 232,557 |
serge-sans-paille/pythran | pythran/passmanager.py | Transformation.apply | def apply(self, node):
""" Apply transformation and return if an update happened. """
new_node = self.run(node)
return self.update, new_node | python | def apply(self, node):
""" Apply transformation and return if an update happened. """
new_node = self.run(node)
return self.update, new_node | [
"def",
"apply",
"(",
"self",
",",
"node",
")",
":",
"new_node",
"=",
"self",
".",
"run",
"(",
"node",
")",
"return",
"self",
".",
"update",
",",
"new_node"
] | Apply transformation and return if an update happened. | [
"Apply",
"transformation",
"and",
"return",
"if",
"an",
"update",
"happened",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L191-L194 | train | 232,558 |
serge-sans-paille/pythran | pythran/passmanager.py | PassManager.gather | def gather(self, analysis, node):
"High-level function to call an `analysis' on a `node'"
assert issubclass(analysis, Analysis)
a = analysis()
a.attach(self)
return a.run(node) | python | def gather(self, analysis, node):
"High-level function to call an `analysis' on a `node'"
assert issubclass(analysis, Analysis)
a = analysis()
a.attach(self)
return a.run(node) | [
"def",
"gather",
"(",
"self",
",",
"analysis",
",",
"node",
")",
":",
"assert",
"issubclass",
"(",
"analysis",
",",
"Analysis",
")",
"a",
"=",
"analysis",
"(",
")",
"a",
".",
"attach",
"(",
"self",
")",
"return",
"a",
".",
"run",
"(",
"node",
")"
] | High-level function to call an `analysis' on a `node | [
"High",
"-",
"level",
"function",
"to",
"call",
"an",
"analysis",
"on",
"a",
"node"
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L206-L211 | train | 232,559 |
serge-sans-paille/pythran | pythran/passmanager.py | PassManager.dump | def dump(self, backend, node):
'''High-level function to call a `backend' on a `node' to generate
code for module `module_name'.'''
assert issubclass(backend, Backend)
b = backend()
b.attach(self)
return b.run(node) | python | def dump(self, backend, node):
'''High-level function to call a `backend' on a `node' to generate
code for module `module_name'.'''
assert issubclass(backend, Backend)
b = backend()
b.attach(self)
return b.run(node) | [
"def",
"dump",
"(",
"self",
",",
"backend",
",",
"node",
")",
":",
"assert",
"issubclass",
"(",
"backend",
",",
"Backend",
")",
"b",
"=",
"backend",
"(",
")",
"b",
".",
"attach",
"(",
"self",
")",
"return",
"b",
".",
"run",
"(",
"node",
")"
] | High-level function to call a `backend' on a `node' to generate
code for module `module_name'. | [
"High",
"-",
"level",
"function",
"to",
"call",
"a",
"backend",
"on",
"a",
"node",
"to",
"generate",
"code",
"for",
"module",
"module_name",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L213-L219 | train | 232,560 |
serge-sans-paille/pythran | pythran/passmanager.py | PassManager.apply | def apply(self, transformation, node):
'''
High-level function to call a `transformation' on a `node'.
If the transformation is an analysis, the result of the analysis
is displayed.
'''
assert issubclass(transformation, (Transformation, Analysis))
a = transformation()
a.attach(self)
res = a.apply(node)
# the transformation updated the AST, so analyse may need to be rerun
# we could use a finer-grain caching system, and provide a way to flag
# some analyses as `unmodified' by the transformation, as done in LLVM
# (and PIPS ;-)
if a.update:
self._cache.clear()
return res | python | def apply(self, transformation, node):
'''
High-level function to call a `transformation' on a `node'.
If the transformation is an analysis, the result of the analysis
is displayed.
'''
assert issubclass(transformation, (Transformation, Analysis))
a = transformation()
a.attach(self)
res = a.apply(node)
# the transformation updated the AST, so analyse may need to be rerun
# we could use a finer-grain caching system, and provide a way to flag
# some analyses as `unmodified' by the transformation, as done in LLVM
# (and PIPS ;-)
if a.update:
self._cache.clear()
return res | [
"def",
"apply",
"(",
"self",
",",
"transformation",
",",
"node",
")",
":",
"assert",
"issubclass",
"(",
"transformation",
",",
"(",
"Transformation",
",",
"Analysis",
")",
")",
"a",
"=",
"transformation",
"(",
")",
"a",
".",
"attach",
"(",
"self",
")",
"res",
"=",
"a",
".",
"apply",
"(",
"node",
")",
"# the transformation updated the AST, so analyse may need to be rerun",
"# we could use a finer-grain caching system, and provide a way to flag",
"# some analyses as `unmodified' by the transformation, as done in LLVM",
"# (and PIPS ;-)",
"if",
"a",
".",
"update",
":",
"self",
".",
"_cache",
".",
"clear",
"(",
")",
"return",
"res"
] | High-level function to call a `transformation' on a `node'.
If the transformation is an analysis, the result of the analysis
is displayed. | [
"High",
"-",
"level",
"function",
"to",
"call",
"a",
"transformation",
"on",
"a",
"node",
".",
"If",
"the",
"transformation",
"is",
"an",
"analysis",
"the",
"result",
"of",
"the",
"analysis",
"is",
"displayed",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L221-L238 | train | 232,561 |
serge-sans-paille/pythran | pythran/types/conversion.py | pytype_to_ctype | def pytype_to_ctype(t):
""" Python -> pythonic type binding. """
if isinstance(t, List):
return 'pythonic::types::list<{0}>'.format(
pytype_to_ctype(t.__args__[0])
)
elif isinstance(t, Set):
return 'pythonic::types::set<{0}>'.format(
pytype_to_ctype(t.__args__[0])
)
elif isinstance(t, Dict):
tkey, tvalue = t.__args__
return 'pythonic::types::dict<{0},{1}>'.format(pytype_to_ctype(tkey),
pytype_to_ctype(tvalue))
elif isinstance(t, Tuple):
return 'decltype(pythonic::types::make_tuple({0}))'.format(
", ".join('std::declval<{}>()'.format(pytype_to_ctype(p))
for p in t.__args__)
)
elif isinstance(t, NDArray):
dtype = pytype_to_ctype(t.__args__[0])
ndim = len(t.__args__) - 1
shapes = ','.join(('long'
if s.stop == -1 or s.stop is None
else 'std::integral_constant<long, {}>'.format(
s.stop)
) for s in t.__args__[1:])
pshape = 'pythonic::types::pshape<{0}>'.format(shapes)
arr = 'pythonic::types::ndarray<{0},{1}>'.format(
dtype, pshape)
if t.__args__[1].start == -1:
return 'pythonic::types::numpy_texpr<{0}>'.format(arr)
elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]):
slices = ", ".join(['pythonic::types::normalized_slice'] * ndim)
return 'pythonic::types::numpy_gexpr<{0},{1}>'.format(arr, slices)
else:
return arr
elif isinstance(t, Pointer):
return 'pythonic::types::pointer<{0}>'.format(
pytype_to_ctype(t.__args__[0])
)
elif isinstance(t, Fun):
return 'pythonic::types::cfun<{0}({1})>'.format(
pytype_to_ctype(t.__args__[-1]),
", ".join(pytype_to_ctype(arg) for arg in t.__args__[:-1]),
)
elif t in PYTYPE_TO_CTYPE_TABLE:
return PYTYPE_TO_CTYPE_TABLE[t]
else:
raise NotImplementedError("{0}:{1}".format(type(t), t)) | python | def pytype_to_ctype(t):
""" Python -> pythonic type binding. """
if isinstance(t, List):
return 'pythonic::types::list<{0}>'.format(
pytype_to_ctype(t.__args__[0])
)
elif isinstance(t, Set):
return 'pythonic::types::set<{0}>'.format(
pytype_to_ctype(t.__args__[0])
)
elif isinstance(t, Dict):
tkey, tvalue = t.__args__
return 'pythonic::types::dict<{0},{1}>'.format(pytype_to_ctype(tkey),
pytype_to_ctype(tvalue))
elif isinstance(t, Tuple):
return 'decltype(pythonic::types::make_tuple({0}))'.format(
", ".join('std::declval<{}>()'.format(pytype_to_ctype(p))
for p in t.__args__)
)
elif isinstance(t, NDArray):
dtype = pytype_to_ctype(t.__args__[0])
ndim = len(t.__args__) - 1
shapes = ','.join(('long'
if s.stop == -1 or s.stop is None
else 'std::integral_constant<long, {}>'.format(
s.stop)
) for s in t.__args__[1:])
pshape = 'pythonic::types::pshape<{0}>'.format(shapes)
arr = 'pythonic::types::ndarray<{0},{1}>'.format(
dtype, pshape)
if t.__args__[1].start == -1:
return 'pythonic::types::numpy_texpr<{0}>'.format(arr)
elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]):
slices = ", ".join(['pythonic::types::normalized_slice'] * ndim)
return 'pythonic::types::numpy_gexpr<{0},{1}>'.format(arr, slices)
else:
return arr
elif isinstance(t, Pointer):
return 'pythonic::types::pointer<{0}>'.format(
pytype_to_ctype(t.__args__[0])
)
elif isinstance(t, Fun):
return 'pythonic::types::cfun<{0}({1})>'.format(
pytype_to_ctype(t.__args__[-1]),
", ".join(pytype_to_ctype(arg) for arg in t.__args__[:-1]),
)
elif t in PYTYPE_TO_CTYPE_TABLE:
return PYTYPE_TO_CTYPE_TABLE[t]
else:
raise NotImplementedError("{0}:{1}".format(type(t), t)) | [
"def",
"pytype_to_ctype",
"(",
"t",
")",
":",
"if",
"isinstance",
"(",
"t",
",",
"List",
")",
":",
"return",
"'pythonic::types::list<{0}>'",
".",
"format",
"(",
"pytype_to_ctype",
"(",
"t",
".",
"__args__",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"Set",
")",
":",
"return",
"'pythonic::types::set<{0}>'",
".",
"format",
"(",
"pytype_to_ctype",
"(",
"t",
".",
"__args__",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"Dict",
")",
":",
"tkey",
",",
"tvalue",
"=",
"t",
".",
"__args__",
"return",
"'pythonic::types::dict<{0},{1}>'",
".",
"format",
"(",
"pytype_to_ctype",
"(",
"tkey",
")",
",",
"pytype_to_ctype",
"(",
"tvalue",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"Tuple",
")",
":",
"return",
"'decltype(pythonic::types::make_tuple({0}))'",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"'std::declval<{}>()'",
".",
"format",
"(",
"pytype_to_ctype",
"(",
"p",
")",
")",
"for",
"p",
"in",
"t",
".",
"__args__",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"NDArray",
")",
":",
"dtype",
"=",
"pytype_to_ctype",
"(",
"t",
".",
"__args__",
"[",
"0",
"]",
")",
"ndim",
"=",
"len",
"(",
"t",
".",
"__args__",
")",
"-",
"1",
"shapes",
"=",
"','",
".",
"join",
"(",
"(",
"'long'",
"if",
"s",
".",
"stop",
"==",
"-",
"1",
"or",
"s",
".",
"stop",
"is",
"None",
"else",
"'std::integral_constant<long, {}>'",
".",
"format",
"(",
"s",
".",
"stop",
")",
")",
"for",
"s",
"in",
"t",
".",
"__args__",
"[",
"1",
":",
"]",
")",
"pshape",
"=",
"'pythonic::types::pshape<{0}>'",
".",
"format",
"(",
"shapes",
")",
"arr",
"=",
"'pythonic::types::ndarray<{0},{1}>'",
".",
"format",
"(",
"dtype",
",",
"pshape",
")",
"if",
"t",
".",
"__args__",
"[",
"1",
"]",
".",
"start",
"==",
"-",
"1",
":",
"return",
"'pythonic::types::numpy_texpr<{0}>'",
".",
"format",
"(",
"arr",
")",
"elif",
"any",
"(",
"s",
".",
"step",
"is",
"not",
"None",
"and",
"s",
".",
"step",
"<",
"0",
"for",
"s",
"in",
"t",
".",
"__args__",
"[",
"1",
":",
"]",
")",
":",
"slices",
"=",
"\", \"",
".",
"join",
"(",
"[",
"'pythonic::types::normalized_slice'",
"]",
"*",
"ndim",
")",
"return",
"'pythonic::types::numpy_gexpr<{0},{1}>'",
".",
"format",
"(",
"arr",
",",
"slices",
")",
"else",
":",
"return",
"arr",
"elif",
"isinstance",
"(",
"t",
",",
"Pointer",
")",
":",
"return",
"'pythonic::types::pointer<{0}>'",
".",
"format",
"(",
"pytype_to_ctype",
"(",
"t",
".",
"__args__",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"Fun",
")",
":",
"return",
"'pythonic::types::cfun<{0}({1})>'",
".",
"format",
"(",
"pytype_to_ctype",
"(",
"t",
".",
"__args__",
"[",
"-",
"1",
"]",
")",
",",
"\", \"",
".",
"join",
"(",
"pytype_to_ctype",
"(",
"arg",
")",
"for",
"arg",
"in",
"t",
".",
"__args__",
"[",
":",
"-",
"1",
"]",
")",
",",
")",
"elif",
"t",
"in",
"PYTYPE_TO_CTYPE_TABLE",
":",
"return",
"PYTYPE_TO_CTYPE_TABLE",
"[",
"t",
"]",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"{0}:{1}\"",
".",
"format",
"(",
"type",
"(",
"t",
")",
",",
"t",
")",
")"
] | Python -> pythonic type binding. | [
"Python",
"-",
">",
"pythonic",
"type",
"binding",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/conversion.py#L43-L92 | train | 232,562 |
serge-sans-paille/pythran | pythran/types/conversion.py | pytype_to_pretty_type | def pytype_to_pretty_type(t):
""" Python -> docstring type. """
if isinstance(t, List):
return '{0} list'.format(pytype_to_pretty_type(t.__args__[0]))
elif isinstance(t, Set):
return '{0} set'.format(pytype_to_pretty_type(t.__args__[0]))
elif isinstance(t, Dict):
tkey, tvalue = t.__args__
return '{0}:{1} dict'.format(pytype_to_pretty_type(tkey),
pytype_to_pretty_type(tvalue))
elif isinstance(t, Tuple):
return '({0})'.format(
", ".join(pytype_to_pretty_type(p) for p in t.__args__)
)
elif isinstance(t, NDArray):
dtype = pytype_to_pretty_type(t.__args__[0])
ndim = len(t.__args__) - 1
arr = '{0}[{1}]'.format(
dtype,
','.join(':' if s.stop in (-1, None) else str(s.stop)
for s in t.__args__[1:]))
# it's a transpose!
if t.__args__[1].start == -1:
return '{} order(F)'.format(arr)
elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]):
return '{0}[{1}]'.format(dtype, ','.join(['::'] * ndim))
else:
return arr
elif isinstance(t, Pointer):
dtype = pytype_to_pretty_type(t.__args__[0])
return '{}*'.format(dtype)
elif isinstance(t, Fun):
rtype = pytype_to_pretty_type(t.__args__[-1])
argtypes = [pytype_to_pretty_type(arg) for arg in t.__args__[:-1]]
return '{}({})'.format(rtype, ", ".join(argtypes))
elif t in PYTYPE_TO_CTYPE_TABLE:
return t.__name__
else:
raise NotImplementedError("{0}:{1}".format(type(t), t)) | python | def pytype_to_pretty_type(t):
""" Python -> docstring type. """
if isinstance(t, List):
return '{0} list'.format(pytype_to_pretty_type(t.__args__[0]))
elif isinstance(t, Set):
return '{0} set'.format(pytype_to_pretty_type(t.__args__[0]))
elif isinstance(t, Dict):
tkey, tvalue = t.__args__
return '{0}:{1} dict'.format(pytype_to_pretty_type(tkey),
pytype_to_pretty_type(tvalue))
elif isinstance(t, Tuple):
return '({0})'.format(
", ".join(pytype_to_pretty_type(p) for p in t.__args__)
)
elif isinstance(t, NDArray):
dtype = pytype_to_pretty_type(t.__args__[0])
ndim = len(t.__args__) - 1
arr = '{0}[{1}]'.format(
dtype,
','.join(':' if s.stop in (-1, None) else str(s.stop)
for s in t.__args__[1:]))
# it's a transpose!
if t.__args__[1].start == -1:
return '{} order(F)'.format(arr)
elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]):
return '{0}[{1}]'.format(dtype, ','.join(['::'] * ndim))
else:
return arr
elif isinstance(t, Pointer):
dtype = pytype_to_pretty_type(t.__args__[0])
return '{}*'.format(dtype)
elif isinstance(t, Fun):
rtype = pytype_to_pretty_type(t.__args__[-1])
argtypes = [pytype_to_pretty_type(arg) for arg in t.__args__[:-1]]
return '{}({})'.format(rtype, ", ".join(argtypes))
elif t in PYTYPE_TO_CTYPE_TABLE:
return t.__name__
else:
raise NotImplementedError("{0}:{1}".format(type(t), t)) | [
"def",
"pytype_to_pretty_type",
"(",
"t",
")",
":",
"if",
"isinstance",
"(",
"t",
",",
"List",
")",
":",
"return",
"'{0} list'",
".",
"format",
"(",
"pytype_to_pretty_type",
"(",
"t",
".",
"__args__",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"Set",
")",
":",
"return",
"'{0} set'",
".",
"format",
"(",
"pytype_to_pretty_type",
"(",
"t",
".",
"__args__",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"Dict",
")",
":",
"tkey",
",",
"tvalue",
"=",
"t",
".",
"__args__",
"return",
"'{0}:{1} dict'",
".",
"format",
"(",
"pytype_to_pretty_type",
"(",
"tkey",
")",
",",
"pytype_to_pretty_type",
"(",
"tvalue",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"Tuple",
")",
":",
"return",
"'({0})'",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"pytype_to_pretty_type",
"(",
"p",
")",
"for",
"p",
"in",
"t",
".",
"__args__",
")",
")",
"elif",
"isinstance",
"(",
"t",
",",
"NDArray",
")",
":",
"dtype",
"=",
"pytype_to_pretty_type",
"(",
"t",
".",
"__args__",
"[",
"0",
"]",
")",
"ndim",
"=",
"len",
"(",
"t",
".",
"__args__",
")",
"-",
"1",
"arr",
"=",
"'{0}[{1}]'",
".",
"format",
"(",
"dtype",
",",
"','",
".",
"join",
"(",
"':'",
"if",
"s",
".",
"stop",
"in",
"(",
"-",
"1",
",",
"None",
")",
"else",
"str",
"(",
"s",
".",
"stop",
")",
"for",
"s",
"in",
"t",
".",
"__args__",
"[",
"1",
":",
"]",
")",
")",
"# it's a transpose!",
"if",
"t",
".",
"__args__",
"[",
"1",
"]",
".",
"start",
"==",
"-",
"1",
":",
"return",
"'{} order(F)'",
".",
"format",
"(",
"arr",
")",
"elif",
"any",
"(",
"s",
".",
"step",
"is",
"not",
"None",
"and",
"s",
".",
"step",
"<",
"0",
"for",
"s",
"in",
"t",
".",
"__args__",
"[",
"1",
":",
"]",
")",
":",
"return",
"'{0}[{1}]'",
".",
"format",
"(",
"dtype",
",",
"','",
".",
"join",
"(",
"[",
"'::'",
"]",
"*",
"ndim",
")",
")",
"else",
":",
"return",
"arr",
"elif",
"isinstance",
"(",
"t",
",",
"Pointer",
")",
":",
"dtype",
"=",
"pytype_to_pretty_type",
"(",
"t",
".",
"__args__",
"[",
"0",
"]",
")",
"return",
"'{}*'",
".",
"format",
"(",
"dtype",
")",
"elif",
"isinstance",
"(",
"t",
",",
"Fun",
")",
":",
"rtype",
"=",
"pytype_to_pretty_type",
"(",
"t",
".",
"__args__",
"[",
"-",
"1",
"]",
")",
"argtypes",
"=",
"[",
"pytype_to_pretty_type",
"(",
"arg",
")",
"for",
"arg",
"in",
"t",
".",
"__args__",
"[",
":",
"-",
"1",
"]",
"]",
"return",
"'{}({})'",
".",
"format",
"(",
"rtype",
",",
"\", \"",
".",
"join",
"(",
"argtypes",
")",
")",
"elif",
"t",
"in",
"PYTYPE_TO_CTYPE_TABLE",
":",
"return",
"t",
".",
"__name__",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"{0}:{1}\"",
".",
"format",
"(",
"type",
"(",
"t",
")",
",",
"t",
")",
")"
] | Python -> docstring type. | [
"Python",
"-",
">",
"docstring",
"type",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/conversion.py#L95-L133 | train | 232,563 |
def get_type(name, env, non_generic):
    """Look up the type bound to *name* in the type environment *env*.

    MultiType entries are cloned; other entries are made fresh with
    respect to *non_generic*.  An unknown name is not an error: a
    warning is printed and a brand new TypeVariable is returned.

    Args:
        name: The identifier name
        env: The type environment mapping from identifier names to types
        non_generic: A set of non-generic TypeVariables
    """
    try:
        bound = env[name]
    except KeyError:
        print("W: Undefined symbol {0}".format(name))
        return TypeVariable()
    if isinstance(bound, MultiType):
        return clone(bound)
    return fresh(bound, non_generic)
"""Get the type of identifier name from the type environment env.
Args:
name: The identifier name
env: The type environment mapping from identifier names to types
non_generic: A set of non-generic TypeVariables
Raises:
ParseError: Raised if name is an undefined symbol in the type
environment.
"""
if name in env:
if isinstance(env[name], MultiType):
return clone(env[name])
return fresh(env[name], non_generic)
else:
print("W: Undefined symbol {0}".format(name))
return TypeVariable() | [
"def",
"get_type",
"(",
"name",
",",
"env",
",",
"non_generic",
")",
":",
"if",
"name",
"in",
"env",
":",
"if",
"isinstance",
"(",
"env",
"[",
"name",
"]",
",",
"MultiType",
")",
":",
"return",
"clone",
"(",
"env",
"[",
"name",
"]",
")",
"return",
"fresh",
"(",
"env",
"[",
"name",
"]",
",",
"non_generic",
")",
"else",
":",
"print",
"(",
"\"W: Undefined symbol {0}\"",
".",
"format",
"(",
"name",
")",
")",
"return",
"TypeVariable",
"(",
")"
] | Get the type of identifier name from the type environment env.
Args:
name: The identifier name
env: The type environment mapping from identifier names to types
non_generic: A set of non-generic TypeVariables
Raises:
ParseError: Raised if name is an undefined symbol in the type
environment. | [
"Get",
"the",
"type",
"of",
"identifier",
"name",
"from",
"the",
"type",
"environment",
"env",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/tog.py#L1170-L1188 | train | 232,564 |
def fresh(t, non_generic):
    """Duplicate the type expression *t*.

    Generic type variables are replaced by new ones, consistently via a
    shared mapping, while variables listed in *non_generic* are shared
    with the original expression.

    Args:
        t: A type to be copied.
        non_generic: A set of non-generic TypeVariables
    """
    replacements = {}  # generic TypeVariable -> its fresh duplicate

    def duplicate(tp):
        pruned = prune(tp)
        if isinstance(pruned, TypeVariable):
            if not is_generic(pruned, non_generic):
                return pruned
            # Reuse the duplicate so identical variables stay identical.
            if pruned not in replacements:
                replacements[pruned] = TypeVariable()
            return replacements[pruned]
        if isinstance(pruned, dict):
            return pruned  # module
        # Collection/Scalar are checked before their TypeOperator base.
        if isinstance(pruned, Collection):
            return Collection(*[duplicate(x) for x in pruned.types])
        if isinstance(pruned, Scalar):
            return Scalar([duplicate(x) for x in pruned.types])
        if isinstance(pruned, TypeOperator):
            return TypeOperator(pruned.name,
                                [duplicate(x) for x in pruned.types])
        if isinstance(pruned, MultiType):
            return MultiType([duplicate(x) for x in pruned.types])
        assert False, "missing freshrec case {}".format(type(pruned))

    return duplicate(t)
"""Makes a copy of a type expression.
The type t is copied. The generic variables are duplicated and the
non_generic variables are shared.
Args:
t: A type to be copied.
non_generic: A set of non-generic TypeVariables
"""
mappings = {} # A mapping of TypeVariables to TypeVariables
def freshrec(tp):
p = prune(tp)
if isinstance(p, TypeVariable):
if is_generic(p, non_generic):
if p not in mappings:
mappings[p] = TypeVariable()
return mappings[p]
else:
return p
elif isinstance(p, dict):
return p # module
elif isinstance(p, Collection):
return Collection(*[freshrec(x) for x in p.types])
elif isinstance(p, Scalar):
return Scalar([freshrec(x) for x in p.types])
elif isinstance(p, TypeOperator):
return TypeOperator(p.name, [freshrec(x) for x in p.types])
elif isinstance(p, MultiType):
return MultiType([freshrec(x) for x in p.types])
else:
assert False, "missing freshrec case {}".format(type(p))
return freshrec(t) | [
"def",
"fresh",
"(",
"t",
",",
"non_generic",
")",
":",
"mappings",
"=",
"{",
"}",
"# A mapping of TypeVariables to TypeVariables",
"def",
"freshrec",
"(",
"tp",
")",
":",
"p",
"=",
"prune",
"(",
"tp",
")",
"if",
"isinstance",
"(",
"p",
",",
"TypeVariable",
")",
":",
"if",
"is_generic",
"(",
"p",
",",
"non_generic",
")",
":",
"if",
"p",
"not",
"in",
"mappings",
":",
"mappings",
"[",
"p",
"]",
"=",
"TypeVariable",
"(",
")",
"return",
"mappings",
"[",
"p",
"]",
"else",
":",
"return",
"p",
"elif",
"isinstance",
"(",
"p",
",",
"dict",
")",
":",
"return",
"p",
"# module",
"elif",
"isinstance",
"(",
"p",
",",
"Collection",
")",
":",
"return",
"Collection",
"(",
"*",
"[",
"freshrec",
"(",
"x",
")",
"for",
"x",
"in",
"p",
".",
"types",
"]",
")",
"elif",
"isinstance",
"(",
"p",
",",
"Scalar",
")",
":",
"return",
"Scalar",
"(",
"[",
"freshrec",
"(",
"x",
")",
"for",
"x",
"in",
"p",
".",
"types",
"]",
")",
"elif",
"isinstance",
"(",
"p",
",",
"TypeOperator",
")",
":",
"return",
"TypeOperator",
"(",
"p",
".",
"name",
",",
"[",
"freshrec",
"(",
"x",
")",
"for",
"x",
"in",
"p",
".",
"types",
"]",
")",
"elif",
"isinstance",
"(",
"p",
",",
"MultiType",
")",
":",
"return",
"MultiType",
"(",
"[",
"freshrec",
"(",
"x",
")",
"for",
"x",
"in",
"p",
".",
"types",
"]",
")",
"else",
":",
"assert",
"False",
",",
"\"missing freshrec case {}\"",
".",
"format",
"(",
"type",
"(",
"p",
")",
")",
"return",
"freshrec",
"(",
"t",
")"
] | Makes a copy of a type expression.
The type t is copied. The generic variables are duplicated and the
non_generic variables are shared.
Args:
t: A type to be copied.
non_generic: A set of non-generic TypeVariables | [
"Makes",
"a",
"copy",
"of",
"a",
"type",
"expression",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/tog.py#L1191-L1226 | train | 232,565 |
def prune(t):
    """Return the currently defining instance of *t*.

    Follows the ``instance`` links until reaching either an
    uninstantiated TypeVariable or a TypeOperator, shortening the chain
    in place along the way (path compression), so long chains of
    instantiated variables are collapsed as a side effect.

    Args:
        t: The type to be pruned
    Returns:
        An uninstantiated TypeVariable or a TypeOperator
    """
    if isinstance(t, TypeVariable) and t.instance is not None:
        t.instance = prune(t.instance)
        return t.instance
    return t
"""Returns the currently defining instance of t.
As a side effect, collapses the list of type instances. The function Prune
is used whenever a type expression has to be inspected: it will always
return a type expression which is either an uninstantiated type variable or
a type operator; i.e. it will skip instantiated variables, and will
actually prune them from expressions to remove long chains of instantiated
variables.
Args:
t: The type to be pruned
Returns:
An uninstantiated TypeVariable or a TypeOperator
"""
if isinstance(t, TypeVariable):
if t.instance is not None:
t.instance = prune(t.instance)
return t.instance
return t | [
"def",
"prune",
"(",
"t",
")",
":",
"if",
"isinstance",
"(",
"t",
",",
"TypeVariable",
")",
":",
"if",
"t",
".",
"instance",
"is",
"not",
"None",
":",
"t",
".",
"instance",
"=",
"prune",
"(",
"t",
".",
"instance",
")",
"return",
"t",
".",
"instance",
"return",
"t"
] | Returns the currently defining instance of t.
As a side effect, collapses the list of type instances. The function Prune
is used whenever a type expression has to be inspected: it will always
return a type expression which is either an uninstantiated type variable or
a type operator; i.e. it will skip instantiated variables, and will
actually prune them from expressions to remove long chains of instantiated
variables.
Args:
t: The type to be pruned
Returns:
An uninstantiated TypeVariable or a TypeOperator | [
"Returns",
"the",
"currently",
"defining",
"instance",
"of",
"t",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/tog.py#L1352-L1372 | train | 232,566 |
def occurs_in_type(v, type2):
    """Tell whether the type variable *v* appears inside *type2*.

    Note: Must be called with v pre-pruned

    Args:
        v: The TypeVariable to be tested for
        type2: The type in which to search
    Returns:
        True if v occurs in type2, otherwise False
    """
    pruned = prune(type2)
    if pruned == v:
        return True
    if isinstance(pruned, TypeOperator):
        return occurs_in(v, pruned.types)
    return False
"""Checks whether a type variable occurs in a type expression.
Note: Must be called with v pre-pruned
Args:
v: The TypeVariable to be tested for
type2: The type in which to search
Returns:
True if v occurs in type2, otherwise False
"""
pruned_type2 = prune(type2)
if pruned_type2 == v:
return True
elif isinstance(pruned_type2, TypeOperator):
return occurs_in(v, pruned_type2.types)
return False | [
"def",
"occurs_in_type",
"(",
"v",
",",
"type2",
")",
":",
"pruned_type2",
"=",
"prune",
"(",
"type2",
")",
"if",
"pruned_type2",
"==",
"v",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"pruned_type2",
",",
"TypeOperator",
")",
":",
"return",
"occurs_in",
"(",
"v",
",",
"pruned_type2",
".",
"types",
")",
"return",
"False"
] | Checks whether a type variable occurs in a type expression.
Note: Must be called with v pre-pruned
Args:
v: The TypeVariable to be tested for
type2: The type in which to search
Returns:
True if v occurs in type2, otherwise False | [
"Checks",
"whether",
"a",
"type",
"variable",
"occurs",
"in",
"a",
"type",
"expression",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/tog.py#L1394-L1411 | train | 232,567 |
def visit_Module(self, node):
    """
    Visit the whole module and add all import at the top level.

    >> import numpy.linalg

    Becomes

    >> import numpy
    """
    new_body = []
    for stmt in node.body:
        visited = self.visit(stmt)
        if visited:
            new_body.append(visited)
    prelude = [ast.Import([ast.alias(name, mangle(name))])
               for name in self.imports]
    node.body = prelude + new_body
    ast.fix_missing_locations(node)
    return node
"""
Visit the whole module and add all import at the top level.
>> import numpy.linalg
Becomes
>> import numpy
"""
node.body = [k for k in (self.visit(n) for n in node.body) if k]
imports = [ast.Import([ast.alias(i, mangle(i))]) for i in self.imports]
node.body = imports + node.body
ast.fix_missing_locations(node)
return node | [
"def",
"visit_Module",
"(",
"self",
",",
"node",
")",
":",
"node",
".",
"body",
"=",
"[",
"k",
"for",
"k",
"in",
"(",
"self",
".",
"visit",
"(",
"n",
")",
"for",
"n",
"in",
"node",
".",
"body",
")",
"if",
"k",
"]",
"imports",
"=",
"[",
"ast",
".",
"Import",
"(",
"[",
"ast",
".",
"alias",
"(",
"i",
",",
"mangle",
"(",
"i",
")",
")",
"]",
")",
"for",
"i",
"in",
"self",
".",
"imports",
"]",
"node",
".",
"body",
"=",
"imports",
"+",
"node",
".",
"body",
"ast",
".",
"fix_missing_locations",
"(",
"node",
")",
"return",
"node"
] | Visit the whole module and add all import at the top level.
>> import numpy.linalg
Becomes
>> import numpy | [
"Visit",
"the",
"whole",
"module",
"and",
"add",
"all",
"import",
"at",
"the",
"top",
"level",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/transformations/expand_imports.py#L46-L61 | train | 232,568 |
def visit_Name(self, node):
    """
    Replace name with full expanded name.

    Examples
    --------
    >> from numpy.linalg import det

    >> det(a)

    Becomes

    >> numpy.linalg.det(a)
    """
    if node.id not in self.symbols:
        return node
    path = self.symbols[node.id]
    symbol = path_to_node(path)
    # Literal symbols are left alone; they are inlined elsewhere.
    if getattr(symbol, 'isliteral', lambda: False)():
        return node
    parent = self.ancestors[node][-1]
    # A module object cannot flow through these constructs.
    if isinstance(parent, (ast.Tuple, ast.List, ast.Set, ast.Return)):
        raise PythranSyntaxError(
            "Unsupported module identifier manipulation",
            node)
    qualified = path_to_attr(path)
    qualified.ctx = node.ctx
    ast.copy_location(qualified, node)
    return qualified
"""
Replace name with full expanded name.
Examples
--------
>> from numpy.linalg import det
>> det(a)
Becomes
>> numpy.linalg.det(a)
"""
if node.id in self.symbols:
symbol = path_to_node(self.symbols[node.id])
if not getattr(symbol, 'isliteral', lambda: False)():
parent = self.ancestors[node][-1]
blacklist = (ast.Tuple,
ast.List,
ast.Set,
ast.Return)
if isinstance(parent, blacklist):
raise PythranSyntaxError(
"Unsupported module identifier manipulation",
node)
new_node = path_to_attr(self.symbols[node.id])
new_node.ctx = node.ctx
ast.copy_location(new_node, node)
return new_node
return node | [
"def",
"visit_Name",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
".",
"id",
"in",
"self",
".",
"symbols",
":",
"symbol",
"=",
"path_to_node",
"(",
"self",
".",
"symbols",
"[",
"node",
".",
"id",
"]",
")",
"if",
"not",
"getattr",
"(",
"symbol",
",",
"'isliteral'",
",",
"lambda",
":",
"False",
")",
"(",
")",
":",
"parent",
"=",
"self",
".",
"ancestors",
"[",
"node",
"]",
"[",
"-",
"1",
"]",
"blacklist",
"=",
"(",
"ast",
".",
"Tuple",
",",
"ast",
".",
"List",
",",
"ast",
".",
"Set",
",",
"ast",
".",
"Return",
")",
"if",
"isinstance",
"(",
"parent",
",",
"blacklist",
")",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Unsupported module identifier manipulation\"",
",",
"node",
")",
"new_node",
"=",
"path_to_attr",
"(",
"self",
".",
"symbols",
"[",
"node",
".",
"id",
"]",
")",
"new_node",
".",
"ctx",
"=",
"node",
".",
"ctx",
"ast",
".",
"copy_location",
"(",
"new_node",
",",
"node",
")",
"return",
"new_node",
"return",
"node"
] | Replace name with full expanded name.
Examples
--------
>> from numpy.linalg import det
>> det(a)
Becomes
>> numpy.linalg.det(a) | [
"Replace",
"name",
"with",
"full",
"expanded",
"name",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/transformations/expand_imports.py#L134-L164 | train | 232,569 |
def save_function_effect(module):
    """ Recursively save function effect for pythonic functions. """
    for entry in module.values():
        if isinstance(entry, dict):
            # Submodule: recurse into its members.
            save_function_effect(entry)
            continue
        IntrinsicArgumentEffects[entry] = FunctionEffects(entry)
        if isinstance(entry, intrinsic.Class):
            save_function_effect(entry.fields)
""" Recursively save function effect for pythonic functions. """
for intr in module.values():
if isinstance(intr, dict): # Submodule case
save_function_effect(intr)
else:
fe = FunctionEffects(intr)
IntrinsicArgumentEffects[intr] = fe
if isinstance(intr, intrinsic.Class):
save_function_effect(intr.fields) | [
"def",
"save_function_effect",
"(",
"module",
")",
":",
"for",
"intr",
"in",
"module",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"intr",
",",
"dict",
")",
":",
"# Submodule case",
"save_function_effect",
"(",
"intr",
")",
"else",
":",
"fe",
"=",
"FunctionEffects",
"(",
"intr",
")",
"IntrinsicArgumentEffects",
"[",
"intr",
"]",
"=",
"fe",
"if",
"isinstance",
"(",
"intr",
",",
"intrinsic",
".",
"Class",
")",
":",
"save_function_effect",
"(",
"intr",
".",
"fields",
")"
] | Recursively save function effect for pythonic functions. | [
"Recursively",
"save",
"function",
"effect",
"for",
"pythonic",
"functions",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/argument_effects.py#L35-L44 | train | 232,570 |
def prepare(self, node):
    """
    Initialise arguments effects as this analyse is inter-procedural.

    Initialisation done for Pythonic functions and default value set for
    user defined functions.
    """
    super(ArgumentEffects, self).prepare(node)
    # Seed one effect node per user-defined function in the effect graph.
    for fun in self.global_declarations.values():
        effects = FunctionEffects(fun)
        self.node_to_functioneffect[fun] = effects
        self.result.add_node(effects)
"""
Initialise arguments effects as this analyse is inter-procedural.
Initialisation done for Pythonic functions and default value set for
user defined functions.
"""
super(ArgumentEffects, self).prepare(node)
for n in self.global_declarations.values():
fe = FunctionEffects(n)
self.node_to_functioneffect[n] = fe
self.result.add_node(fe) | [
"def",
"prepare",
"(",
"self",
",",
"node",
")",
":",
"super",
"(",
"ArgumentEffects",
",",
"self",
")",
".",
"prepare",
"(",
"node",
")",
"for",
"n",
"in",
"self",
".",
"global_declarations",
".",
"values",
"(",
")",
":",
"fe",
"=",
"FunctionEffects",
"(",
"n",
")",
"self",
".",
"node_to_functioneffect",
"[",
"n",
"]",
"=",
"fe",
"self",
".",
"result",
".",
"add_node",
"(",
"fe",
")"
] | Initialise arguments effects as this analyse is inter-procedural.
Initialisation done for Pythonic functions and default value set for
user defined functions. | [
"Initialise",
"arguments",
"effects",
"as",
"this",
"analyse",
"is",
"inter",
"-",
"procedural",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/argument_effects.py#L62-L73 | train | 232,571 |
def process_locals(self, node, node_visited, *skipped):
    """
    Declare variable local to node and insert declaration before.

    Not possible for function yielding values.
    """
    candidates = self.scope[node].difference(skipped)
    candidates = candidates.difference(self.openmp_deps)
    if not candidates:
        # Nothing is scoped here: keep the visited node untouched.
        return node_visited
    declarations = [Statement("{} {}".format(self.typeof(name), name))
                    for name in candidates]
    # These names are declared now; drop them from the pending decls.
    self.ldecls.difference_update(candidates)
    return Block(declarations + [node_visited])
"""
Declare variable local to node and insert declaration before.
Not possible for function yielding values.
"""
local_vars = self.scope[node].difference(skipped)
local_vars = local_vars.difference(self.openmp_deps)
if not local_vars:
return node_visited # no processing
locals_visited = []
for varname in local_vars:
vartype = self.typeof(varname)
decl = Statement("{} {}".format(vartype, varname))
locals_visited.append(decl)
self.ldecls.difference_update(local_vars)
return Block(locals_visited + [node_visited]) | [
"def",
"process_locals",
"(",
"self",
",",
"node",
",",
"node_visited",
",",
"*",
"skipped",
")",
":",
"local_vars",
"=",
"self",
".",
"scope",
"[",
"node",
"]",
".",
"difference",
"(",
"skipped",
")",
"local_vars",
"=",
"local_vars",
".",
"difference",
"(",
"self",
".",
"openmp_deps",
")",
"if",
"not",
"local_vars",
":",
"return",
"node_visited",
"# no processing",
"locals_visited",
"=",
"[",
"]",
"for",
"varname",
"in",
"local_vars",
":",
"vartype",
"=",
"self",
".",
"typeof",
"(",
"varname",
")",
"decl",
"=",
"Statement",
"(",
"\"{} {}\"",
".",
"format",
"(",
"vartype",
",",
"varname",
")",
")",
"locals_visited",
".",
"append",
"(",
"decl",
")",
"self",
".",
"ldecls",
".",
"difference_update",
"(",
"local_vars",
")",
"return",
"Block",
"(",
"locals_visited",
"+",
"[",
"node_visited",
"]",
")"
] | Declare variable local to node and insert declaration before.
Not possible for function yielding values. | [
"Declare",
"variable",
"local",
"to",
"node",
"and",
"insert",
"declaration",
"before",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L217-L234 | train | 232,572 |
def process_omp_attachements(self, node, stmt, index=None):
    """
    Add OpenMP pragma on the correct stmt in the correct order.

    stmt may be a list. On this case, index have to be specify to add
    OpenMP on the correct statement.
    """
    omp_directives = metadata.get(node, OMPDirective)
    if not omp_directives:
        return stmt
    # Lower every pragma dependency to its C++ form before attaching.
    for directive in omp_directives:
        directive.deps = [self.visit(dep) for dep in directive.deps]
    directives = list(omp_directives)
    if index is None:
        return AnnotatedStatement(stmt, directives)
    stmt[index] = AnnotatedStatement(stmt[index], directives)
    return stmt
"""
Add OpenMP pragma on the correct stmt in the correct order.
stmt may be a list. On this case, index have to be specify to add
OpenMP on the correct statement.
"""
omp_directives = metadata.get(node, OMPDirective)
if omp_directives:
directives = list()
for directive in omp_directives:
directive.deps = [self.visit(dep) for dep in directive.deps]
directives.append(directive)
if index is None:
stmt = AnnotatedStatement(stmt, directives)
else:
stmt[index] = AnnotatedStatement(stmt[index], directives)
return stmt | [
"def",
"process_omp_attachements",
"(",
"self",
",",
"node",
",",
"stmt",
",",
"index",
"=",
"None",
")",
":",
"omp_directives",
"=",
"metadata",
".",
"get",
"(",
"node",
",",
"OMPDirective",
")",
"if",
"omp_directives",
":",
"directives",
"=",
"list",
"(",
")",
"for",
"directive",
"in",
"omp_directives",
":",
"directive",
".",
"deps",
"=",
"[",
"self",
".",
"visit",
"(",
"dep",
")",
"for",
"dep",
"in",
"directive",
".",
"deps",
"]",
"directives",
".",
"append",
"(",
"directive",
")",
"if",
"index",
"is",
"None",
":",
"stmt",
"=",
"AnnotatedStatement",
"(",
"stmt",
",",
"directives",
")",
"else",
":",
"stmt",
"[",
"index",
"]",
"=",
"AnnotatedStatement",
"(",
"stmt",
"[",
"index",
"]",
",",
"directives",
")",
"return",
"stmt"
] | Add OpenMP pragma on the correct stmt in the correct order.
stmt may be a list. On this case, index have to be specify to add
OpenMP on the correct statement. | [
"Add",
"OpenMP",
"pragma",
"on",
"the",
"correct",
"stmt",
"in",
"the",
"correct",
"order",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L244-L261 | train | 232,573 |
def visit_Assign(self, node):
    """
    Create Assign node for final Cxx representation.

    It tries to handle multi assignment like:

    >> a = b = c = 2

    If only one local variable is assigned, typing is added:

    >> int a = 2;

    TODO: Handle case of multi-assignement for some local variables.

    Finally, process OpenMP clause like #pragma omp atomic
    """
    if any(not isinstance(t, (ast.Name, ast.Subscript))
           for t in node.targets):
        raise PythranSyntaxError(
            "Must assign to an identifier or a subscript",
            node)
    value = self.visit(node.value)
    targets = [self.visit(t) for t in node.targets]
    alltargets = "= ".join(targets)

    first = node.targets[0]
    # Typed declaration is only emitted for a single, loop-body-local,
    # non-OpenMP-referenced name.
    islocal = (len(targets) == 1 and
               isinstance(first, ast.Name) and
               first.id in self.scope[node] and
               first.id not in self.openmp_deps)
    if islocal:
        # The name is declared right here; drop it from pending decls.
        self.ldecls.difference_update(t.id for t in node.targets)
        first_type = self.types[first]
        builder = self.types.builder
        if first_type.iscombined():
            alltargets = '{} {}'.format(self.typeof(first), alltargets)
        elif isinstance(first_type, builder.Assignable):
            decl_type = builder.Assignable(
                builder.NamedType('decltype({})'.format(value)))
            alltargets = '{} {}'.format(decl_type, alltargets)
        else:
            assert isinstance(first_type, builder.Lazy)
            decl_type = builder.Lazy(
                builder.NamedType('decltype({})'.format(value)))
            alltargets = '{} {}'.format(decl_type, alltargets)
    return self.process_omp_attachements(node, Assign(alltargets, value))
"""
Create Assign node for final Cxx representation.
It tries to handle multi assignment like:
>> a = b = c = 2
If only one local variable is assigned, typing is added:
>> int a = 2;
TODO: Handle case of multi-assignement for some local variables.
Finally, process OpenMP clause like #pragma omp atomic
"""
if not all(isinstance(n, (ast.Name, ast.Subscript))
for n in node.targets):
raise PythranSyntaxError(
"Must assign to an identifier or a subscript",
node)
value = self.visit(node.value)
targets = [self.visit(t) for t in node.targets]
alltargets = "= ".join(targets)
islocal = (len(targets) == 1 and
isinstance(node.targets[0], ast.Name) and
node.targets[0].id in self.scope[node] and
node.targets[0].id not in self.openmp_deps)
if islocal:
# remove this decls from local decls
self.ldecls.difference_update(t.id for t in node.targets)
# add a local declaration
if self.types[node.targets[0]].iscombined():
alltargets = '{} {}'.format(self.typeof(node.targets[0]),
alltargets)
elif isinstance(self.types[node.targets[0]],
self.types.builder.Assignable):
alltargets = '{} {}'.format(
self.types.builder.Assignable(
self.types.builder.NamedType(
'decltype({})'.format(value))),
alltargets)
else:
assert isinstance(self.types[node.targets[0]],
self.types.builder.Lazy)
alltargets = '{} {}'.format(
self.types.builder.Lazy(
self.types.builder.NamedType(
'decltype({})'.format(value))),
alltargets)
stmt = Assign(alltargets, value)
return self.process_omp_attachements(node, stmt) | [
"def",
"visit_Assign",
"(",
"self",
",",
"node",
")",
":",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"n",
",",
"(",
"ast",
".",
"Name",
",",
"ast",
".",
"Subscript",
")",
")",
"for",
"n",
"in",
"node",
".",
"targets",
")",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Must assign to an identifier or a subscript\"",
",",
"node",
")",
"value",
"=",
"self",
".",
"visit",
"(",
"node",
".",
"value",
")",
"targets",
"=",
"[",
"self",
".",
"visit",
"(",
"t",
")",
"for",
"t",
"in",
"node",
".",
"targets",
"]",
"alltargets",
"=",
"\"= \"",
".",
"join",
"(",
"targets",
")",
"islocal",
"=",
"(",
"len",
"(",
"targets",
")",
"==",
"1",
"and",
"isinstance",
"(",
"node",
".",
"targets",
"[",
"0",
"]",
",",
"ast",
".",
"Name",
")",
"and",
"node",
".",
"targets",
"[",
"0",
"]",
".",
"id",
"in",
"self",
".",
"scope",
"[",
"node",
"]",
"and",
"node",
".",
"targets",
"[",
"0",
"]",
".",
"id",
"not",
"in",
"self",
".",
"openmp_deps",
")",
"if",
"islocal",
":",
"# remove this decls from local decls",
"self",
".",
"ldecls",
".",
"difference_update",
"(",
"t",
".",
"id",
"for",
"t",
"in",
"node",
".",
"targets",
")",
"# add a local declaration",
"if",
"self",
".",
"types",
"[",
"node",
".",
"targets",
"[",
"0",
"]",
"]",
".",
"iscombined",
"(",
")",
":",
"alltargets",
"=",
"'{} {}'",
".",
"format",
"(",
"self",
".",
"typeof",
"(",
"node",
".",
"targets",
"[",
"0",
"]",
")",
",",
"alltargets",
")",
"elif",
"isinstance",
"(",
"self",
".",
"types",
"[",
"node",
".",
"targets",
"[",
"0",
"]",
"]",
",",
"self",
".",
"types",
".",
"builder",
".",
"Assignable",
")",
":",
"alltargets",
"=",
"'{} {}'",
".",
"format",
"(",
"self",
".",
"types",
".",
"builder",
".",
"Assignable",
"(",
"self",
".",
"types",
".",
"builder",
".",
"NamedType",
"(",
"'decltype({})'",
".",
"format",
"(",
"value",
")",
")",
")",
",",
"alltargets",
")",
"else",
":",
"assert",
"isinstance",
"(",
"self",
".",
"types",
"[",
"node",
".",
"targets",
"[",
"0",
"]",
"]",
",",
"self",
".",
"types",
".",
"builder",
".",
"Lazy",
")",
"alltargets",
"=",
"'{} {}'",
".",
"format",
"(",
"self",
".",
"types",
".",
"builder",
".",
"Lazy",
"(",
"self",
".",
"types",
".",
"builder",
".",
"NamedType",
"(",
"'decltype({})'",
".",
"format",
"(",
"value",
")",
")",
")",
",",
"alltargets",
")",
"stmt",
"=",
"Assign",
"(",
"alltargets",
",",
"value",
")",
"return",
"self",
".",
"process_omp_attachements",
"(",
"node",
",",
"stmt",
")"
] | Create Assign node for final Cxx representation.
It tries to handle multi assignment like:
>> a = b = c = 2
If only one local variable is assigned, typing is added:
>> int a = 2;
TODO: Handle case of multi-assignement for some local variables.
Finally, process OpenMP clause like #pragma omp atomic | [
"Create",
"Assign",
"node",
"for",
"final",
"Cxx",
"representation",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L392-L443 | train | 232,574 |
def gen_for(self, node, target, local_iter, local_iter_decl, loop_body):
    """
    Create For representation on iterator for Cxx generation.

    Examples
    --------
    >> "omp parallel for"

    >> for i in xrange(10):

    >> ... do things ...

    Becomes

    >> "omp parallel for shared(__iterX)"

    >> for(decltype(__iterX)::iterator __targetX = __iterX.begin();
           __targetX < __iterX.end(); ++__targetX)

    >>    auto&& i = *__targetX;

    >> ... do things ...

    It the case of not local variable, typing for `i` disappear and typing
    is removed for iterator in case of yields statement in function.
    """
    # Iterator variable name, unique per AST node.
    iter_name = "__target{0}".format(id(node))
    iter_decl = self.types.builder.IteratorOfType(local_iter_decl)
    # A loop-body-local target binds by reference to the iterator value;
    # otherwise (or inside a generator) it was declared elsewhere.
    is_body_local = (node.target.id in self.scope[node] and
                     not hasattr(self, 'yields'))
    target_type = "auto&&" if is_body_local else ""
    deref = Statement("{} {}= *{}".format(target_type, target, iter_name))
    assign = self.make_assign(iter_decl, iter_name, local_iter)
    loop = For("{}.begin()".format(assign),
               "{0} < {1}.end()".format(iter_name, local_iter),
               "++{0}".format(iter_name),
               Block([deref, loop_body]))
    return [self.process_omp_attachements(node, loop)]
"""
Create For representation on iterator for Cxx generation.
Examples
--------
>> "omp parallel for"
>> for i in xrange(10):
>> ... do things ...
Becomes
>> "omp parallel for shared(__iterX)"
>> for(decltype(__iterX)::iterator __targetX = __iterX.begin();
__targetX < __iterX.end(); ++__targetX)
>> auto&& i = *__targetX;
>> ... do things ...
It the case of not local variable, typing for `i` disappear and typing
is removed for iterator in case of yields statement in function.
"""
# Choose target variable for iterator (which is iterator type)
local_target = "__target{0}".format(id(node))
local_target_decl = self.types.builder.IteratorOfType(local_iter_decl)
# If variable is local to the for body it's a ref to the iterator value
# type
if node.target.id in self.scope[node] and not hasattr(self, 'yields'):
local_type = "auto&&"
else:
local_type = ""
# Assign iterable value
loop_body_prelude = Statement("{} {}= *{}".format(local_type,
target,
local_target))
# Create the loop
assign = self.make_assign(local_target_decl, local_target, local_iter)
loop = For("{}.begin()".format(assign),
"{0} < {1}.end()".format(local_target, local_iter),
"++{0}".format(local_target),
Block([loop_body_prelude, loop_body]))
return [self.process_omp_attachements(node, loop)] | [
"def",
"gen_for",
"(",
"self",
",",
"node",
",",
"target",
",",
"local_iter",
",",
"local_iter_decl",
",",
"loop_body",
")",
":",
"# Choose target variable for iterator (which is iterator type)",
"local_target",
"=",
"\"__target{0}\"",
".",
"format",
"(",
"id",
"(",
"node",
")",
")",
"local_target_decl",
"=",
"self",
".",
"types",
".",
"builder",
".",
"IteratorOfType",
"(",
"local_iter_decl",
")",
"# If variable is local to the for body it's a ref to the iterator value",
"# type",
"if",
"node",
".",
"target",
".",
"id",
"in",
"self",
".",
"scope",
"[",
"node",
"]",
"and",
"not",
"hasattr",
"(",
"self",
",",
"'yields'",
")",
":",
"local_type",
"=",
"\"auto&&\"",
"else",
":",
"local_type",
"=",
"\"\"",
"# Assign iterable value",
"loop_body_prelude",
"=",
"Statement",
"(",
"\"{} {}= *{}\"",
".",
"format",
"(",
"local_type",
",",
"target",
",",
"local_target",
")",
")",
"# Create the loop",
"assign",
"=",
"self",
".",
"make_assign",
"(",
"local_target_decl",
",",
"local_target",
",",
"local_iter",
")",
"loop",
"=",
"For",
"(",
"\"{}.begin()\"",
".",
"format",
"(",
"assign",
")",
",",
"\"{0} < {1}.end()\"",
".",
"format",
"(",
"local_target",
",",
"local_iter",
")",
",",
"\"++{0}\"",
".",
"format",
"(",
"local_target",
")",
",",
"Block",
"(",
"[",
"loop_body_prelude",
",",
"loop_body",
"]",
")",
")",
"return",
"[",
"self",
".",
"process_omp_attachements",
"(",
"node",
",",
"loop",
")",
"]"
] | Create For representation on iterator for Cxx generation.
Examples
--------
>> "omp parallel for"
>> for i in xrange(10):
>> ... do things ...
Becomes
>> "omp parallel for shared(__iterX)"
>> for(decltype(__iterX)::iterator __targetX = __iterX.begin();
__targetX < __iterX.end(); ++__targetX)
>> auto&& i = *__targetX;
>> ... do things ...
It the case of not local variable, typing for `i` disappear and typing
is removed for iterator in case of yields statement in function. | [
"Create",
"For",
"representation",
"on",
"iterator",
"for",
"Cxx",
"generation",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L460-L503 | train | 232,575 |
serge-sans-paille/pythran | pythran/backend.py | CxxFunction.handle_real_loop_comparison | def handle_real_loop_comparison(self, args, target, upper_bound):
"""
Handle comparison for real loops.
Add the correct comparison operator if possible.
"""
# order is 1 for increasing loop, -1 for decreasing loop and 0 if it is
# not known at compile time
if len(args) <= 2:
order = 1
elif isinstance(args[2], ast.Num):
order = -1 + 2 * (int(args[2].n) > 0)
elif isinstance(args[1], ast.Num) and isinstance(args[0], ast.Num):
order = -1 + 2 * (int(args[1].n) > int(args[0].n))
else:
order = 0
comparison = "{} < {}" if order == 1 else "{} > {}"
comparison = comparison.format(target, upper_bound)
return comparison | python | def handle_real_loop_comparison(self, args, target, upper_bound):
"""
Handle comparison for real loops.
Add the correct comparison operator if possible.
"""
# order is 1 for increasing loop, -1 for decreasing loop and 0 if it is
# not known at compile time
if len(args) <= 2:
order = 1
elif isinstance(args[2], ast.Num):
order = -1 + 2 * (int(args[2].n) > 0)
elif isinstance(args[1], ast.Num) and isinstance(args[0], ast.Num):
order = -1 + 2 * (int(args[1].n) > int(args[0].n))
else:
order = 0
comparison = "{} < {}" if order == 1 else "{} > {}"
comparison = comparison.format(target, upper_bound)
return comparison | [
"def",
"handle_real_loop_comparison",
"(",
"self",
",",
"args",
",",
"target",
",",
"upper_bound",
")",
":",
"# order is 1 for increasing loop, -1 for decreasing loop and 0 if it is",
"# not known at compile time",
"if",
"len",
"(",
"args",
")",
"<=",
"2",
":",
"order",
"=",
"1",
"elif",
"isinstance",
"(",
"args",
"[",
"2",
"]",
",",
"ast",
".",
"Num",
")",
":",
"order",
"=",
"-",
"1",
"+",
"2",
"*",
"(",
"int",
"(",
"args",
"[",
"2",
"]",
".",
"n",
")",
">",
"0",
")",
"elif",
"isinstance",
"(",
"args",
"[",
"1",
"]",
",",
"ast",
".",
"Num",
")",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"ast",
".",
"Num",
")",
":",
"order",
"=",
"-",
"1",
"+",
"2",
"*",
"(",
"int",
"(",
"args",
"[",
"1",
"]",
".",
"n",
")",
">",
"int",
"(",
"args",
"[",
"0",
"]",
".",
"n",
")",
")",
"else",
":",
"order",
"=",
"0",
"comparison",
"=",
"\"{} < {}\"",
"if",
"order",
"==",
"1",
"else",
"\"{} > {}\"",
"comparison",
"=",
"comparison",
".",
"format",
"(",
"target",
",",
"upper_bound",
")",
"return",
"comparison"
] | Handle comparison for real loops.
Add the correct comparison operator if possible. | [
"Handle",
"comparison",
"for",
"real",
"loops",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L505-L524 | train | 232,576 |
serge-sans-paille/pythran | pythran/backend.py | CxxFunction.gen_c_for | def gen_c_for(self, node, local_iter, loop_body):
"""
Create C For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... do things ...
Becomes
>> for(long i = 0, __targetX = 10; i < __targetX; i += 1)
>> ... do things ...
Or
>> for i in xrange(10, 0, -1):
>> ... do things ...
Becomes
>> for(long i = 10, __targetX = 0; i > __targetX; i += -1)
>> ... do things ...
It the case of not local variable, typing for `i` disappear
"""
args = node.iter.args
step = "1L" if len(args) <= 2 else self.visit(args[2])
if len(args) == 1:
lower_bound = "0L"
upper_arg = 0
else:
lower_bound = self.visit(args[0])
upper_arg = 1
upper_type = iter_type = "long "
upper_value = self.visit(args[upper_arg])
if is_simple_expr(args[upper_arg]):
upper_bound = upper_value # compatible with collapse
else:
upper_bound = "__target{0}".format(id(node))
# If variable is local to the for body keep it local...
if node.target.id in self.scope[node] and not hasattr(self, 'yields'):
loop = list()
else:
# For yield function, upper_bound is globals.
iter_type = ""
# Back one step to keep Python behavior (except for break)
loop = [If("{} == {}".format(local_iter, upper_bound),
Statement("{} -= {}".format(local_iter, step)))]
comparison = self.handle_real_loop_comparison(args, local_iter,
upper_bound)
forloop = For("{0} {1}={2}".format(iter_type, local_iter, lower_bound),
comparison,
"{0} += {1}".format(local_iter, step),
loop_body)
loop.insert(0, self.process_omp_attachements(node, forloop))
# Store upper bound value if needed
if upper_bound is upper_value:
header = []
else:
assgnt = self.make_assign(upper_type, upper_bound, upper_value)
header = [Statement(assgnt)]
return header, loop | python | def gen_c_for(self, node, local_iter, loop_body):
"""
Create C For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... do things ...
Becomes
>> for(long i = 0, __targetX = 10; i < __targetX; i += 1)
>> ... do things ...
Or
>> for i in xrange(10, 0, -1):
>> ... do things ...
Becomes
>> for(long i = 10, __targetX = 0; i > __targetX; i += -1)
>> ... do things ...
It the case of not local variable, typing for `i` disappear
"""
args = node.iter.args
step = "1L" if len(args) <= 2 else self.visit(args[2])
if len(args) == 1:
lower_bound = "0L"
upper_arg = 0
else:
lower_bound = self.visit(args[0])
upper_arg = 1
upper_type = iter_type = "long "
upper_value = self.visit(args[upper_arg])
if is_simple_expr(args[upper_arg]):
upper_bound = upper_value # compatible with collapse
else:
upper_bound = "__target{0}".format(id(node))
# If variable is local to the for body keep it local...
if node.target.id in self.scope[node] and not hasattr(self, 'yields'):
loop = list()
else:
# For yield function, upper_bound is globals.
iter_type = ""
# Back one step to keep Python behavior (except for break)
loop = [If("{} == {}".format(local_iter, upper_bound),
Statement("{} -= {}".format(local_iter, step)))]
comparison = self.handle_real_loop_comparison(args, local_iter,
upper_bound)
forloop = For("{0} {1}={2}".format(iter_type, local_iter, lower_bound),
comparison,
"{0} += {1}".format(local_iter, step),
loop_body)
loop.insert(0, self.process_omp_attachements(node, forloop))
# Store upper bound value if needed
if upper_bound is upper_value:
header = []
else:
assgnt = self.make_assign(upper_type, upper_bound, upper_value)
header = [Statement(assgnt)]
return header, loop | [
"def",
"gen_c_for",
"(",
"self",
",",
"node",
",",
"local_iter",
",",
"loop_body",
")",
":",
"args",
"=",
"node",
".",
"iter",
".",
"args",
"step",
"=",
"\"1L\"",
"if",
"len",
"(",
"args",
")",
"<=",
"2",
"else",
"self",
".",
"visit",
"(",
"args",
"[",
"2",
"]",
")",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"lower_bound",
"=",
"\"0L\"",
"upper_arg",
"=",
"0",
"else",
":",
"lower_bound",
"=",
"self",
".",
"visit",
"(",
"args",
"[",
"0",
"]",
")",
"upper_arg",
"=",
"1",
"upper_type",
"=",
"iter_type",
"=",
"\"long \"",
"upper_value",
"=",
"self",
".",
"visit",
"(",
"args",
"[",
"upper_arg",
"]",
")",
"if",
"is_simple_expr",
"(",
"args",
"[",
"upper_arg",
"]",
")",
":",
"upper_bound",
"=",
"upper_value",
"# compatible with collapse",
"else",
":",
"upper_bound",
"=",
"\"__target{0}\"",
".",
"format",
"(",
"id",
"(",
"node",
")",
")",
"# If variable is local to the for body keep it local...",
"if",
"node",
".",
"target",
".",
"id",
"in",
"self",
".",
"scope",
"[",
"node",
"]",
"and",
"not",
"hasattr",
"(",
"self",
",",
"'yields'",
")",
":",
"loop",
"=",
"list",
"(",
")",
"else",
":",
"# For yield function, upper_bound is globals.",
"iter_type",
"=",
"\"\"",
"# Back one step to keep Python behavior (except for break)",
"loop",
"=",
"[",
"If",
"(",
"\"{} == {}\"",
".",
"format",
"(",
"local_iter",
",",
"upper_bound",
")",
",",
"Statement",
"(",
"\"{} -= {}\"",
".",
"format",
"(",
"local_iter",
",",
"step",
")",
")",
")",
"]",
"comparison",
"=",
"self",
".",
"handle_real_loop_comparison",
"(",
"args",
",",
"local_iter",
",",
"upper_bound",
")",
"forloop",
"=",
"For",
"(",
"\"{0} {1}={2}\"",
".",
"format",
"(",
"iter_type",
",",
"local_iter",
",",
"lower_bound",
")",
",",
"comparison",
",",
"\"{0} += {1}\"",
".",
"format",
"(",
"local_iter",
",",
"step",
")",
",",
"loop_body",
")",
"loop",
".",
"insert",
"(",
"0",
",",
"self",
".",
"process_omp_attachements",
"(",
"node",
",",
"forloop",
")",
")",
"# Store upper bound value if needed",
"if",
"upper_bound",
"is",
"upper_value",
":",
"header",
"=",
"[",
"]",
"else",
":",
"assgnt",
"=",
"self",
".",
"make_assign",
"(",
"upper_type",
",",
"upper_bound",
",",
"upper_value",
")",
"header",
"=",
"[",
"Statement",
"(",
"assgnt",
")",
"]",
"return",
"header",
",",
"loop"
] | Create C For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... do things ...
Becomes
>> for(long i = 0, __targetX = 10; i < __targetX; i += 1)
>> ... do things ...
Or
>> for i in xrange(10, 0, -1):
>> ... do things ...
Becomes
>> for(long i = 10, __targetX = 0; i > __targetX; i += -1)
>> ... do things ...
It the case of not local variable, typing for `i` disappear | [
"Create",
"C",
"For",
"representation",
"for",
"Cxx",
"generation",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L526-L596 | train | 232,577 |
serge-sans-paille/pythran | pythran/backend.py | CxxFunction.handle_omp_for | def handle_omp_for(self, node, local_iter):
"""
Fix OpenMP directives on For loops.
Add the target as private variable as a new variable may have been
introduce to handle cxx iterator.
Also, add the iterator as shared variable as all 'parallel for chunck'
have to use the same iterator.
"""
for directive in metadata.get(node, OMPDirective):
if any(key in directive.s for key in (' parallel ', ' task ')):
# Eventually add local_iter in a shared clause as iterable is
# shared in the for loop (for every clause with datasharing)
directive.s += ' shared({})'
directive.deps.append(ast.Name(local_iter, ast.Load(), None))
directive.shared_deps.append(directive.deps[-1])
target = node.target
assert isinstance(target, ast.Name)
hasfor = 'for' in directive.s
nodefault = 'default' not in directive.s
noindexref = all(isinstance(x, ast.Name) and
x.id != target.id for x in directive.deps)
if (hasfor and nodefault and noindexref and
target.id not in self.scope[node]):
# Target is private by default in omp but iterator use may
# introduce an extra variable
directive.s += ' private({})'
directive.deps.append(ast.Name(target.id, ast.Load(), None))
directive.private_deps.append(directive.deps[-1]) | python | def handle_omp_for(self, node, local_iter):
"""
Fix OpenMP directives on For loops.
Add the target as private variable as a new variable may have been
introduce to handle cxx iterator.
Also, add the iterator as shared variable as all 'parallel for chunck'
have to use the same iterator.
"""
for directive in metadata.get(node, OMPDirective):
if any(key in directive.s for key in (' parallel ', ' task ')):
# Eventually add local_iter in a shared clause as iterable is
# shared in the for loop (for every clause with datasharing)
directive.s += ' shared({})'
directive.deps.append(ast.Name(local_iter, ast.Load(), None))
directive.shared_deps.append(directive.deps[-1])
target = node.target
assert isinstance(target, ast.Name)
hasfor = 'for' in directive.s
nodefault = 'default' not in directive.s
noindexref = all(isinstance(x, ast.Name) and
x.id != target.id for x in directive.deps)
if (hasfor and nodefault and noindexref and
target.id not in self.scope[node]):
# Target is private by default in omp but iterator use may
# introduce an extra variable
directive.s += ' private({})'
directive.deps.append(ast.Name(target.id, ast.Load(), None))
directive.private_deps.append(directive.deps[-1]) | [
"def",
"handle_omp_for",
"(",
"self",
",",
"node",
",",
"local_iter",
")",
":",
"for",
"directive",
"in",
"metadata",
".",
"get",
"(",
"node",
",",
"OMPDirective",
")",
":",
"if",
"any",
"(",
"key",
"in",
"directive",
".",
"s",
"for",
"key",
"in",
"(",
"' parallel '",
",",
"' task '",
")",
")",
":",
"# Eventually add local_iter in a shared clause as iterable is",
"# shared in the for loop (for every clause with datasharing)",
"directive",
".",
"s",
"+=",
"' shared({})'",
"directive",
".",
"deps",
".",
"append",
"(",
"ast",
".",
"Name",
"(",
"local_iter",
",",
"ast",
".",
"Load",
"(",
")",
",",
"None",
")",
")",
"directive",
".",
"shared_deps",
".",
"append",
"(",
"directive",
".",
"deps",
"[",
"-",
"1",
"]",
")",
"target",
"=",
"node",
".",
"target",
"assert",
"isinstance",
"(",
"target",
",",
"ast",
".",
"Name",
")",
"hasfor",
"=",
"'for'",
"in",
"directive",
".",
"s",
"nodefault",
"=",
"'default'",
"not",
"in",
"directive",
".",
"s",
"noindexref",
"=",
"all",
"(",
"isinstance",
"(",
"x",
",",
"ast",
".",
"Name",
")",
"and",
"x",
".",
"id",
"!=",
"target",
".",
"id",
"for",
"x",
"in",
"directive",
".",
"deps",
")",
"if",
"(",
"hasfor",
"and",
"nodefault",
"and",
"noindexref",
"and",
"target",
".",
"id",
"not",
"in",
"self",
".",
"scope",
"[",
"node",
"]",
")",
":",
"# Target is private by default in omp but iterator use may",
"# introduce an extra variable",
"directive",
".",
"s",
"+=",
"' private({})'",
"directive",
".",
"deps",
".",
"append",
"(",
"ast",
".",
"Name",
"(",
"target",
".",
"id",
",",
"ast",
".",
"Load",
"(",
")",
",",
"None",
")",
")",
"directive",
".",
"private_deps",
".",
"append",
"(",
"directive",
".",
"deps",
"[",
"-",
"1",
"]",
")"
] | Fix OpenMP directives on For loops.
Add the target as private variable as a new variable may have been
introduce to handle cxx iterator.
Also, add the iterator as shared variable as all 'parallel for chunck'
have to use the same iterator. | [
"Fix",
"OpenMP",
"directives",
"on",
"For",
"loops",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L598-L628 | train | 232,578 |
serge-sans-paille/pythran | pythran/backend.py | CxxFunction.can_use_autofor | def can_use_autofor(self, node):
"""
Check if given for Node can use autoFor syntax.
To use auto_for:
- iterator should have local scope
- yield should not be use
- OpenMP pragma should not be use
TODO : Yield should block only if it is use in the for loop, not in the
whole function.
"""
auto_for = (isinstance(node.target, ast.Name) and
node.target.id in self.scope[node] and
node.target.id not in self.openmp_deps)
auto_for &= not metadata.get(node, OMPDirective)
auto_for &= node.target.id not in self.openmp_deps
return auto_for | python | def can_use_autofor(self, node):
"""
Check if given for Node can use autoFor syntax.
To use auto_for:
- iterator should have local scope
- yield should not be use
- OpenMP pragma should not be use
TODO : Yield should block only if it is use in the for loop, not in the
whole function.
"""
auto_for = (isinstance(node.target, ast.Name) and
node.target.id in self.scope[node] and
node.target.id not in self.openmp_deps)
auto_for &= not metadata.get(node, OMPDirective)
auto_for &= node.target.id not in self.openmp_deps
return auto_for | [
"def",
"can_use_autofor",
"(",
"self",
",",
"node",
")",
":",
"auto_for",
"=",
"(",
"isinstance",
"(",
"node",
".",
"target",
",",
"ast",
".",
"Name",
")",
"and",
"node",
".",
"target",
".",
"id",
"in",
"self",
".",
"scope",
"[",
"node",
"]",
"and",
"node",
".",
"target",
".",
"id",
"not",
"in",
"self",
".",
"openmp_deps",
")",
"auto_for",
"&=",
"not",
"metadata",
".",
"get",
"(",
"node",
",",
"OMPDirective",
")",
"auto_for",
"&=",
"node",
".",
"target",
".",
"id",
"not",
"in",
"self",
".",
"openmp_deps",
"return",
"auto_for"
] | Check if given for Node can use autoFor syntax.
To use auto_for:
- iterator should have local scope
- yield should not be use
- OpenMP pragma should not be use
TODO : Yield should block only if it is use in the for loop, not in the
whole function. | [
"Check",
"if",
"given",
"for",
"Node",
"can",
"use",
"autoFor",
"syntax",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L630-L647 | train | 232,579 |
serge-sans-paille/pythran | pythran/backend.py | CxxFunction.can_use_c_for | def can_use_c_for(self, node):
"""
Check if a for loop can use classic C syntax.
To use C syntax:
- target should not be assign in the loop
- xrange should be use as iterator
- order have to be known at compile time
"""
assert isinstance(node.target, ast.Name)
if sys.version_info.major == 3:
range_name = 'range'
else:
range_name = 'xrange'
pattern_range = ast.Call(func=ast.Attribute(
value=ast.Name(id='__builtin__',
ctx=ast.Load(),
annotation=None),
attr=range_name, ctx=ast.Load()),
args=AST_any(), keywords=[])
is_assigned = {node.target.id: False}
[is_assigned.update(self.gather(IsAssigned, stmt))
for stmt in node.body]
nodes = ASTMatcher(pattern_range).search(node.iter)
if (node.iter not in nodes or is_assigned[node.target.id]):
return False
args = node.iter.args
if len(args) < 3:
return True
if isinstance(args[2], ast.Num):
return True
return False | python | def can_use_c_for(self, node):
"""
Check if a for loop can use classic C syntax.
To use C syntax:
- target should not be assign in the loop
- xrange should be use as iterator
- order have to be known at compile time
"""
assert isinstance(node.target, ast.Name)
if sys.version_info.major == 3:
range_name = 'range'
else:
range_name = 'xrange'
pattern_range = ast.Call(func=ast.Attribute(
value=ast.Name(id='__builtin__',
ctx=ast.Load(),
annotation=None),
attr=range_name, ctx=ast.Load()),
args=AST_any(), keywords=[])
is_assigned = {node.target.id: False}
[is_assigned.update(self.gather(IsAssigned, stmt))
for stmt in node.body]
nodes = ASTMatcher(pattern_range).search(node.iter)
if (node.iter not in nodes or is_assigned[node.target.id]):
return False
args = node.iter.args
if len(args) < 3:
return True
if isinstance(args[2], ast.Num):
return True
return False | [
"def",
"can_use_c_for",
"(",
"self",
",",
"node",
")",
":",
"assert",
"isinstance",
"(",
"node",
".",
"target",
",",
"ast",
".",
"Name",
")",
"if",
"sys",
".",
"version_info",
".",
"major",
"==",
"3",
":",
"range_name",
"=",
"'range'",
"else",
":",
"range_name",
"=",
"'xrange'",
"pattern_range",
"=",
"ast",
".",
"Call",
"(",
"func",
"=",
"ast",
".",
"Attribute",
"(",
"value",
"=",
"ast",
".",
"Name",
"(",
"id",
"=",
"'__builtin__'",
",",
"ctx",
"=",
"ast",
".",
"Load",
"(",
")",
",",
"annotation",
"=",
"None",
")",
",",
"attr",
"=",
"range_name",
",",
"ctx",
"=",
"ast",
".",
"Load",
"(",
")",
")",
",",
"args",
"=",
"AST_any",
"(",
")",
",",
"keywords",
"=",
"[",
"]",
")",
"is_assigned",
"=",
"{",
"node",
".",
"target",
".",
"id",
":",
"False",
"}",
"[",
"is_assigned",
".",
"update",
"(",
"self",
".",
"gather",
"(",
"IsAssigned",
",",
"stmt",
")",
")",
"for",
"stmt",
"in",
"node",
".",
"body",
"]",
"nodes",
"=",
"ASTMatcher",
"(",
"pattern_range",
")",
".",
"search",
"(",
"node",
".",
"iter",
")",
"if",
"(",
"node",
".",
"iter",
"not",
"in",
"nodes",
"or",
"is_assigned",
"[",
"node",
".",
"target",
".",
"id",
"]",
")",
":",
"return",
"False",
"args",
"=",
"node",
".",
"iter",
".",
"args",
"if",
"len",
"(",
"args",
")",
"<",
"3",
":",
"return",
"True",
"if",
"isinstance",
"(",
"args",
"[",
"2",
"]",
",",
"ast",
".",
"Num",
")",
":",
"return",
"True",
"return",
"False"
] | Check if a for loop can use classic C syntax.
To use C syntax:
- target should not be assign in the loop
- xrange should be use as iterator
- order have to be known at compile time | [
"Check",
"if",
"a",
"for",
"loop",
"can",
"use",
"classic",
"C",
"syntax",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L649-L682 | train | 232,580 |
serge-sans-paille/pythran | pythran/backend.py | CxxFunction.visit_For | def visit_For(self, node):
"""
Create For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... work ...
Becomes
>> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX
= __builtin__.xrange(10);
>> ... possible container size reservation ...
>> for (auto&& i: __iterX)
>> ... the work ...
This function also handle assignment for local variables.
We can notice that three kind of loop are possible:
- Normal for loop on iterator
- Autofor loop.
- Normal for loop using integer variable iteration
Kind of loop used depend on OpenMP, yield use and variable scope.
"""
if not isinstance(node.target, ast.Name):
raise PythranSyntaxError(
"Using something other than an identifier as loop target",
node.target)
target = self.visit(node.target)
# Handle the body of the for loop
loop_body = Block([self.visit(stmt) for stmt in node.body])
# Declare local variables at the top of the loop body
loop_body = self.process_locals(node, loop_body, node.target.id)
iterable = self.visit(node.iter)
if self.can_use_c_for(node):
header, loop = self.gen_c_for(node, target, loop_body)
else:
if self.can_use_autofor(node):
header = []
self.ldecls.remove(node.target.id)
autofor = AutoFor(target, iterable, loop_body)
loop = [self.process_omp_attachements(node, autofor)]
else:
# Iterator declaration
local_iter = "__iter{0}".format(id(node))
local_iter_decl = self.types.builder.Assignable(
self.types[node.iter])
self.handle_omp_for(node, local_iter)
# Assign iterable
# For C loop, it avoids issues
# if the upper bound is assigned in the loop
asgnt = self.make_assign(local_iter_decl, local_iter, iterable)
header = [Statement(asgnt)]
loop = self.gen_for(node, target, local_iter, local_iter_decl,
loop_body)
# For xxxComprehension, it is replaced by a for loop. In this case,
# pre-allocate size of container.
for comp in metadata.get(node, metadata.Comprehension):
header.append(Statement("pythonic::utils::reserve({0},{1})".format(
comp.target,
iterable)))
return Block(header + loop) | python | def visit_For(self, node):
"""
Create For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... work ...
Becomes
>> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX
= __builtin__.xrange(10);
>> ... possible container size reservation ...
>> for (auto&& i: __iterX)
>> ... the work ...
This function also handle assignment for local variables.
We can notice that three kind of loop are possible:
- Normal for loop on iterator
- Autofor loop.
- Normal for loop using integer variable iteration
Kind of loop used depend on OpenMP, yield use and variable scope.
"""
if not isinstance(node.target, ast.Name):
raise PythranSyntaxError(
"Using something other than an identifier as loop target",
node.target)
target = self.visit(node.target)
# Handle the body of the for loop
loop_body = Block([self.visit(stmt) for stmt in node.body])
# Declare local variables at the top of the loop body
loop_body = self.process_locals(node, loop_body, node.target.id)
iterable = self.visit(node.iter)
if self.can_use_c_for(node):
header, loop = self.gen_c_for(node, target, loop_body)
else:
if self.can_use_autofor(node):
header = []
self.ldecls.remove(node.target.id)
autofor = AutoFor(target, iterable, loop_body)
loop = [self.process_omp_attachements(node, autofor)]
else:
# Iterator declaration
local_iter = "__iter{0}".format(id(node))
local_iter_decl = self.types.builder.Assignable(
self.types[node.iter])
self.handle_omp_for(node, local_iter)
# Assign iterable
# For C loop, it avoids issues
# if the upper bound is assigned in the loop
asgnt = self.make_assign(local_iter_decl, local_iter, iterable)
header = [Statement(asgnt)]
loop = self.gen_for(node, target, local_iter, local_iter_decl,
loop_body)
# For xxxComprehension, it is replaced by a for loop. In this case,
# pre-allocate size of container.
for comp in metadata.get(node, metadata.Comprehension):
header.append(Statement("pythonic::utils::reserve({0},{1})".format(
comp.target,
iterable)))
return Block(header + loop) | [
"def",
"visit_For",
"(",
"self",
",",
"node",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
".",
"target",
",",
"ast",
".",
"Name",
")",
":",
"raise",
"PythranSyntaxError",
"(",
"\"Using something other than an identifier as loop target\"",
",",
"node",
".",
"target",
")",
"target",
"=",
"self",
".",
"visit",
"(",
"node",
".",
"target",
")",
"# Handle the body of the for loop",
"loop_body",
"=",
"Block",
"(",
"[",
"self",
".",
"visit",
"(",
"stmt",
")",
"for",
"stmt",
"in",
"node",
".",
"body",
"]",
")",
"# Declare local variables at the top of the loop body",
"loop_body",
"=",
"self",
".",
"process_locals",
"(",
"node",
",",
"loop_body",
",",
"node",
".",
"target",
".",
"id",
")",
"iterable",
"=",
"self",
".",
"visit",
"(",
"node",
".",
"iter",
")",
"if",
"self",
".",
"can_use_c_for",
"(",
"node",
")",
":",
"header",
",",
"loop",
"=",
"self",
".",
"gen_c_for",
"(",
"node",
",",
"target",
",",
"loop_body",
")",
"else",
":",
"if",
"self",
".",
"can_use_autofor",
"(",
"node",
")",
":",
"header",
"=",
"[",
"]",
"self",
".",
"ldecls",
".",
"remove",
"(",
"node",
".",
"target",
".",
"id",
")",
"autofor",
"=",
"AutoFor",
"(",
"target",
",",
"iterable",
",",
"loop_body",
")",
"loop",
"=",
"[",
"self",
".",
"process_omp_attachements",
"(",
"node",
",",
"autofor",
")",
"]",
"else",
":",
"# Iterator declaration",
"local_iter",
"=",
"\"__iter{0}\"",
".",
"format",
"(",
"id",
"(",
"node",
")",
")",
"local_iter_decl",
"=",
"self",
".",
"types",
".",
"builder",
".",
"Assignable",
"(",
"self",
".",
"types",
"[",
"node",
".",
"iter",
"]",
")",
"self",
".",
"handle_omp_for",
"(",
"node",
",",
"local_iter",
")",
"# Assign iterable",
"# For C loop, it avoids issues",
"# if the upper bound is assigned in the loop",
"asgnt",
"=",
"self",
".",
"make_assign",
"(",
"local_iter_decl",
",",
"local_iter",
",",
"iterable",
")",
"header",
"=",
"[",
"Statement",
"(",
"asgnt",
")",
"]",
"loop",
"=",
"self",
".",
"gen_for",
"(",
"node",
",",
"target",
",",
"local_iter",
",",
"local_iter_decl",
",",
"loop_body",
")",
"# For xxxComprehension, it is replaced by a for loop. In this case,",
"# pre-allocate size of container.",
"for",
"comp",
"in",
"metadata",
".",
"get",
"(",
"node",
",",
"metadata",
".",
"Comprehension",
")",
":",
"header",
".",
"append",
"(",
"Statement",
"(",
"\"pythonic::utils::reserve({0},{1})\"",
".",
"format",
"(",
"comp",
".",
"target",
",",
"iterable",
")",
")",
")",
"return",
"Block",
"(",
"header",
"+",
"loop",
")"
] | Create For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... work ...
Becomes
>> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX
= __builtin__.xrange(10);
>> ... possible container size reservation ...
>> for (auto&& i: __iterX)
>> ... the work ...
This function also handle assignment for local variables.
We can notice that three kind of loop are possible:
- Normal for loop on iterator
- Autofor loop.
- Normal for loop using integer variable iteration
Kind of loop used depend on OpenMP, yield use and variable scope. | [
"Create",
"For",
"representation",
"for",
"Cxx",
"generation",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L688-L758 | train | 232,581 |
serge-sans-paille/pythran | pythran/backend.py | CxxFunction.visit_While | def visit_While(self, node):
"""
Create While node for Cxx generation.
It is a cxx_loop to handle else clause.
"""
test = self.visit(node.test)
body = [self.visit(n) for n in node.body]
stmt = While(test, Block(body))
return self.process_omp_attachements(node, stmt) | python | def visit_While(self, node):
"""
Create While node for Cxx generation.
It is a cxx_loop to handle else clause.
"""
test = self.visit(node.test)
body = [self.visit(n) for n in node.body]
stmt = While(test, Block(body))
return self.process_omp_attachements(node, stmt) | [
"def",
"visit_While",
"(",
"self",
",",
"node",
")",
":",
"test",
"=",
"self",
".",
"visit",
"(",
"node",
".",
"test",
")",
"body",
"=",
"[",
"self",
".",
"visit",
"(",
"n",
")",
"for",
"n",
"in",
"node",
".",
"body",
"]",
"stmt",
"=",
"While",
"(",
"test",
",",
"Block",
"(",
"body",
")",
")",
"return",
"self",
".",
"process_omp_attachements",
"(",
"node",
",",
"stmt",
")"
] | Create While node for Cxx generation.
It is a cxx_loop to handle else clause. | [
"Create",
"While",
"node",
"for",
"Cxx",
"generation",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L761-L770 | train | 232,582 |
serge-sans-paille/pythran | pythran/backend.py | CxxFunction.visit_Break | def visit_Break(self, _):
"""
Generate break statement in most case and goto for orelse clause.
See Also : cxx_loop
"""
if self.break_handlers and self.break_handlers[-1]:
return Statement("goto {0}".format(self.break_handlers[-1]))
else:
return Statement("break") | python | def visit_Break(self, _):
"""
Generate break statement in most case and goto for orelse clause.
See Also : cxx_loop
"""
if self.break_handlers and self.break_handlers[-1]:
return Statement("goto {0}".format(self.break_handlers[-1]))
else:
return Statement("break") | [
"def",
"visit_Break",
"(",
"self",
",",
"_",
")",
":",
"if",
"self",
".",
"break_handlers",
"and",
"self",
".",
"break_handlers",
"[",
"-",
"1",
"]",
":",
"return",
"Statement",
"(",
"\"goto {0}\"",
".",
"format",
"(",
"self",
".",
"break_handlers",
"[",
"-",
"1",
"]",
")",
")",
"else",
":",
"return",
"Statement",
"(",
"\"break\"",
")"
] | Generate break statement in most case and goto for orelse clause.
See Also : cxx_loop | [
"Generate",
"break",
"statement",
"in",
"most",
"case",
"and",
"goto",
"for",
"orelse",
"clause",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L827-L836 | train | 232,583 |
serge-sans-paille/pythran | pythran/backend.py | Cxx.visit_Module | def visit_Module(self, node):
""" Build a compilation unit. """
# build all types
deps = sorted(self.dependencies)
headers = [Include(os.path.join("pythonic", "include", *t) + ".hpp")
for t in deps]
headers += [Include(os.path.join("pythonic", *t) + ".hpp")
for t in deps]
decls_n_defns = [self.visit(stmt) for stmt in node.body]
decls, defns = zip(*[s for s in decls_n_defns if s])
nsbody = [s for ls in decls + defns for s in ls]
ns = Namespace(pythran_ward + self.passmanager.module_name, nsbody)
self.result = CompilationUnit(headers + [ns]) | python | def visit_Module(self, node):
""" Build a compilation unit. """
# build all types
deps = sorted(self.dependencies)
headers = [Include(os.path.join("pythonic", "include", *t) + ".hpp")
for t in deps]
headers += [Include(os.path.join("pythonic", *t) + ".hpp")
for t in deps]
decls_n_defns = [self.visit(stmt) for stmt in node.body]
decls, defns = zip(*[s for s in decls_n_defns if s])
nsbody = [s for ls in decls + defns for s in ls]
ns = Namespace(pythran_ward + self.passmanager.module_name, nsbody)
self.result = CompilationUnit(headers + [ns]) | [
"def",
"visit_Module",
"(",
"self",
",",
"node",
")",
":",
"# build all types",
"deps",
"=",
"sorted",
"(",
"self",
".",
"dependencies",
")",
"headers",
"=",
"[",
"Include",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"pythonic\"",
",",
"\"include\"",
",",
"*",
"t",
")",
"+",
"\".hpp\"",
")",
"for",
"t",
"in",
"deps",
"]",
"headers",
"+=",
"[",
"Include",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"pythonic\"",
",",
"*",
"t",
")",
"+",
"\".hpp\"",
")",
"for",
"t",
"in",
"deps",
"]",
"decls_n_defns",
"=",
"[",
"self",
".",
"visit",
"(",
"stmt",
")",
"for",
"stmt",
"in",
"node",
".",
"body",
"]",
"decls",
",",
"defns",
"=",
"zip",
"(",
"*",
"[",
"s",
"for",
"s",
"in",
"decls_n_defns",
"if",
"s",
"]",
")",
"nsbody",
"=",
"[",
"s",
"for",
"ls",
"in",
"decls",
"+",
"defns",
"for",
"s",
"in",
"ls",
"]",
"ns",
"=",
"Namespace",
"(",
"pythran_ward",
"+",
"self",
".",
"passmanager",
".",
"module_name",
",",
"nsbody",
")",
"self",
".",
"result",
"=",
"CompilationUnit",
"(",
"headers",
"+",
"[",
"ns",
"]",
")"
] | Build a compilation unit. | [
"Build",
"a",
"compilation",
"unit",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L1275-L1289 | train | 232,584 |
serge-sans-paille/pythran | pythran/middlend.py | refine | def refine(pm, node, optimizations):
""" Refine node in place until it matches pythran's expectations. """
# Sanitize input
pm.apply(ExpandGlobals, node)
pm.apply(ExpandImportAll, node)
pm.apply(NormalizeTuples, node)
pm.apply(ExpandBuiltins, node)
pm.apply(ExpandImports, node)
pm.apply(NormalizeMethodCalls, node)
pm.apply(NormalizeIsNone, node)
pm.apply(SplitStaticExpression, node)
pm.apply(NormalizeStaticIf, node)
pm.apply(NormalizeTuples, node)
pm.apply(NormalizeException, node)
pm.apply(NormalizeMethodCalls, node)
# Some early optimizations
pm.apply(ComprehensionPatterns, node)
pm.apply(RemoveLambdas, node)
pm.apply(RemoveNestedFunctions, node)
pm.apply(NormalizeCompare, node)
pm.gather(ExtendedSyntaxCheck, node)
pm.apply(ListCompToGenexp, node)
pm.apply(RemoveComprehension, node)
pm.apply(RemoveNamedArguments, node)
# sanitize input
pm.apply(NormalizeReturn, node)
pm.apply(UnshadowParameters, node)
pm.apply(FalsePolymorphism, node)
# some extra optimizations
apply_optimisation = True
while apply_optimisation:
apply_optimisation = False
for optimization in optimizations:
apply_optimisation |= pm.apply(optimization, node)[0] | python | def refine(pm, node, optimizations):
""" Refine node in place until it matches pythran's expectations. """
# Sanitize input
pm.apply(ExpandGlobals, node)
pm.apply(ExpandImportAll, node)
pm.apply(NormalizeTuples, node)
pm.apply(ExpandBuiltins, node)
pm.apply(ExpandImports, node)
pm.apply(NormalizeMethodCalls, node)
pm.apply(NormalizeIsNone, node)
pm.apply(SplitStaticExpression, node)
pm.apply(NormalizeStaticIf, node)
pm.apply(NormalizeTuples, node)
pm.apply(NormalizeException, node)
pm.apply(NormalizeMethodCalls, node)
# Some early optimizations
pm.apply(ComprehensionPatterns, node)
pm.apply(RemoveLambdas, node)
pm.apply(RemoveNestedFunctions, node)
pm.apply(NormalizeCompare, node)
pm.gather(ExtendedSyntaxCheck, node)
pm.apply(ListCompToGenexp, node)
pm.apply(RemoveComprehension, node)
pm.apply(RemoveNamedArguments, node)
# sanitize input
pm.apply(NormalizeReturn, node)
pm.apply(UnshadowParameters, node)
pm.apply(FalsePolymorphism, node)
# some extra optimizations
apply_optimisation = True
while apply_optimisation:
apply_optimisation = False
for optimization in optimizations:
apply_optimisation |= pm.apply(optimization, node)[0] | [
"def",
"refine",
"(",
"pm",
",",
"node",
",",
"optimizations",
")",
":",
"# Sanitize input",
"pm",
".",
"apply",
"(",
"ExpandGlobals",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"ExpandImportAll",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"NormalizeTuples",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"ExpandBuiltins",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"ExpandImports",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"NormalizeMethodCalls",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"NormalizeIsNone",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"SplitStaticExpression",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"NormalizeStaticIf",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"NormalizeTuples",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"NormalizeException",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"NormalizeMethodCalls",
",",
"node",
")",
"# Some early optimizations",
"pm",
".",
"apply",
"(",
"ComprehensionPatterns",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"RemoveLambdas",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"RemoveNestedFunctions",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"NormalizeCompare",
",",
"node",
")",
"pm",
".",
"gather",
"(",
"ExtendedSyntaxCheck",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"ListCompToGenexp",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"RemoveComprehension",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"RemoveNamedArguments",
",",
"node",
")",
"# sanitize input",
"pm",
".",
"apply",
"(",
"NormalizeReturn",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"UnshadowParameters",
",",
"node",
")",
"pm",
".",
"apply",
"(",
"FalsePolymorphism",
",",
"node",
")",
"# some extra optimizations",
"apply_optimisation",
"=",
"True",
"while",
"apply_optimisation",
":",
"apply_optimisation",
"=",
"False",
"for",
"optimization",
"in",
"optimizations",
":",
"apply_optimisation",
"|=",
"pm",
".",
"apply",
"(",
"optimization",
",",
"node",
")",
"[",
"0",
"]"
] | Refine node in place until it matches pythran's expectations. | [
"Refine",
"node",
"in",
"place",
"until",
"it",
"matches",
"pythran",
"s",
"expectations",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/middlend.py#L16-L55 | train | 232,585 |
serge-sans-paille/pythran | pythran/analyses/global_effects.py | GlobalEffects.prepare | def prepare(self, node):
"""
Initialise globals effects as this analyse is inter-procedural.
Initialisation done for Pythonic functions and default value set for
user defined functions.
"""
super(GlobalEffects, self).prepare(node)
def register_node(module):
""" Recursively save globals effect for all functions. """
for v in module.values():
if isinstance(v, dict): # Submodule case
register_node(v)
else:
fe = GlobalEffects.FunctionEffect(v)
self.node_to_functioneffect[v] = fe
self.result.add_node(fe)
if isinstance(v, intrinsic.Class):
register_node(v.fields)
register_node(self.global_declarations)
for module in MODULES.values():
register_node(module)
self.node_to_functioneffect[intrinsic.UnboundValue] = \
GlobalEffects.FunctionEffect(intrinsic.UnboundValue) | python | def prepare(self, node):
"""
Initialise globals effects as this analyse is inter-procedural.
Initialisation done for Pythonic functions and default value set for
user defined functions.
"""
super(GlobalEffects, self).prepare(node)
def register_node(module):
""" Recursively save globals effect for all functions. """
for v in module.values():
if isinstance(v, dict): # Submodule case
register_node(v)
else:
fe = GlobalEffects.FunctionEffect(v)
self.node_to_functioneffect[v] = fe
self.result.add_node(fe)
if isinstance(v, intrinsic.Class):
register_node(v.fields)
register_node(self.global_declarations)
for module in MODULES.values():
register_node(module)
self.node_to_functioneffect[intrinsic.UnboundValue] = \
GlobalEffects.FunctionEffect(intrinsic.UnboundValue) | [
"def",
"prepare",
"(",
"self",
",",
"node",
")",
":",
"super",
"(",
"GlobalEffects",
",",
"self",
")",
".",
"prepare",
"(",
"node",
")",
"def",
"register_node",
"(",
"module",
")",
":",
"\"\"\" Recursively save globals effect for all functions. \"\"\"",
"for",
"v",
"in",
"module",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"# Submodule case",
"register_node",
"(",
"v",
")",
"else",
":",
"fe",
"=",
"GlobalEffects",
".",
"FunctionEffect",
"(",
"v",
")",
"self",
".",
"node_to_functioneffect",
"[",
"v",
"]",
"=",
"fe",
"self",
".",
"result",
".",
"add_node",
"(",
"fe",
")",
"if",
"isinstance",
"(",
"v",
",",
"intrinsic",
".",
"Class",
")",
":",
"register_node",
"(",
"v",
".",
"fields",
")",
"register_node",
"(",
"self",
".",
"global_declarations",
")",
"for",
"module",
"in",
"MODULES",
".",
"values",
"(",
")",
":",
"register_node",
"(",
"module",
")",
"self",
".",
"node_to_functioneffect",
"[",
"intrinsic",
".",
"UnboundValue",
"]",
"=",
"GlobalEffects",
".",
"FunctionEffect",
"(",
"intrinsic",
".",
"UnboundValue",
")"
] | Initialise globals effects as this analyse is inter-procedural.
Initialisation done for Pythonic functions and default value set for
user defined functions. | [
"Initialise",
"globals",
"effects",
"as",
"this",
"analyse",
"is",
"inter",
"-",
"procedural",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/global_effects.py#L43-L68 | train | 232,586 |
serge-sans-paille/pythran | pythran/types/types.py | Types.prepare | def prepare(self, node):
"""
Initialise values to prepare typing computation.
Reorder functions to avoid dependencies issues and prepare typing
computation setting typing values for Pythonic functions.
"""
def register(name, module):
""" Recursively save function typing and combiners for Pythonic."""
for fname, function in module.items():
if isinstance(function, dict):
register(name + "::" + fname, function)
else:
tname = 'pythonic::{0}::functor::{1}'.format(name, fname)
self.result[function] = self.builder.NamedType(tname)
self.combiners[function] = function
if isinstance(function, Class):
register(name + "::" + fname, function.fields)
for mname, module in MODULES.items():
register(mname, module)
super(Types, self).prepare(node) | python | def prepare(self, node):
"""
Initialise values to prepare typing computation.
Reorder functions to avoid dependencies issues and prepare typing
computation setting typing values for Pythonic functions.
"""
def register(name, module):
""" Recursively save function typing and combiners for Pythonic."""
for fname, function in module.items():
if isinstance(function, dict):
register(name + "::" + fname, function)
else:
tname = 'pythonic::{0}::functor::{1}'.format(name, fname)
self.result[function] = self.builder.NamedType(tname)
self.combiners[function] = function
if isinstance(function, Class):
register(name + "::" + fname, function.fields)
for mname, module in MODULES.items():
register(mname, module)
super(Types, self).prepare(node) | [
"def",
"prepare",
"(",
"self",
",",
"node",
")",
":",
"def",
"register",
"(",
"name",
",",
"module",
")",
":",
"\"\"\" Recursively save function typing and combiners for Pythonic.\"\"\"",
"for",
"fname",
",",
"function",
"in",
"module",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"function",
",",
"dict",
")",
":",
"register",
"(",
"name",
"+",
"\"::\"",
"+",
"fname",
",",
"function",
")",
"else",
":",
"tname",
"=",
"'pythonic::{0}::functor::{1}'",
".",
"format",
"(",
"name",
",",
"fname",
")",
"self",
".",
"result",
"[",
"function",
"]",
"=",
"self",
".",
"builder",
".",
"NamedType",
"(",
"tname",
")",
"self",
".",
"combiners",
"[",
"function",
"]",
"=",
"function",
"if",
"isinstance",
"(",
"function",
",",
"Class",
")",
":",
"register",
"(",
"name",
"+",
"\"::\"",
"+",
"fname",
",",
"function",
".",
"fields",
")",
"for",
"mname",
",",
"module",
"in",
"MODULES",
".",
"items",
"(",
")",
":",
"register",
"(",
"mname",
",",
"module",
")",
"super",
"(",
"Types",
",",
"self",
")",
".",
"prepare",
"(",
"node",
")"
] | Initialise values to prepare typing computation.
Reorder functions to avoid dependencies issues and prepare typing
computation setting typing values for Pythonic functions. | [
"Initialise",
"values",
"to",
"prepare",
"typing",
"computation",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L75-L97 | train | 232,587 |
serge-sans-paille/pythran | pythran/types/types.py | Types.register | def register(self, ptype):
"""register ptype as a local typedef"""
# Too many of them leads to memory burst
if len(self.typedefs) < cfg.getint('typing', 'max_combiner'):
self.typedefs.append(ptype)
return True
return False | python | def register(self, ptype):
"""register ptype as a local typedef"""
# Too many of them leads to memory burst
if len(self.typedefs) < cfg.getint('typing', 'max_combiner'):
self.typedefs.append(ptype)
return True
return False | [
"def",
"register",
"(",
"self",
",",
"ptype",
")",
":",
"# Too many of them leads to memory burst",
"if",
"len",
"(",
"self",
".",
"typedefs",
")",
"<",
"cfg",
".",
"getint",
"(",
"'typing'",
",",
"'max_combiner'",
")",
":",
"self",
".",
"typedefs",
".",
"append",
"(",
"ptype",
")",
"return",
"True",
"return",
"False"
] | register ptype as a local typedef | [
"register",
"ptype",
"as",
"a",
"local",
"typedef"
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L106-L112 | train | 232,588 |
serge-sans-paille/pythran | pythran/types/types.py | Types.isargument | def isargument(self, node):
""" checks whether node aliases to a parameter."""
try:
node_id, _ = self.node_to_id(node)
return (node_id in self.name_to_nodes and
any([isinstance(n, ast.Name) and
isinstance(n.ctx, ast.Param)
for n in self.name_to_nodes[node_id]]))
except UnboundableRValue:
return False | python | def isargument(self, node):
""" checks whether node aliases to a parameter."""
try:
node_id, _ = self.node_to_id(node)
return (node_id in self.name_to_nodes and
any([isinstance(n, ast.Name) and
isinstance(n.ctx, ast.Param)
for n in self.name_to_nodes[node_id]]))
except UnboundableRValue:
return False | [
"def",
"isargument",
"(",
"self",
",",
"node",
")",
":",
"try",
":",
"node_id",
",",
"_",
"=",
"self",
".",
"node_to_id",
"(",
"node",
")",
"return",
"(",
"node_id",
"in",
"self",
".",
"name_to_nodes",
"and",
"any",
"(",
"[",
"isinstance",
"(",
"n",
",",
"ast",
".",
"Name",
")",
"and",
"isinstance",
"(",
"n",
".",
"ctx",
",",
"ast",
".",
"Param",
")",
"for",
"n",
"in",
"self",
".",
"name_to_nodes",
"[",
"node_id",
"]",
"]",
")",
")",
"except",
"UnboundableRValue",
":",
"return",
"False"
] | checks whether node aliases to a parameter. | [
"checks",
"whether",
"node",
"aliases",
"to",
"a",
"parameter",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L133-L142 | train | 232,589 |
serge-sans-paille/pythran | pythran/types/types.py | Types.combine | def combine(self, node, othernode, op=None, unary_op=None, register=False,
aliasing_type=False):
"""
Change `node` typing with combination of `node` and `othernode`.
Parameters
----------
aliasing_type : bool
All node aliasing to `node` have to be updated too.
"""
if self.result[othernode] is self.builder.UnknownType:
if node not in self.result:
self.result[node] = self.builder.UnknownType
return
if aliasing_type:
self.combine_(node, othernode, op or operator.add,
unary_op or (lambda x: x), register)
for a in self.strict_aliases[node]:
self.combine_(a, othernode, op or operator.add,
unary_op or (lambda x: x), register)
else:
self.combine_(node, othernode, op or operator.add,
unary_op or (lambda x: x), register) | python | def combine(self, node, othernode, op=None, unary_op=None, register=False,
aliasing_type=False):
"""
Change `node` typing with combination of `node` and `othernode`.
Parameters
----------
aliasing_type : bool
All node aliasing to `node` have to be updated too.
"""
if self.result[othernode] is self.builder.UnknownType:
if node not in self.result:
self.result[node] = self.builder.UnknownType
return
if aliasing_type:
self.combine_(node, othernode, op or operator.add,
unary_op or (lambda x: x), register)
for a in self.strict_aliases[node]:
self.combine_(a, othernode, op or operator.add,
unary_op or (lambda x: x), register)
else:
self.combine_(node, othernode, op or operator.add,
unary_op or (lambda x: x), register) | [
"def",
"combine",
"(",
"self",
",",
"node",
",",
"othernode",
",",
"op",
"=",
"None",
",",
"unary_op",
"=",
"None",
",",
"register",
"=",
"False",
",",
"aliasing_type",
"=",
"False",
")",
":",
"if",
"self",
".",
"result",
"[",
"othernode",
"]",
"is",
"self",
".",
"builder",
".",
"UnknownType",
":",
"if",
"node",
"not",
"in",
"self",
".",
"result",
":",
"self",
".",
"result",
"[",
"node",
"]",
"=",
"self",
".",
"builder",
".",
"UnknownType",
"return",
"if",
"aliasing_type",
":",
"self",
".",
"combine_",
"(",
"node",
",",
"othernode",
",",
"op",
"or",
"operator",
".",
"add",
",",
"unary_op",
"or",
"(",
"lambda",
"x",
":",
"x",
")",
",",
"register",
")",
"for",
"a",
"in",
"self",
".",
"strict_aliases",
"[",
"node",
"]",
":",
"self",
".",
"combine_",
"(",
"a",
",",
"othernode",
",",
"op",
"or",
"operator",
".",
"add",
",",
"unary_op",
"or",
"(",
"lambda",
"x",
":",
"x",
")",
",",
"register",
")",
"else",
":",
"self",
".",
"combine_",
"(",
"node",
",",
"othernode",
",",
"op",
"or",
"operator",
".",
"add",
",",
"unary_op",
"or",
"(",
"lambda",
"x",
":",
"x",
")",
",",
"register",
")"
] | Change `node` typing with combination of `node` and `othernode`.
Parameters
----------
aliasing_type : bool
All node aliasing to `node` have to be updated too. | [
"Change",
"node",
"typing",
"with",
"combination",
"of",
"node",
"and",
"othernode",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L144-L167 | train | 232,590 |
serge-sans-paille/pythran | pythran/types/types.py | Types.visit_Return | def visit_Return(self, node):
""" Compute return type and merges with others possible return type."""
self.generic_visit(node)
# No merge are done if the function is a generator.
if not self.yield_points:
assert node.value, "Values were added in each return statement."
self.combine(self.current, node.value) | python | def visit_Return(self, node):
""" Compute return type and merges with others possible return type."""
self.generic_visit(node)
# No merge are done if the function is a generator.
if not self.yield_points:
assert node.value, "Values were added in each return statement."
self.combine(self.current, node.value) | [
"def",
"visit_Return",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"generic_visit",
"(",
"node",
")",
"# No merge are done if the function is a generator.",
"if",
"not",
"self",
".",
"yield_points",
":",
"assert",
"node",
".",
"value",
",",
"\"Values were added in each return statement.\"",
"self",
".",
"combine",
"(",
"self",
".",
"current",
",",
"node",
".",
"value",
")"
] | Compute return type and merges with others possible return type. | [
"Compute",
"return",
"type",
"and",
"merges",
"with",
"others",
"possible",
"return",
"type",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L293-L299 | train | 232,591 |
serge-sans-paille/pythran | pythran/types/types.py | Types.visit_Yield | def visit_Yield(self, node):
""" Compute yield type and merges it with others yield type. """
self.generic_visit(node)
self.combine(self.current, node.value) | python | def visit_Yield(self, node):
""" Compute yield type and merges it with others yield type. """
self.generic_visit(node)
self.combine(self.current, node.value) | [
"def",
"visit_Yield",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"generic_visit",
"(",
"node",
")",
"self",
".",
"combine",
"(",
"self",
".",
"current",
",",
"node",
".",
"value",
")"
] | Compute yield type and merges it with others yield type. | [
"Compute",
"yield",
"type",
"and",
"merges",
"it",
"with",
"others",
"yield",
"type",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L301-L304 | train | 232,592 |
serge-sans-paille/pythran | pythran/types/types.py | Types.visit_BoolOp | def visit_BoolOp(self, node):
"""
Merge BoolOp operand type.
BoolOp are "and" and "or" and may return any of these results so all
operands should have the combinable type.
"""
# Visit subnodes
self.generic_visit(node)
# Merge all operands types.
[self.combine(node, value) for value in node.values] | python | def visit_BoolOp(self, node):
"""
Merge BoolOp operand type.
BoolOp are "and" and "or" and may return any of these results so all
operands should have the combinable type.
"""
# Visit subnodes
self.generic_visit(node)
# Merge all operands types.
[self.combine(node, value) for value in node.values] | [
"def",
"visit_BoolOp",
"(",
"self",
",",
"node",
")",
":",
"# Visit subnodes",
"self",
".",
"generic_visit",
"(",
"node",
")",
"# Merge all operands types.",
"[",
"self",
".",
"combine",
"(",
"node",
",",
"value",
")",
"for",
"value",
"in",
"node",
".",
"values",
"]"
] | Merge BoolOp operand type.
BoolOp are "and" and "or" and may return any of these results so all
operands should have the combinable type. | [
"Merge",
"BoolOp",
"operand",
"type",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L349-L359 | train | 232,593 |
serge-sans-paille/pythran | pythran/types/types.py | Types.visit_Num | def visit_Num(self, node):
"""
Set type for number.
It could be int, long or float so we use the default python to pythonic
type converter.
"""
ty = type(node.n)
sty = pytype_to_ctype(ty)
if node in self.immediates:
sty = "std::integral_constant<%s, %s>" % (sty, node.n)
self.result[node] = self.builder.NamedType(sty) | python | def visit_Num(self, node):
"""
Set type for number.
It could be int, long or float so we use the default python to pythonic
type converter.
"""
ty = type(node.n)
sty = pytype_to_ctype(ty)
if node in self.immediates:
sty = "std::integral_constant<%s, %s>" % (sty, node.n)
self.result[node] = self.builder.NamedType(sty) | [
"def",
"visit_Num",
"(",
"self",
",",
"node",
")",
":",
"ty",
"=",
"type",
"(",
"node",
".",
"n",
")",
"sty",
"=",
"pytype_to_ctype",
"(",
"ty",
")",
"if",
"node",
"in",
"self",
".",
"immediates",
":",
"sty",
"=",
"\"std::integral_constant<%s, %s>\"",
"%",
"(",
"sty",
",",
"node",
".",
"n",
")",
"self",
".",
"result",
"[",
"node",
"]",
"=",
"self",
".",
"builder",
".",
"NamedType",
"(",
"sty",
")"
] | Set type for number.
It could be int, long or float so we use the default python to pythonic
type converter. | [
"Set",
"type",
"for",
"number",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L450-L462 | train | 232,594 |
serge-sans-paille/pythran | pythran/types/types.py | Types.visit_Str | def visit_Str(self, node):
""" Set the pythonic string type. """
self.result[node] = self.builder.NamedType(pytype_to_ctype(str)) | python | def visit_Str(self, node):
""" Set the pythonic string type. """
self.result[node] = self.builder.NamedType(pytype_to_ctype(str)) | [
"def",
"visit_Str",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"result",
"[",
"node",
"]",
"=",
"self",
".",
"builder",
".",
"NamedType",
"(",
"pytype_to_ctype",
"(",
"str",
")",
")"
] | Set the pythonic string type. | [
"Set",
"the",
"pythonic",
"string",
"type",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L464-L466 | train | 232,595 |
serge-sans-paille/pythran | pythran/types/types.py | Types.visit_Attribute | def visit_Attribute(self, node):
""" Compute typing for an attribute node. """
obj, path = attr_to_path(node)
# If no type is given, use a decltype
if obj.isliteral():
typename = pytype_to_ctype(obj.signature)
self.result[node] = self.builder.NamedType(typename)
else:
self.result[node] = self.builder.DeclType('::'.join(path) + '{}') | python | def visit_Attribute(self, node):
""" Compute typing for an attribute node. """
obj, path = attr_to_path(node)
# If no type is given, use a decltype
if obj.isliteral():
typename = pytype_to_ctype(obj.signature)
self.result[node] = self.builder.NamedType(typename)
else:
self.result[node] = self.builder.DeclType('::'.join(path) + '{}') | [
"def",
"visit_Attribute",
"(",
"self",
",",
"node",
")",
":",
"obj",
",",
"path",
"=",
"attr_to_path",
"(",
"node",
")",
"# If no type is given, use a decltype",
"if",
"obj",
".",
"isliteral",
"(",
")",
":",
"typename",
"=",
"pytype_to_ctype",
"(",
"obj",
".",
"signature",
")",
"self",
".",
"result",
"[",
"node",
"]",
"=",
"self",
".",
"builder",
".",
"NamedType",
"(",
"typename",
")",
"else",
":",
"self",
".",
"result",
"[",
"node",
"]",
"=",
"self",
".",
"builder",
".",
"DeclType",
"(",
"'::'",
".",
"join",
"(",
"path",
")",
"+",
"'{}'",
")"
] | Compute typing for an attribute node. | [
"Compute",
"typing",
"for",
"an",
"attribute",
"node",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L468-L476 | train | 232,596 |
serge-sans-paille/pythran | pythran/types/types.py | Types.visit_Slice | def visit_Slice(self, node):
"""
Set slicing type using continuous information if provided.
Also visit subnodes as they may contains relevant typing information.
"""
self.generic_visit(node)
if node.step is None or (isinstance(node.step, ast.Num) and
node.step.n == 1):
self.result[node] = self.builder.NamedType(
'pythonic::types::contiguous_slice')
else:
self.result[node] = self.builder.NamedType(
'pythonic::types::slice') | python | def visit_Slice(self, node):
"""
Set slicing type using continuous information if provided.
Also visit subnodes as they may contains relevant typing information.
"""
self.generic_visit(node)
if node.step is None or (isinstance(node.step, ast.Num) and
node.step.n == 1):
self.result[node] = self.builder.NamedType(
'pythonic::types::contiguous_slice')
else:
self.result[node] = self.builder.NamedType(
'pythonic::types::slice') | [
"def",
"visit_Slice",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"generic_visit",
"(",
"node",
")",
"if",
"node",
".",
"step",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"node",
".",
"step",
",",
"ast",
".",
"Num",
")",
"and",
"node",
".",
"step",
".",
"n",
"==",
"1",
")",
":",
"self",
".",
"result",
"[",
"node",
"]",
"=",
"self",
".",
"builder",
".",
"NamedType",
"(",
"'pythonic::types::contiguous_slice'",
")",
"else",
":",
"self",
".",
"result",
"[",
"node",
"]",
"=",
"self",
".",
"builder",
".",
"NamedType",
"(",
"'pythonic::types::slice'",
")"
] | Set slicing type using continuous information if provided.
Also visit subnodes as they may contains relevant typing information. | [
"Set",
"slicing",
"type",
"using",
"continuous",
"information",
"if",
"provided",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L478-L491 | train | 232,597 |
serge-sans-paille/pythran | omp/__init__.py | OpenMP.init_not_msvc | def init_not_msvc(self):
""" Find OpenMP library and try to load if using ctype interface. """
# find_library() does not search automatically LD_LIBRARY_PATH
paths = os.environ.get('LD_LIBRARY_PATH', '').split(':')
for gomp in ('libgomp.so', 'libgomp.dylib'):
if cxx is None:
continue
cmd = [cxx, '-print-file-name=' + gomp]
# the subprocess can fail in various ways
# in that case just give up that path
try:
path = os.path.dirname(check_output(cmd).strip())
if path:
paths.append(path)
except OSError:
pass
# Try to load find libgomp shared library using loader search dirs
libgomp_path = find_library("gomp")
# Try to use custom paths if lookup failed
for path in paths:
if libgomp_path:
break
path = path.strip()
if os.path.isdir(path):
libgomp_path = find_library(os.path.join(str(path), "libgomp"))
if not libgomp_path:
raise ImportError("I can't find a shared library for libgomp,"
" you may need to install it or adjust the "
"LD_LIBRARY_PATH environment variable.")
else:
# Load the library (shouldn't fail with an absolute path right?)
self.libomp = ctypes.CDLL(libgomp_path)
self.version = 45 | python | def init_not_msvc(self):
""" Find OpenMP library and try to load if using ctype interface. """
# find_library() does not search automatically LD_LIBRARY_PATH
paths = os.environ.get('LD_LIBRARY_PATH', '').split(':')
for gomp in ('libgomp.so', 'libgomp.dylib'):
if cxx is None:
continue
cmd = [cxx, '-print-file-name=' + gomp]
# the subprocess can fail in various ways
# in that case just give up that path
try:
path = os.path.dirname(check_output(cmd).strip())
if path:
paths.append(path)
except OSError:
pass
# Try to load find libgomp shared library using loader search dirs
libgomp_path = find_library("gomp")
# Try to use custom paths if lookup failed
for path in paths:
if libgomp_path:
break
path = path.strip()
if os.path.isdir(path):
libgomp_path = find_library(os.path.join(str(path), "libgomp"))
if not libgomp_path:
raise ImportError("I can't find a shared library for libgomp,"
" you may need to install it or adjust the "
"LD_LIBRARY_PATH environment variable.")
else:
# Load the library (shouldn't fail with an absolute path right?)
self.libomp = ctypes.CDLL(libgomp_path)
self.version = 45 | [
"def",
"init_not_msvc",
"(",
"self",
")",
":",
"# find_library() does not search automatically LD_LIBRARY_PATH",
"paths",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'LD_LIBRARY_PATH'",
",",
"''",
")",
".",
"split",
"(",
"':'",
")",
"for",
"gomp",
"in",
"(",
"'libgomp.so'",
",",
"'libgomp.dylib'",
")",
":",
"if",
"cxx",
"is",
"None",
":",
"continue",
"cmd",
"=",
"[",
"cxx",
",",
"'-print-file-name='",
"+",
"gomp",
"]",
"# the subprocess can fail in various ways",
"# in that case just give up that path",
"try",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"check_output",
"(",
"cmd",
")",
".",
"strip",
"(",
")",
")",
"if",
"path",
":",
"paths",
".",
"append",
"(",
"path",
")",
"except",
"OSError",
":",
"pass",
"# Try to load find libgomp shared library using loader search dirs",
"libgomp_path",
"=",
"find_library",
"(",
"\"gomp\"",
")",
"# Try to use custom paths if lookup failed",
"for",
"path",
"in",
"paths",
":",
"if",
"libgomp_path",
":",
"break",
"path",
"=",
"path",
".",
"strip",
"(",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"libgomp_path",
"=",
"find_library",
"(",
"os",
".",
"path",
".",
"join",
"(",
"str",
"(",
"path",
")",
",",
"\"libgomp\"",
")",
")",
"if",
"not",
"libgomp_path",
":",
"raise",
"ImportError",
"(",
"\"I can't find a shared library for libgomp,\"",
"\" you may need to install it or adjust the \"",
"\"LD_LIBRARY_PATH environment variable.\"",
")",
"else",
":",
"# Load the library (shouldn't fail with an absolute path right?)",
"self",
".",
"libomp",
"=",
"ctypes",
".",
"CDLL",
"(",
"libgomp_path",
")",
"self",
".",
"version",
"=",
"45"
] | Find OpenMP library and try to load if using ctype interface. | [
"Find",
"OpenMP",
"library",
"and",
"try",
"to",
"load",
"if",
"using",
"ctype",
"interface",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/omp/__init__.py#L44-L79 | train | 232,598 |
serge-sans-paille/pythran | pythran/analyses/inlinable.py | Inlinable.visit_FunctionDef | def visit_FunctionDef(self, node):
""" Determine this function definition can be inlined. """
if (len(node.body) == 1 and
isinstance(node.body[0], (ast.Call, ast.Return))):
ids = self.gather(Identifiers, node.body[0])
# FIXME : It mark "not inlinable" def foo(foo): return foo
if node.name not in ids:
self.result[node.name] = copy.deepcopy(node) | python | def visit_FunctionDef(self, node):
""" Determine this function definition can be inlined. """
if (len(node.body) == 1 and
isinstance(node.body[0], (ast.Call, ast.Return))):
ids = self.gather(Identifiers, node.body[0])
# FIXME : It mark "not inlinable" def foo(foo): return foo
if node.name not in ids:
self.result[node.name] = copy.deepcopy(node) | [
"def",
"visit_FunctionDef",
"(",
"self",
",",
"node",
")",
":",
"if",
"(",
"len",
"(",
"node",
".",
"body",
")",
"==",
"1",
"and",
"isinstance",
"(",
"node",
".",
"body",
"[",
"0",
"]",
",",
"(",
"ast",
".",
"Call",
",",
"ast",
".",
"Return",
")",
")",
")",
":",
"ids",
"=",
"self",
".",
"gather",
"(",
"Identifiers",
",",
"node",
".",
"body",
"[",
"0",
"]",
")",
"# FIXME : It mark \"not inlinable\" def foo(foo): return foo",
"if",
"node",
".",
"name",
"not",
"in",
"ids",
":",
"self",
".",
"result",
"[",
"node",
".",
"name",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"node",
")"
] | Determine this function definition can be inlined. | [
"Determine",
"this",
"function",
"definition",
"can",
"be",
"inlined",
"."
] | 7e1b5af2dddfabc50bd2a977f0178be269b349b5 | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/inlinable.py#L22-L29 | train | 232,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.