repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
partofthething/ace | ace/smoother.py | Smoother.plot | def plot(self, fname=None):
"""
Plot the input data and resulting smooth.
Parameters
----------
fname : str, optional
name of file to produce. If none, will show interactively.
"""
plt.figure()
xy = sorted(zip(self.x, self.smooth_result))
x, y = zip(*xy)
plt.plot(x, y, '-')
plt.plot(self.x, self.y, '.')
if fname:
plt.savefig(fname)
else:
plt.show()
plt.close() | python | def plot(self, fname=None):
"""
Plot the input data and resulting smooth.
Parameters
----------
fname : str, optional
name of file to produce. If none, will show interactively.
"""
plt.figure()
xy = sorted(zip(self.x, self.smooth_result))
x, y = zip(*xy)
plt.plot(x, y, '-')
plt.plot(self.x, self.y, '.')
if fname:
plt.savefig(fname)
else:
plt.show()
plt.close() | [
"def",
"plot",
"(",
"self",
",",
"fname",
"=",
"None",
")",
":",
"plt",
".",
"figure",
"(",
")",
"xy",
"=",
"sorted",
"(",
"zip",
"(",
"self",
".",
"x",
",",
"self",
".",
"smooth_result",
")",
")",
"x",
",",
"y",
"=",
"zip",
"(",
"*",
"xy",
... | Plot the input data and resulting smooth.
Parameters
----------
fname : str, optional
name of file to produce. If none, will show interactively. | [
"Plot",
"the",
"input",
"data",
"and",
"resulting",
"smooth",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L102-L120 | train | 42,100 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother.compute | def compute(self):
"""Perform the smoothing operations."""
self._compute_window_size()
smooth = []
residual = []
x, y = self.x, self.y
# step through x and y data with a window window_size wide.
self._update_values_in_window()
self._update_mean_in_window()
self._update_variance_in_window()
for i, (xi, yi) in enumerate(zip(x, y)):
if ((i - self._neighbors_on_each_side) > 0.0 and
(i + self._neighbors_on_each_side) < len(x)):
self._advance_window()
smooth_here = self._compute_smooth_during_construction(xi)
residual_here = self._compute_cross_validated_residual_here(xi, yi, smooth_here)
smooth.append(smooth_here)
residual.append(residual_here)
self._store_unsorted_results(smooth, residual) | python | def compute(self):
"""Perform the smoothing operations."""
self._compute_window_size()
smooth = []
residual = []
x, y = self.x, self.y
# step through x and y data with a window window_size wide.
self._update_values_in_window()
self._update_mean_in_window()
self._update_variance_in_window()
for i, (xi, yi) in enumerate(zip(x, y)):
if ((i - self._neighbors_on_each_side) > 0.0 and
(i + self._neighbors_on_each_side) < len(x)):
self._advance_window()
smooth_here = self._compute_smooth_during_construction(xi)
residual_here = self._compute_cross_validated_residual_here(xi, yi, smooth_here)
smooth.append(smooth_here)
residual.append(residual_here)
self._store_unsorted_results(smooth, residual) | [
"def",
"compute",
"(",
"self",
")",
":",
"self",
".",
"_compute_window_size",
"(",
")",
"smooth",
"=",
"[",
"]",
"residual",
"=",
"[",
"]",
"x",
",",
"y",
"=",
"self",
".",
"x",
",",
"self",
".",
"y",
"# step through x and y data with a window window_size ... | Perform the smoothing operations. | [
"Perform",
"the",
"smoothing",
"operations",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L151-L172 | train | 42,101 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother._update_values_in_window | def _update_values_in_window(self):
"""Update which values are in the current window."""
window_bound_upper = self._window_bound_lower + self.window_size
self._x_in_window = self.x[self._window_bound_lower:window_bound_upper]
self._y_in_window = self.y[self._window_bound_lower:window_bound_upper] | python | def _update_values_in_window(self):
"""Update which values are in the current window."""
window_bound_upper = self._window_bound_lower + self.window_size
self._x_in_window = self.x[self._window_bound_lower:window_bound_upper]
self._y_in_window = self.y[self._window_bound_lower:window_bound_upper] | [
"def",
"_update_values_in_window",
"(",
"self",
")",
":",
"window_bound_upper",
"=",
"self",
".",
"_window_bound_lower",
"+",
"self",
".",
"window_size",
"self",
".",
"_x_in_window",
"=",
"self",
".",
"x",
"[",
"self",
".",
"_window_bound_lower",
":",
"window_bo... | Update which values are in the current window. | [
"Update",
"which",
"values",
"are",
"in",
"the",
"current",
"window",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L182-L186 | train | 42,102 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother._update_mean_in_window | def _update_mean_in_window(self):
"""
Compute mean in window the slow way. useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
"""
self._mean_x_in_window = numpy.mean(self._x_in_window)
self._mean_y_in_window = numpy.mean(self._y_in_window) | python | def _update_mean_in_window(self):
"""
Compute mean in window the slow way. useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
"""
self._mean_x_in_window = numpy.mean(self._x_in_window)
self._mean_y_in_window = numpy.mean(self._y_in_window) | [
"def",
"_update_mean_in_window",
"(",
"self",
")",
":",
"self",
".",
"_mean_x_in_window",
"=",
"numpy",
".",
"mean",
"(",
"self",
".",
"_x_in_window",
")",
"self",
".",
"_mean_y_in_window",
"=",
"numpy",
".",
"mean",
"(",
"self",
".",
"_y_in_window",
")"
] | Compute mean in window the slow way. useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal | [
"Compute",
"mean",
"in",
"window",
"the",
"slow",
"way",
".",
"useful",
"for",
"first",
"step",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L188-L201 | train | 42,103 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother._advance_window | def _advance_window(self):
"""Update values in current window and the current window means and variances."""
x_to_remove, y_to_remove = self._x_in_window[0], self._y_in_window[0]
self._window_bound_lower += 1
self._update_values_in_window()
x_to_add, y_to_add = self._x_in_window[-1], self._y_in_window[-1]
self._remove_observation(x_to_remove, y_to_remove)
self._add_observation(x_to_add, y_to_add) | python | def _advance_window(self):
"""Update values in current window and the current window means and variances."""
x_to_remove, y_to_remove = self._x_in_window[0], self._y_in_window[0]
self._window_bound_lower += 1
self._update_values_in_window()
x_to_add, y_to_add = self._x_in_window[-1], self._y_in_window[-1]
self._remove_observation(x_to_remove, y_to_remove)
self._add_observation(x_to_add, y_to_add) | [
"def",
"_advance_window",
"(",
"self",
")",
":",
"x_to_remove",
",",
"y_to_remove",
"=",
"self",
".",
"_x_in_window",
"[",
"0",
"]",
",",
"self",
".",
"_y_in_window",
"[",
"0",
"]",
"self",
".",
"_window_bound_lower",
"+=",
"1",
"self",
".",
"_update_value... | Update values in current window and the current window means and variances. | [
"Update",
"values",
"in",
"current",
"window",
"and",
"the",
"current",
"window",
"means",
"and",
"variances",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L219-L228 | train | 42,104 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother._add_observation_to_means | def _add_observation_to_means(self, xj, yj):
"""Update the means without recalculating for the addition of one observation."""
self._mean_x_in_window = ((self.window_size * self._mean_x_in_window + xj) /
(self.window_size + 1.0))
self._mean_y_in_window = ((self.window_size * self._mean_y_in_window + yj) /
(self.window_size + 1.0)) | python | def _add_observation_to_means(self, xj, yj):
"""Update the means without recalculating for the addition of one observation."""
self._mean_x_in_window = ((self.window_size * self._mean_x_in_window + xj) /
(self.window_size + 1.0))
self._mean_y_in_window = ((self.window_size * self._mean_y_in_window + yj) /
(self.window_size + 1.0)) | [
"def",
"_add_observation_to_means",
"(",
"self",
",",
"xj",
",",
"yj",
")",
":",
"self",
".",
"_mean_x_in_window",
"=",
"(",
"(",
"self",
".",
"window_size",
"*",
"self",
".",
"_mean_x_in_window",
"+",
"xj",
")",
"/",
"(",
"self",
".",
"window_size",
"+"... | Update the means without recalculating for the addition of one observation. | [
"Update",
"the",
"means",
"without",
"recalculating",
"for",
"the",
"addition",
"of",
"one",
"observation",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L242-L247 | train | 42,105 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother._remove_observation_from_means | def _remove_observation_from_means(self, xj, yj):
"""Update the means without recalculating for the deletion of one observation."""
self._mean_x_in_window = ((self.window_size * self._mean_x_in_window - xj) /
(self.window_size - 1.0))
self._mean_y_in_window = ((self.window_size * self._mean_y_in_window - yj) /
(self.window_size - 1.0)) | python | def _remove_observation_from_means(self, xj, yj):
"""Update the means without recalculating for the deletion of one observation."""
self._mean_x_in_window = ((self.window_size * self._mean_x_in_window - xj) /
(self.window_size - 1.0))
self._mean_y_in_window = ((self.window_size * self._mean_y_in_window - yj) /
(self.window_size - 1.0)) | [
"def",
"_remove_observation_from_means",
"(",
"self",
",",
"xj",
",",
"yj",
")",
":",
"self",
".",
"_mean_x_in_window",
"=",
"(",
"(",
"self",
".",
"window_size",
"*",
"self",
".",
"_mean_x_in_window",
"-",
"xj",
")",
"/",
"(",
"self",
".",
"window_size",
... | Update the means without recalculating for the deletion of one observation. | [
"Update",
"the",
"means",
"without",
"recalculating",
"for",
"the",
"deletion",
"of",
"one",
"observation",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L249-L254 | train | 42,106 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother._add_observation_to_variances | def _add_observation_to_variances(self, xj, yj):
"""
Quickly update the variance and co-variance for the addition of one observation.
See Also
--------
_update_variance_in_window : compute variance considering full window
"""
term1 = (self.window_size + 1.0) / self.window_size * (xj - self._mean_x_in_window)
self._covariance_in_window += term1 * (yj - self._mean_y_in_window)
self._variance_in_window += term1 * (xj - self._mean_x_in_window) | python | def _add_observation_to_variances(self, xj, yj):
"""
Quickly update the variance and co-variance for the addition of one observation.
See Also
--------
_update_variance_in_window : compute variance considering full window
"""
term1 = (self.window_size + 1.0) / self.window_size * (xj - self._mean_x_in_window)
self._covariance_in_window += term1 * (yj - self._mean_y_in_window)
self._variance_in_window += term1 * (xj - self._mean_x_in_window) | [
"def",
"_add_observation_to_variances",
"(",
"self",
",",
"xj",
",",
"yj",
")",
":",
"term1",
"=",
"(",
"self",
".",
"window_size",
"+",
"1.0",
")",
"/",
"self",
".",
"window_size",
"*",
"(",
"xj",
"-",
"self",
".",
"_mean_x_in_window",
")",
"self",
".... | Quickly update the variance and co-variance for the addition of one observation.
See Also
--------
_update_variance_in_window : compute variance considering full window | [
"Quickly",
"update",
"the",
"variance",
"and",
"co",
"-",
"variance",
"for",
"the",
"addition",
"of",
"one",
"observation",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L256-L266 | train | 42,107 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother._compute_smooth_during_construction | def _compute_smooth_during_construction(self, xi):
"""
Evaluate value of smooth at x-value xi.
Parameters
----------
xi : float
Value of x where smooth value is desired
Returns
-------
smooth_here : float
Value of smooth s(xi)
"""
if self._variance_in_window:
beta = self._covariance_in_window / self._variance_in_window
alpha = self._mean_y_in_window - beta * self._mean_x_in_window
value_of_smooth_here = beta * (xi) + alpha
else:
value_of_smooth_here = 0.0
return value_of_smooth_here | python | def _compute_smooth_during_construction(self, xi):
"""
Evaluate value of smooth at x-value xi.
Parameters
----------
xi : float
Value of x where smooth value is desired
Returns
-------
smooth_here : float
Value of smooth s(xi)
"""
if self._variance_in_window:
beta = self._covariance_in_window / self._variance_in_window
alpha = self._mean_y_in_window - beta * self._mean_x_in_window
value_of_smooth_here = beta * (xi) + alpha
else:
value_of_smooth_here = 0.0
return value_of_smooth_here | [
"def",
"_compute_smooth_during_construction",
"(",
"self",
",",
"xi",
")",
":",
"if",
"self",
".",
"_variance_in_window",
":",
"beta",
"=",
"self",
".",
"_covariance_in_window",
"/",
"self",
".",
"_variance_in_window",
"alpha",
"=",
"self",
".",
"_mean_y_in_window... | Evaluate value of smooth at x-value xi.
Parameters
----------
xi : float
Value of x where smooth value is desired
Returns
-------
smooth_here : float
Value of smooth s(xi) | [
"Evaluate",
"value",
"of",
"smooth",
"at",
"x",
"-",
"value",
"xi",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L274-L294 | train | 42,108 |
partofthething/ace | ace/smoother.py | BasicFixedSpanSmoother._compute_cross_validated_residual_here | def _compute_cross_validated_residual_here(self, xi, yi, smooth_here):
"""
Compute cross validated residual.
This is the absolute residual from Eq. 9. in [1]
"""
denom = (1.0 - 1.0 / self.window_size -
(xi - self._mean_x_in_window) ** 2 /
self._variance_in_window)
if denom == 0.0:
# can happen with small data sets
return 1.0
return abs((yi - smooth_here) / denom) | python | def _compute_cross_validated_residual_here(self, xi, yi, smooth_here):
"""
Compute cross validated residual.
This is the absolute residual from Eq. 9. in [1]
"""
denom = (1.0 - 1.0 / self.window_size -
(xi - self._mean_x_in_window) ** 2 /
self._variance_in_window)
if denom == 0.0:
# can happen with small data sets
return 1.0
return abs((yi - smooth_here) / denom) | [
"def",
"_compute_cross_validated_residual_here",
"(",
"self",
",",
"xi",
",",
"yi",
",",
"smooth_here",
")",
":",
"denom",
"=",
"(",
"1.0",
"-",
"1.0",
"/",
"self",
".",
"window_size",
"-",
"(",
"xi",
"-",
"self",
".",
"_mean_x_in_window",
")",
"**",
"2"... | Compute cross validated residual.
This is the absolute residual from Eq. 9. in [1] | [
"Compute",
"cross",
"validated",
"residual",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L296-L308 | train | 42,109 |
partofthething/ace | ace/samples/breiman85.py | build_sample_ace_problem_breiman85 | def build_sample_ace_problem_breiman85(N=200):
"""Sample problem from Breiman 1985."""
x_cubed = numpy.random.standard_normal(N)
x = scipy.special.cbrt(x_cubed)
noise = numpy.random.standard_normal(N)
y = numpy.exp((x ** 3.0) + noise)
return [x], y | python | def build_sample_ace_problem_breiman85(N=200):
"""Sample problem from Breiman 1985."""
x_cubed = numpy.random.standard_normal(N)
x = scipy.special.cbrt(x_cubed)
noise = numpy.random.standard_normal(N)
y = numpy.exp((x ** 3.0) + noise)
return [x], y | [
"def",
"build_sample_ace_problem_breiman85",
"(",
"N",
"=",
"200",
")",
":",
"x_cubed",
"=",
"numpy",
".",
"random",
".",
"standard_normal",
"(",
"N",
")",
"x",
"=",
"scipy",
".",
"special",
".",
"cbrt",
"(",
"x_cubed",
")",
"noise",
"=",
"numpy",
".",
... | Sample problem from Breiman 1985. | [
"Sample",
"problem",
"from",
"Breiman",
"1985",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/samples/breiman85.py#L9-L15 | train | 42,110 |
partofthething/ace | ace/samples/breiman85.py | run_breiman85 | def run_breiman85():
"""Run Breiman 85 sample."""
x, y = build_sample_ace_problem_breiman85(200)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
ace.plot_transforms(ace_solver, 'sample_ace_breiman85.png')
except ImportError:
pass
return ace_solver | python | def run_breiman85():
"""Run Breiman 85 sample."""
x, y = build_sample_ace_problem_breiman85(200)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
ace.plot_transforms(ace_solver, 'sample_ace_breiman85.png')
except ImportError:
pass
return ace_solver | [
"def",
"run_breiman85",
"(",
")",
":",
"x",
",",
"y",
"=",
"build_sample_ace_problem_breiman85",
"(",
"200",
")",
"ace_solver",
"=",
"ace",
".",
"ACESolver",
"(",
")",
"ace_solver",
".",
"specify_data_set",
"(",
"x",
",",
"y",
")",
"ace_solver",
".",
"solv... | Run Breiman 85 sample. | [
"Run",
"Breiman",
"85",
"sample",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/samples/breiman85.py#L27-L37 | train | 42,111 |
partofthething/ace | ace/samples/breiman85.py | run_breiman2 | def run_breiman2():
"""Run Breiman's other sample problem."""
x, y = build_sample_ace_problem_breiman2(500)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
plt = ace.plot_transforms(ace_solver, None)
except ImportError:
pass
plt.subplot(1, 2, 1)
phi = numpy.sin(2.0 * numpy.pi * x[0])
plt.plot(x[0], phi, label='analytic')
plt.legend()
plt.subplot(1, 2, 2)
y = numpy.exp(phi)
plt.plot(y, phi, label='analytic')
plt.legend(loc='lower right')
# plt.show()
plt.savefig('no_noise_linear_x.png')
return ace_solver | python | def run_breiman2():
"""Run Breiman's other sample problem."""
x, y = build_sample_ace_problem_breiman2(500)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
plt = ace.plot_transforms(ace_solver, None)
except ImportError:
pass
plt.subplot(1, 2, 1)
phi = numpy.sin(2.0 * numpy.pi * x[0])
plt.plot(x[0], phi, label='analytic')
plt.legend()
plt.subplot(1, 2, 2)
y = numpy.exp(phi)
plt.plot(y, phi, label='analytic')
plt.legend(loc='lower right')
# plt.show()
plt.savefig('no_noise_linear_x.png')
return ace_solver | [
"def",
"run_breiman2",
"(",
")",
":",
"x",
",",
"y",
"=",
"build_sample_ace_problem_breiman2",
"(",
"500",
")",
"ace_solver",
"=",
"ace",
".",
"ACESolver",
"(",
")",
"ace_solver",
".",
"specify_data_set",
"(",
"x",
",",
"y",
")",
"ace_solver",
".",
"solve"... | Run Breiman's other sample problem. | [
"Run",
"Breiman",
"s",
"other",
"sample",
"problem",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/samples/breiman85.py#L39-L61 | train | 42,112 |
openstack/monasca-common | monasca_common/kafka/producer.py | KafkaProducer.publish | def publish(self, topic, messages, key=None):
"""Takes messages and puts them on the supplied kafka topic
"""
if not isinstance(messages, list):
messages = [messages]
first = True
success = False
if key is None:
key = int(time.time() * 1000)
messages = [encodeutils.to_utf8(m) for m in messages]
key = bytes(str(key), 'utf-8') if PY3 else str(key)
while not success:
try:
self._producer.send_messages(topic, key, *messages)
success = True
except Exception:
if first:
# This is a warning because of all the other warning and
# error messages that are logged in this case. This way
# someone looking at the log file can see the retry
log.warn("Failed send on topic {}, clear metadata and retry"
.format(topic))
# If Kafka is running in Kubernetes, the cached metadata
# contains the IP Address of the Kafka pod. If the Kafka
# pod has restarted, the IP Address will have changed
# which would have caused the first publish to fail. So,
# clear the cached metadata and retry the publish
self._kafka.reset_topic_metadata(topic)
first = False
continue
log.exception('Error publishing to {} topic.'.format(topic))
raise | python | def publish(self, topic, messages, key=None):
"""Takes messages and puts them on the supplied kafka topic
"""
if not isinstance(messages, list):
messages = [messages]
first = True
success = False
if key is None:
key = int(time.time() * 1000)
messages = [encodeutils.to_utf8(m) for m in messages]
key = bytes(str(key), 'utf-8') if PY3 else str(key)
while not success:
try:
self._producer.send_messages(topic, key, *messages)
success = True
except Exception:
if first:
# This is a warning because of all the other warning and
# error messages that are logged in this case. This way
# someone looking at the log file can see the retry
log.warn("Failed send on topic {}, clear metadata and retry"
.format(topic))
# If Kafka is running in Kubernetes, the cached metadata
# contains the IP Address of the Kafka pod. If the Kafka
# pod has restarted, the IP Address will have changed
# which would have caused the first publish to fail. So,
# clear the cached metadata and retry the publish
self._kafka.reset_topic_metadata(topic)
first = False
continue
log.exception('Error publishing to {} topic.'.format(topic))
raise | [
"def",
"publish",
"(",
"self",
",",
"topic",
",",
"messages",
",",
"key",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"messages",
",",
"list",
")",
":",
"messages",
"=",
"[",
"messages",
"]",
"first",
"=",
"True",
"success",
"=",
"False",
... | Takes messages and puts them on the supplied kafka topic | [
"Takes",
"messages",
"and",
"puts",
"them",
"on",
"the",
"supplied",
"kafka",
"topic"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka/producer.py#L45-L83 | train | 42,113 |
openstack/monasca-common | monasca_common/kafka_lib/protocol.py | create_gzip_message | def create_gzip_message(payloads, key=None, compresslevel=None):
"""
Construct a Gzipped Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Arguments:
payloads: list(bytes), a list of payload to send be sent to Kafka
key: bytes, a key used for partition routing (optional)
"""
message_set = KafkaProtocol._encode_message_set(
[create_message(payload, pl_key) for payload, pl_key in payloads])
gzipped = gzip_encode(message_set, compresslevel=compresslevel)
codec = ATTRIBUTE_CODEC_MASK & CODEC_GZIP
return Message(0, 0x00 | codec, key, gzipped) | python | def create_gzip_message(payloads, key=None, compresslevel=None):
"""
Construct a Gzipped Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Arguments:
payloads: list(bytes), a list of payload to send be sent to Kafka
key: bytes, a key used for partition routing (optional)
"""
message_set = KafkaProtocol._encode_message_set(
[create_message(payload, pl_key) for payload, pl_key in payloads])
gzipped = gzip_encode(message_set, compresslevel=compresslevel)
codec = ATTRIBUTE_CODEC_MASK & CODEC_GZIP
return Message(0, 0x00 | codec, key, gzipped) | [
"def",
"create_gzip_message",
"(",
"payloads",
",",
"key",
"=",
"None",
",",
"compresslevel",
"=",
"None",
")",
":",
"message_set",
"=",
"KafkaProtocol",
".",
"_encode_message_set",
"(",
"[",
"create_message",
"(",
"payload",
",",
"pl_key",
")",
"for",
"payloa... | Construct a Gzipped Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Arguments:
payloads: list(bytes), a list of payload to send be sent to Kafka
key: bytes, a key used for partition routing (optional) | [
"Construct",
"a",
"Gzipped",
"Message",
"containing",
"multiple",
"Messages"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/protocol.py#L601-L619 | train | 42,114 |
openstack/monasca-common | monasca_common/kafka_lib/protocol.py | KafkaProtocol._decode_message_set_iter | def _decode_message_set_iter(cls, data):
"""
Iteratively decode a MessageSet
Reads repeated elements of (offset, message), calling decode_message
to decode a single message. Since compressed messages contain futher
MessageSets, these two methods have been decoupled so that they may
recurse easily.
"""
cur = 0
read_message = False
while cur < len(data):
try:
((offset, ), cur) = relative_unpack('>q', data, cur)
(msg, cur) = read_int_string(data, cur)
for (offset, message) in KafkaProtocol._decode_message(msg, offset):
read_message = True
yield OffsetAndMessage(offset, message)
except BufferUnderflowError:
# NOTE: Not sure this is correct error handling:
# Is it possible to get a BUE if the message set is somewhere
# in the middle of the fetch response? If so, we probably have
# an issue that's not fetch size too small.
# Aren't we ignoring errors if we fail to unpack data by
# raising StopIteration()?
# If _decode_message() raises a ChecksumError, couldn't that
# also be due to the fetch size being too small?
if read_message is False:
# If we get a partial read of a message, but haven't
# yielded anything there's a problem
raise ConsumerFetchSizeTooSmall()
else:
raise StopIteration() | python | def _decode_message_set_iter(cls, data):
"""
Iteratively decode a MessageSet
Reads repeated elements of (offset, message), calling decode_message
to decode a single message. Since compressed messages contain futher
MessageSets, these two methods have been decoupled so that they may
recurse easily.
"""
cur = 0
read_message = False
while cur < len(data):
try:
((offset, ), cur) = relative_unpack('>q', data, cur)
(msg, cur) = read_int_string(data, cur)
for (offset, message) in KafkaProtocol._decode_message(msg, offset):
read_message = True
yield OffsetAndMessage(offset, message)
except BufferUnderflowError:
# NOTE: Not sure this is correct error handling:
# Is it possible to get a BUE if the message set is somewhere
# in the middle of the fetch response? If so, we probably have
# an issue that's not fetch size too small.
# Aren't we ignoring errors if we fail to unpack data by
# raising StopIteration()?
# If _decode_message() raises a ChecksumError, couldn't that
# also be due to the fetch size being too small?
if read_message is False:
# If we get a partial read of a message, but haven't
# yielded anything there's a problem
raise ConsumerFetchSizeTooSmall()
else:
raise StopIteration() | [
"def",
"_decode_message_set_iter",
"(",
"cls",
",",
"data",
")",
":",
"cur",
"=",
"0",
"read_message",
"=",
"False",
"while",
"cur",
"<",
"len",
"(",
"data",
")",
":",
"try",
":",
"(",
"(",
"offset",
",",
")",
",",
"cur",
")",
"=",
"relative_unpack",... | Iteratively decode a MessageSet
Reads repeated elements of (offset, message), calling decode_message
to decode a single message. Since compressed messages contain futher
MessageSets, these two methods have been decoupled so that they may
recurse easily. | [
"Iteratively",
"decode",
"a",
"MessageSet"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/protocol.py#L126-L158 | train | 42,115 |
openstack/monasca-common | monasca_common/kafka_lib/protocol.py | KafkaProtocol._decode_message | def _decode_message(cls, data, offset):
"""
Decode a single Message
The only caller of this method is decode_message_set_iter.
They are decoupled to support nested messages (compressed MessageSets).
The offset is actually read from decode_message_set_iter (it is part
of the MessageSet payload).
"""
((crc, magic, att), cur) = relative_unpack('>IBB', data, 0)
if crc != crc32(data[4:]):
raise ChecksumError("Message checksum failed")
(key, cur) = read_int_string(data, cur)
(value, cur) = read_int_string(data, cur)
codec = att & ATTRIBUTE_CODEC_MASK
if codec == CODEC_NONE:
yield (offset, Message(magic, att, key, value))
elif codec == CODEC_GZIP:
gz = gzip_decode(value)
for (offset, msg) in KafkaProtocol._decode_message_set_iter(gz):
yield (offset, msg)
elif codec == CODEC_SNAPPY:
snp = snappy_decode(value)
for (offset, msg) in KafkaProtocol._decode_message_set_iter(snp):
yield (offset, msg) | python | def _decode_message(cls, data, offset):
"""
Decode a single Message
The only caller of this method is decode_message_set_iter.
They are decoupled to support nested messages (compressed MessageSets).
The offset is actually read from decode_message_set_iter (it is part
of the MessageSet payload).
"""
((crc, magic, att), cur) = relative_unpack('>IBB', data, 0)
if crc != crc32(data[4:]):
raise ChecksumError("Message checksum failed")
(key, cur) = read_int_string(data, cur)
(value, cur) = read_int_string(data, cur)
codec = att & ATTRIBUTE_CODEC_MASK
if codec == CODEC_NONE:
yield (offset, Message(magic, att, key, value))
elif codec == CODEC_GZIP:
gz = gzip_decode(value)
for (offset, msg) in KafkaProtocol._decode_message_set_iter(gz):
yield (offset, msg)
elif codec == CODEC_SNAPPY:
snp = snappy_decode(value)
for (offset, msg) in KafkaProtocol._decode_message_set_iter(snp):
yield (offset, msg) | [
"def",
"_decode_message",
"(",
"cls",
",",
"data",
",",
"offset",
")",
":",
"(",
"(",
"crc",
",",
"magic",
",",
"att",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>IBB'",
",",
"data",
",",
"0",
")",
"if",
"crc",
"!=",
"crc32",
"(",
"data... | Decode a single Message
The only caller of this method is decode_message_set_iter.
They are decoupled to support nested messages (compressed MessageSets).
The offset is actually read from decode_message_set_iter (it is part
of the MessageSet payload). | [
"Decode",
"a",
"single",
"Message"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/protocol.py#L161-L190 | train | 42,116 |
openstack/monasca-common | monasca_common/kafka_lib/protocol.py | KafkaProtocol.decode_offset_response | def decode_offset_response(cls, data):
"""
Decode bytes to an OffsetResponse
Arguments:
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _ in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _ in range(num_partitions):
((partition, error, num_offsets,), cur) = \
relative_unpack('>ihi', data, cur)
offsets = []
for k in range(num_offsets):
((offset,), cur) = relative_unpack('>q', data, cur)
offsets.append(offset)
yield OffsetResponse(topic, partition, error, tuple(offsets)) | python | def decode_offset_response(cls, data):
"""
Decode bytes to an OffsetResponse
Arguments:
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _ in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _ in range(num_partitions):
((partition, error, num_offsets,), cur) = \
relative_unpack('>ihi', data, cur)
offsets = []
for k in range(num_offsets):
((offset,), cur) = relative_unpack('>q', data, cur)
offsets.append(offset)
yield OffsetResponse(topic, partition, error, tuple(offsets)) | [
"def",
"decode_offset_response",
"(",
"cls",
",",
"data",
")",
":",
"(",
"(",
"correlation_id",
",",
"num_topics",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>ii'",
",",
"data",
",",
"0",
")",
"for",
"_",
"in",
"range",
"(",
"num_topics",
")"... | Decode bytes to an OffsetResponse
Arguments:
data: bytes to decode | [
"Decode",
"bytes",
"to",
"an",
"OffsetResponse"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/protocol.py#L344-L366 | train | 42,117 |
openstack/monasca-common | monasca_common/kafka_lib/protocol.py | KafkaProtocol.encode_offset_commit_request | def encode_offset_commit_request(cls, client_id, correlation_id,
group, payloads):
"""
Encode some OffsetCommitRequest structs
Arguments:
client_id: string
correlation_id: int
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequest
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_COMMIT_KEY))
message.append(write_short_string(group))
message.append(struct.pack('>i', len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(write_short_string(topic))
message.append(struct.pack('>i', len(topic_payloads)))
for partition, payload in topic_payloads.items():
message.append(struct.pack('>iq', partition, payload.offset))
message.append(write_short_string(payload.metadata))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg) | python | def encode_offset_commit_request(cls, client_id, correlation_id,
group, payloads):
"""
Encode some OffsetCommitRequest structs
Arguments:
client_id: string
correlation_id: int
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequest
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_COMMIT_KEY))
message.append(write_short_string(group))
message.append(struct.pack('>i', len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(write_short_string(topic))
message.append(struct.pack('>i', len(topic_payloads)))
for partition, payload in topic_payloads.items():
message.append(struct.pack('>iq', partition, payload.offset))
message.append(write_short_string(payload.metadata))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg) | [
"def",
"encode_offset_commit_request",
"(",
"cls",
",",
"client_id",
",",
"correlation_id",
",",
"group",
",",
"payloads",
")",
":",
"grouped_payloads",
"=",
"group_by_topic_and_partition",
"(",
"payloads",
")",
"message",
"=",
"[",
"]",
"message",
".",
"append",
... | Encode some OffsetCommitRequest structs
Arguments:
client_id: string
correlation_id: int
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequest | [
"Encode",
"some",
"OffsetCommitRequest",
"structs"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/protocol.py#L478-L506 | train | 42,118 |
openstack/monasca-common | monasca_common/kafka_lib/protocol.py | KafkaProtocol.encode_offset_fetch_request | def encode_offset_fetch_request(cls, client_id, correlation_id,
group, payloads, from_kafka=False):
"""
Encode some OffsetFetchRequest structs. The request is encoded using
version 0 if from_kafka is false, indicating a request for Zookeeper
offsets. It is encoded using version 1 otherwise, indicating a request
for Kafka offsets.
Arguments:
client_id: string
correlation_id: int
group: string, the consumer group you are fetching offsets for
payloads: list of OffsetFetchRequest
from_kafka: bool, default False, set True for Kafka-committed offsets
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
reqver = 1 if from_kafka else 0
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_FETCH_KEY,
version=reqver))
message.append(write_short_string(group))
message.append(struct.pack('>i', len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(write_short_string(topic))
message.append(struct.pack('>i', len(topic_payloads)))
for partition, payload in topic_payloads.items():
message.append(struct.pack('>i', partition))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg) | python | def encode_offset_fetch_request(cls, client_id, correlation_id,
group, payloads, from_kafka=False):
"""
Encode some OffsetFetchRequest structs. The request is encoded using
version 0 if from_kafka is false, indicating a request for Zookeeper
offsets. It is encoded using version 1 otherwise, indicating a request
for Kafka offsets.
Arguments:
client_id: string
correlation_id: int
group: string, the consumer group you are fetching offsets for
payloads: list of OffsetFetchRequest
from_kafka: bool, default False, set True for Kafka-committed offsets
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
reqver = 1 if from_kafka else 0
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_FETCH_KEY,
version=reqver))
message.append(write_short_string(group))
message.append(struct.pack('>i', len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(write_short_string(topic))
message.append(struct.pack('>i', len(topic_payloads)))
for partition, payload in topic_payloads.items():
message.append(struct.pack('>i', partition))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg) | [
"def",
"encode_offset_fetch_request",
"(",
"cls",
",",
"client_id",
",",
"correlation_id",
",",
"group",
",",
"payloads",
",",
"from_kafka",
"=",
"False",
")",
":",
"grouped_payloads",
"=",
"group_by_topic_and_partition",
"(",
"payloads",
")",
"message",
"=",
"[",... | Encode some OffsetFetchRequest structs. The request is encoded using
version 0 if from_kafka is false, indicating a request for Zookeeper
offsets. It is encoded using version 1 otherwise, indicating a request
for Kafka offsets.
Arguments:
client_id: string
correlation_id: int
group: string, the consumer group you are fetching offsets for
payloads: list of OffsetFetchRequest
from_kafka: bool, default False, set True for Kafka-committed offsets | [
"Encode",
"some",
"OffsetFetchRequest",
"structs",
".",
"The",
"request",
"is",
"encoded",
"using",
"version",
"0",
"if",
"from_kafka",
"is",
"false",
"indicating",
"a",
"request",
"for",
"Zookeeper",
"offsets",
".",
"It",
"is",
"encoded",
"using",
"version",
... | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/protocol.py#L528-L562 | train | 42,119 |
openstack/monasca-common | monasca_common/simport/simport.py | _get_module | def _get_module(target):
"""Import a named class, module, method or function.
Accepts these formats:
".../file/path|module_name:Class.method"
".../file/path|module_name:Class"
".../file/path|module_name:function"
"module_name:Class"
"module_name:function"
"module_name:Class.function"
If a fully qualified directory is specified, it implies the
directory is not already on the Python Path, in which case
it will be added.
For example, if I import /home/foo (and
/home/foo is not in the python path) as
"/home/foo|mycode:MyClass.mymethod"
then /home/foo will be added to the python path and
the module loaded as normal.
"""
filepath, sep, namespace = target.rpartition('|')
if sep and not filepath:
raise BadDirectory("Path to file not supplied.")
module, sep, class_or_function = namespace.rpartition(':')
if (sep and not module) or (filepath and not module):
raise MissingModule("Need a module path for %s (%s)" %
(namespace, target))
if filepath and filepath not in sys.path:
if not os.path.isdir(filepath):
raise BadDirectory("No such directory: '%s'" % filepath)
sys.path.append(filepath)
if not class_or_function:
raise MissingMethodOrFunction(
"No Method or Function specified in '%s'" % target)
if module:
try:
__import__(module)
except ImportError as e:
raise ImportFailed("Failed to import '%s'. "
"Error: %s" % (module, e))
klass, sep, function = class_or_function.rpartition('.')
return module, klass, function | python | def _get_module(target):
"""Import a named class, module, method or function.
Accepts these formats:
".../file/path|module_name:Class.method"
".../file/path|module_name:Class"
".../file/path|module_name:function"
"module_name:Class"
"module_name:function"
"module_name:Class.function"
If a fully qualified directory is specified, it implies the
directory is not already on the Python Path, in which case
it will be added.
For example, if I import /home/foo (and
/home/foo is not in the python path) as
"/home/foo|mycode:MyClass.mymethod"
then /home/foo will be added to the python path and
the module loaded as normal.
"""
filepath, sep, namespace = target.rpartition('|')
if sep and not filepath:
raise BadDirectory("Path to file not supplied.")
module, sep, class_or_function = namespace.rpartition(':')
if (sep and not module) or (filepath and not module):
raise MissingModule("Need a module path for %s (%s)" %
(namespace, target))
if filepath and filepath not in sys.path:
if not os.path.isdir(filepath):
raise BadDirectory("No such directory: '%s'" % filepath)
sys.path.append(filepath)
if not class_or_function:
raise MissingMethodOrFunction(
"No Method or Function specified in '%s'" % target)
if module:
try:
__import__(module)
except ImportError as e:
raise ImportFailed("Failed to import '%s'. "
"Error: %s" % (module, e))
klass, sep, function = class_or_function.rpartition('.')
return module, klass, function | [
"def",
"_get_module",
"(",
"target",
")",
":",
"filepath",
",",
"sep",
",",
"namespace",
"=",
"target",
".",
"rpartition",
"(",
"'|'",
")",
"if",
"sep",
"and",
"not",
"filepath",
":",
"raise",
"BadDirectory",
"(",
"\"Path to file not supplied.\"",
")",
"modu... | Import a named class, module, method or function.
Accepts these formats:
".../file/path|module_name:Class.method"
".../file/path|module_name:Class"
".../file/path|module_name:function"
"module_name:Class"
"module_name:function"
"module_name:Class.function"
If a fully qualified directory is specified, it implies the
directory is not already on the Python Path, in which case
it will be added.
For example, if I import /home/foo (and
/home/foo is not in the python path) as
"/home/foo|mycode:MyClass.mymethod"
then /home/foo will be added to the python path and
the module loaded as normal. | [
"Import",
"a",
"named",
"class",
"module",
"method",
"or",
"function",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/simport/simport.py#L37-L85 | train | 42,120 |
openstack/monasca-common | monasca_common/simport/simport.py | load | def load(target, source_module=None):
"""Get the actual implementation of the target."""
module, klass, function = _get_module(target)
if not module and source_module:
module = source_module
if not module:
raise MissingModule(
"No module name supplied or source_module provided.")
actual_module = sys.modules[module]
if not klass:
return getattr(actual_module, function)
class_object = getattr(actual_module, klass)
if function:
return getattr(class_object, function)
return class_object | python | def load(target, source_module=None):
"""Get the actual implementation of the target."""
module, klass, function = _get_module(target)
if not module and source_module:
module = source_module
if not module:
raise MissingModule(
"No module name supplied or source_module provided.")
actual_module = sys.modules[module]
if not klass:
return getattr(actual_module, function)
class_object = getattr(actual_module, klass)
if function:
return getattr(class_object, function)
return class_object | [
"def",
"load",
"(",
"target",
",",
"source_module",
"=",
"None",
")",
":",
"module",
",",
"klass",
",",
"function",
"=",
"_get_module",
"(",
"target",
")",
"if",
"not",
"module",
"and",
"source_module",
":",
"module",
"=",
"source_module",
"if",
"not",
"... | Get the actual implementation of the target. | [
"Get",
"the",
"actual",
"implementation",
"of",
"the",
"target",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/simport/simport.py#L88-L103 | train | 42,121 |
robotpy/pyfrc | docs/conf.py | process_child | def process_child(node):
"""This function changes class references to not have the
intermediate module name by hacking at the doctree"""
# Edit descriptions to be nicer
if isinstance(node, sphinx.addnodes.desc_addname):
if len(node.children) == 1:
child = node.children[0]
text = child.astext()
if text.startswith("wpilib.") and text.endswith("."):
# remove the last element
text = ".".join(text.split(".")[:-2]) + "."
node.children[0] = docutils.nodes.Text(text)
# Edit literals to be nicer
elif isinstance(node, docutils.nodes.literal):
child = node.children[0]
text = child.astext()
# Remove the imported module name
if text.startswith("wpilib."):
stext = text.split(".")
text = ".".join(stext[:-2] + [stext[-1]])
node.children[0] = docutils.nodes.Text(text)
for child in node.children:
process_child(child) | python | def process_child(node):
"""This function changes class references to not have the
intermediate module name by hacking at the doctree"""
# Edit descriptions to be nicer
if isinstance(node, sphinx.addnodes.desc_addname):
if len(node.children) == 1:
child = node.children[0]
text = child.astext()
if text.startswith("wpilib.") and text.endswith("."):
# remove the last element
text = ".".join(text.split(".")[:-2]) + "."
node.children[0] = docutils.nodes.Text(text)
# Edit literals to be nicer
elif isinstance(node, docutils.nodes.literal):
child = node.children[0]
text = child.astext()
# Remove the imported module name
if text.startswith("wpilib."):
stext = text.split(".")
text = ".".join(stext[:-2] + [stext[-1]])
node.children[0] = docutils.nodes.Text(text)
for child in node.children:
process_child(child) | [
"def",
"process_child",
"(",
"node",
")",
":",
"# Edit descriptions to be nicer",
"if",
"isinstance",
"(",
"node",
",",
"sphinx",
".",
"addnodes",
".",
"desc_addname",
")",
":",
"if",
"len",
"(",
"node",
".",
"children",
")",
"==",
"1",
":",
"child",
"=",
... | This function changes class references to not have the
intermediate module name by hacking at the doctree | [
"This",
"function",
"changes",
"class",
"references",
"to",
"not",
"have",
"the",
"intermediate",
"module",
"name",
"by",
"hacking",
"at",
"the",
"doctree"
] | 7672ea3f17c8d4b702a9f18a7372d95feee7e37d | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/docs/conf.py#L146-L172 | train | 42,122 |
partofthething/ace | ace/model.py | read_column_data_from_txt | def read_column_data_from_txt(fname):
"""
Read data from a simple text file.
Format should be just numbers.
First column is the dependent variable. others are independent.
Whitespace delimited.
Returns
-------
x_values : list
List of x columns
y_values : list
list of y values
"""
datafile = open(fname)
datarows = []
for line in datafile:
datarows.append([float(li) for li in line.split()])
datacols = list(zip(*datarows))
x_values = datacols[1:]
y_values = datacols[0]
return x_values, y_values | python | def read_column_data_from_txt(fname):
"""
Read data from a simple text file.
Format should be just numbers.
First column is the dependent variable. others are independent.
Whitespace delimited.
Returns
-------
x_values : list
List of x columns
y_values : list
list of y values
"""
datafile = open(fname)
datarows = []
for line in datafile:
datarows.append([float(li) for li in line.split()])
datacols = list(zip(*datarows))
x_values = datacols[1:]
y_values = datacols[0]
return x_values, y_values | [
"def",
"read_column_data_from_txt",
"(",
"fname",
")",
":",
"datafile",
"=",
"open",
"(",
"fname",
")",
"datarows",
"=",
"[",
"]",
"for",
"line",
"in",
"datafile",
":",
"datarows",
".",
"append",
"(",
"[",
"float",
"(",
"li",
")",
"for",
"li",
"in",
... | Read data from a simple text file.
Format should be just numbers.
First column is the dependent variable. others are independent.
Whitespace delimited.
Returns
-------
x_values : list
List of x columns
y_values : list
list of y values | [
"Read",
"data",
"from",
"a",
"simple",
"text",
"file",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/model.py#L18-L41 | train | 42,123 |
partofthething/ace | ace/model.py | Model.build_model_from_txt | def build_model_from_txt(self, fname):
"""
Construct the model and perform regressions based on data in a txt file.
Parameters
----------
fname : str
The name of the file to load.
"""
x_values, y_values = read_column_data_from_txt(fname)
self.build_model_from_xy(x_values, y_values) | python | def build_model_from_txt(self, fname):
"""
Construct the model and perform regressions based on data in a txt file.
Parameters
----------
fname : str
The name of the file to load.
"""
x_values, y_values = read_column_data_from_txt(fname)
self.build_model_from_xy(x_values, y_values) | [
"def",
"build_model_from_txt",
"(",
"self",
",",
"fname",
")",
":",
"x_values",
",",
"y_values",
"=",
"read_column_data_from_txt",
"(",
"fname",
")",
"self",
".",
"build_model_from_xy",
"(",
"x_values",
",",
"y_values",
")"
] | Construct the model and perform regressions based on data in a txt file.
Parameters
----------
fname : str
The name of the file to load. | [
"Construct",
"the",
"model",
"and",
"perform",
"regressions",
"based",
"on",
"data",
"in",
"a",
"txt",
"file",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/model.py#L53-L63 | train | 42,124 |
partofthething/ace | ace/model.py | Model.build_model_from_xy | def build_model_from_xy(self, x_values, y_values):
"""Construct the model and perform regressions based on x, y data."""
self.init_ace(x_values, y_values)
self.run_ace()
self.build_interpolators() | python | def build_model_from_xy(self, x_values, y_values):
"""Construct the model and perform regressions based on x, y data."""
self.init_ace(x_values, y_values)
self.run_ace()
self.build_interpolators() | [
"def",
"build_model_from_xy",
"(",
"self",
",",
"x_values",
",",
"y_values",
")",
":",
"self",
".",
"init_ace",
"(",
"x_values",
",",
"y_values",
")",
"self",
".",
"run_ace",
"(",
")",
"self",
".",
"build_interpolators",
"(",
")"
] | Construct the model and perform regressions based on x, y data. | [
"Construct",
"the",
"model",
"and",
"perform",
"regressions",
"based",
"on",
"x",
"y",
"data",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/model.py#L65-L69 | train | 42,125 |
partofthething/ace | ace/model.py | Model.build_interpolators | def build_interpolators(self):
"""Compute 1-D interpolation functions for all the transforms so they're continuous.."""
self.phi_continuous = []
for xi, phii in zip(self.ace.x, self.ace.x_transforms):
self.phi_continuous.append(interp1d(xi, phii))
self.inverse_theta_continuous = interp1d(self.ace.y_transform, self.ace.y) | python | def build_interpolators(self):
"""Compute 1-D interpolation functions for all the transforms so they're continuous.."""
self.phi_continuous = []
for xi, phii in zip(self.ace.x, self.ace.x_transforms):
self.phi_continuous.append(interp1d(xi, phii))
self.inverse_theta_continuous = interp1d(self.ace.y_transform, self.ace.y) | [
"def",
"build_interpolators",
"(",
"self",
")",
":",
"self",
".",
"phi_continuous",
"=",
"[",
"]",
"for",
"xi",
",",
"phii",
"in",
"zip",
"(",
"self",
".",
"ace",
".",
"x",
",",
"self",
".",
"ace",
".",
"x_transforms",
")",
":",
"self",
".",
"phi_c... | Compute 1-D interpolation functions for all the transforms so they're continuous.. | [
"Compute",
"1",
"-",
"D",
"interpolation",
"functions",
"for",
"all",
"the",
"transforms",
"so",
"they",
"re",
"continuous",
".."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/model.py#L79-L84 | train | 42,126 |
partofthething/ace | ace/model.py | Model.eval | def eval(self, x_values):
"""
Evaluate the ACE regression at any combination of independent variable values.
Parameters
----------
x_values : iterable
a float x-value for each independent variable, e.g. (1.5, 2.5)
"""
if len(x_values) != len(self.phi_continuous):
raise ValueError('x_values must have length equal to the number of independent variables '
'({0}) rather than {1}.'.format(len(self.phi_continuous),
len(x_values)))
sum_phi = sum([phi(xi) for phi, xi in zip(self.phi_continuous, x_values)])
return float(self.inverse_theta_continuous(sum_phi)) | python | def eval(self, x_values):
"""
Evaluate the ACE regression at any combination of independent variable values.
Parameters
----------
x_values : iterable
a float x-value for each independent variable, e.g. (1.5, 2.5)
"""
if len(x_values) != len(self.phi_continuous):
raise ValueError('x_values must have length equal to the number of independent variables '
'({0}) rather than {1}.'.format(len(self.phi_continuous),
len(x_values)))
sum_phi = sum([phi(xi) for phi, xi in zip(self.phi_continuous, x_values)])
return float(self.inverse_theta_continuous(sum_phi)) | [
"def",
"eval",
"(",
"self",
",",
"x_values",
")",
":",
"if",
"len",
"(",
"x_values",
")",
"!=",
"len",
"(",
"self",
".",
"phi_continuous",
")",
":",
"raise",
"ValueError",
"(",
"'x_values must have length equal to the number of independent variables '",
"'({0}) rath... | Evaluate the ACE regression at any combination of independent variable values.
Parameters
----------
x_values : iterable
a float x-value for each independent variable, e.g. (1.5, 2.5) | [
"Evaluate",
"the",
"ACE",
"regression",
"at",
"any",
"combination",
"of",
"independent",
"variable",
"values",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/model.py#L86-L101 | train | 42,127 |
robotpy/pyfrc | lib/pyfrc/util.py | yesno | def yesno(prompt):
"""Returns True if user answers 'y' """
prompt += " [y/n]"
a = ""
while a not in ["y", "n"]:
a = input(prompt).lower()
return a == "y" | python | def yesno(prompt):
"""Returns True if user answers 'y' """
prompt += " [y/n]"
a = ""
while a not in ["y", "n"]:
a = input(prompt).lower()
return a == "y" | [
"def",
"yesno",
"(",
"prompt",
")",
":",
"prompt",
"+=",
"\" [y/n]\"",
"a",
"=",
"\"\"",
"while",
"a",
"not",
"in",
"[",
"\"y\"",
",",
"\"n\"",
"]",
":",
"a",
"=",
"input",
"(",
"prompt",
")",
".",
"lower",
"(",
")",
"return",
"a",
"==",
"\"y\""
... | Returns True if user answers 'y' | [
"Returns",
"True",
"if",
"user",
"answers",
"y"
] | 7672ea3f17c8d4b702a9f18a7372d95feee7e37d | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/util.py#L8-L15 | train | 42,128 |
openstack/monasca-common | docker/kafka_wait_for_topics.py | retry | def retry(retries=KAFKA_WAIT_RETRIES, delay=KAFKA_WAIT_INTERVAL,
check_exceptions=()):
"""Retry decorator."""
def decorator(func):
"""Decorator."""
def f_retry(*args, **kwargs):
"""Retry running function on exception after delay."""
for i in range(1, retries + 1):
try:
return func(*args, **kwargs)
# pylint: disable=W0703
# We want to catch all exceptions here to retry.
except check_exceptions + (Exception,) as exc:
if i < retries:
logger.info('Connection attempt %d of %d failed',
i, retries)
if isinstance(exc, check_exceptions):
logger.debug('Caught known exception, retrying...',
exc_info=True)
else:
logger.warn(
'Caught unknown exception, retrying...',
exc_info=True)
else:
logger.exception('Failed after %d attempts', retries)
raise
# No exception so wait before retrying
time.sleep(delay)
return f_retry
return decorator | python | def retry(retries=KAFKA_WAIT_RETRIES, delay=KAFKA_WAIT_INTERVAL,
check_exceptions=()):
"""Retry decorator."""
def decorator(func):
"""Decorator."""
def f_retry(*args, **kwargs):
"""Retry running function on exception after delay."""
for i in range(1, retries + 1):
try:
return func(*args, **kwargs)
# pylint: disable=W0703
# We want to catch all exceptions here to retry.
except check_exceptions + (Exception,) as exc:
if i < retries:
logger.info('Connection attempt %d of %d failed',
i, retries)
if isinstance(exc, check_exceptions):
logger.debug('Caught known exception, retrying...',
exc_info=True)
else:
logger.warn(
'Caught unknown exception, retrying...',
exc_info=True)
else:
logger.exception('Failed after %d attempts', retries)
raise
# No exception so wait before retrying
time.sleep(delay)
return f_retry
return decorator | [
"def",
"retry",
"(",
"retries",
"=",
"KAFKA_WAIT_RETRIES",
",",
"delay",
"=",
"KAFKA_WAIT_INTERVAL",
",",
"check_exceptions",
"=",
"(",
")",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"\"\"\"Decorator.\"\"\"",
"def",
"f_retry",
"(",
"*",
"args",
",... | Retry decorator. | [
"Retry",
"decorator",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/docker/kafka_wait_for_topics.py#L74-L106 | train | 42,129 |
openstack/monasca-common | docker/kafka_wait_for_topics.py | check_topics | def check_topics(client, req_topics):
"""Check for existence of provided topics in Kafka."""
client.update_cluster()
logger.debug('Found topics: %r', client.topics.keys())
for req_topic in req_topics:
if req_topic not in client.topics.keys():
err_topic_not_found = 'Topic not found: {}'.format(req_topic)
logger.warning(err_topic_not_found)
raise TopicNotFound(err_topic_not_found)
topic = client.topics[req_topic]
if not topic.partitions:
err_topic_no_part = 'Topic has no partitions: {}'.format(req_topic)
logger.warning(err_topic_no_part)
raise TopicNoPartition(err_topic_no_part)
logger.info('Topic is ready: %s', req_topic) | python | def check_topics(client, req_topics):
"""Check for existence of provided topics in Kafka."""
client.update_cluster()
logger.debug('Found topics: %r', client.topics.keys())
for req_topic in req_topics:
if req_topic not in client.topics.keys():
err_topic_not_found = 'Topic not found: {}'.format(req_topic)
logger.warning(err_topic_not_found)
raise TopicNotFound(err_topic_not_found)
topic = client.topics[req_topic]
if not topic.partitions:
err_topic_no_part = 'Topic has no partitions: {}'.format(req_topic)
logger.warning(err_topic_no_part)
raise TopicNoPartition(err_topic_no_part)
logger.info('Topic is ready: %s', req_topic) | [
"def",
"check_topics",
"(",
"client",
",",
"req_topics",
")",
":",
"client",
".",
"update_cluster",
"(",
")",
"logger",
".",
"debug",
"(",
"'Found topics: %r'",
",",
"client",
".",
"topics",
".",
"keys",
"(",
")",
")",
"for",
"req_topic",
"in",
"req_topics... | Check for existence of provided topics in Kafka. | [
"Check",
"for",
"existence",
"of",
"provided",
"topics",
"in",
"Kafka",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/docker/kafka_wait_for_topics.py#L110-L127 | train | 42,130 |
openstack/monasca-common | monasca_common/kafka_lib/conn.py | KafkaConnection.send | def send(self, request_id, payload):
"""
Send a request to Kafka
Arguments::
request_id (int): can be any int (used only for debug logging...)
payload: an encoded kafka packet (see KafkaProtocol)
"""
log.debug("About to send %d bytes to Kafka, request %d" % (len(payload), request_id))
# Make sure we have a connection
if not self._sock:
self.reinit()
try:
self._sock.sendall(payload)
except socket.error:
log.exception('Unable to send payload to Kafka')
self._raise_connection_error() | python | def send(self, request_id, payload):
"""
Send a request to Kafka
Arguments::
request_id (int): can be any int (used only for debug logging...)
payload: an encoded kafka packet (see KafkaProtocol)
"""
log.debug("About to send %d bytes to Kafka, request %d" % (len(payload), request_id))
# Make sure we have a connection
if not self._sock:
self.reinit()
try:
self._sock.sendall(payload)
except socket.error:
log.exception('Unable to send payload to Kafka')
self._raise_connection_error() | [
"def",
"send",
"(",
"self",
",",
"request_id",
",",
"payload",
")",
":",
"log",
".",
"debug",
"(",
"\"About to send %d bytes to Kafka, request %d\"",
"%",
"(",
"len",
"(",
"payload",
")",
",",
"request_id",
")",
")",
"# Make sure we have a connection",
"if",
"no... | Send a request to Kafka
Arguments::
request_id (int): can be any int (used only for debug logging...)
payload: an encoded kafka packet (see KafkaProtocol) | [
"Send",
"a",
"request",
"to",
"Kafka"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/conn.py#L138-L157 | train | 42,131 |
openstack/monasca-common | monasca_common/kafka_lib/conn.py | KafkaConnection.recv | def recv(self, request_id):
"""
Get a response packet from Kafka
Arguments:
request_id: can be any int (only used for debug logging...)
Returns:
str: Encoded kafka packet response from server
"""
log.debug("Reading response %d from Kafka" % request_id)
# Make sure we have a connection
if not self._sock:
self.reinit()
# Read the size off of the header
resp = self._read_bytes(4)
(size,) = struct.unpack('>i', resp)
# Read the remainder of the response
resp = self._read_bytes(size)
return resp | python | def recv(self, request_id):
"""
Get a response packet from Kafka
Arguments:
request_id: can be any int (only used for debug logging...)
Returns:
str: Encoded kafka packet response from server
"""
log.debug("Reading response %d from Kafka" % request_id)
# Make sure we have a connection
if not self._sock:
self.reinit()
# Read the size off of the header
resp = self._read_bytes(4)
(size,) = struct.unpack('>i', resp)
# Read the remainder of the response
resp = self._read_bytes(size)
return resp | [
"def",
"recv",
"(",
"self",
",",
"request_id",
")",
":",
"log",
".",
"debug",
"(",
"\"Reading response %d from Kafka\"",
"%",
"request_id",
")",
"# Make sure we have a connection",
"if",
"not",
"self",
".",
"_sock",
":",
"self",
".",
"reinit",
"(",
")",
"# Rea... | Get a response packet from Kafka
Arguments:
request_id: can be any int (only used for debug logging...)
Returns:
str: Encoded kafka packet response from server | [
"Get",
"a",
"response",
"packet",
"from",
"Kafka"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/conn.py#L159-L181 | train | 42,132 |
def copy(self):
    """
    Create an inactive copy of the connection object, suitable for
    passing to a background thread.

    The returned copy is not connected; you must call reinit() before
    using.
    """
    clone = copy.deepcopy(self)
    # Python 3 doesn't copy custom attributes of the threadlocal
    # subclass, so carry them over explicitly.
    for attr in ('host', 'port', 'timeout'):
        setattr(clone, attr, copy.copy(getattr(self, attr)))
    clone._sock = None
    return clone
"""
Create an inactive copy of the connection object, suitable for
passing to a background thread.
The returned copy is not connected; you must call reinit() before
using.
"""
c = copy.deepcopy(self)
# Python 3 doesn't copy custom attributes of the threadlocal subclass
c.host = copy.copy(self.host)
c.port = copy.copy(self.port)
c.timeout = copy.copy(self.timeout)
c._sock = None
return c | [
"def",
"copy",
"(",
"self",
")",
":",
"c",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"# Python 3 doesn't copy custom attributes of the threadlocal subclass",
"c",
".",
"host",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"host",
")",
"c",
".",
"port",
... | Create an inactive copy of the connection object, suitable for
passing to a background thread.
The returned copy is not connected; you must call reinit() before
using. | [
"Create",
"an",
"inactive",
"copy",
"of",
"the",
"connection",
"object",
"suitable",
"for",
"passing",
"to",
"a",
"background",
"thread",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/conn.py#L183-L197 | train | 42,133 |
def close(self):
    """
    Shutdown and close the connection socket.
    """
    # Lazy logger args: formatting is skipped when DEBUG is disabled
    # (original eagerly formatted with '%').
    log.debug("Closing socket connection for %s:%d", self.host, self.port)
    if self._sock:
        # Call shutdown to be a good TCP client
        # But expect an error if the socket has already been
        # closed by the server
        try:
            self._sock.shutdown(socket.SHUT_RDWR)
        except socket.error:
            pass

        # Closing the socket should always succeed
        self._sock.close()
        self._sock = None
    else:
        log.debug("No socket found to close!")
"""
Shutdown and close the connection socket
"""
log.debug("Closing socket connection for %s:%d" % (self.host, self.port))
if self._sock:
# Call shutdown to be a good TCP client
# But expect an error if the socket has already been
# closed by the server
try:
self._sock.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
# Closing the socket should always succeed
self._sock.close()
self._sock = None
else:
log.debug("No socket found to close!") | [
"def",
"close",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"Closing socket connection for %s:%d\"",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
")",
"if",
"self",
".",
"_sock",
":",
"# Call shutdown to be a good TCP client",
"# But ex... | Shutdown and close the connection socket | [
"Shutdown",
"and",
"close",
"the",
"connection",
"socket"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/conn.py#L199-L217 | train | 42,134 |
def configure(self, **configs):
    """Configure the consumer instance

    Configuration settings can be passed to constructor,
    otherwise defaults will be used:

    Keyword Arguments:
        bootstrap_servers (list): List of initial broker nodes the consumer
            should contact to bootstrap initial cluster metadata. This does
            not have to be the full node list. It just needs to have at
            least one broker that will respond to a Metadata API Request.
        client_id (str): a unique name for this client. Defaults to
            'kafka.consumer.kafka'.
        group_id (str): the name of the consumer group to join,
            Offsets are fetched / committed to this group name.
        fetch_message_max_bytes (int, optional): Maximum bytes for each
            topic/partition fetch request. Defaults to 1024*1024.
        fetch_min_bytes (int, optional): Minimum amount of data the server
            should return for a fetch request, otherwise wait up to
            fetch_wait_max_ms for more data to accumulate. Defaults to 1.
        fetch_wait_max_ms (int, optional): Maximum time for the server to
            block waiting for fetch_min_bytes messages to accumulate.
            Defaults to 100.
        refresh_leader_backoff_ms (int, optional): Milliseconds to backoff
            when refreshing metadata on errors (subject to random jitter).
            Defaults to 200.
        socket_timeout_ms (int, optional): TCP socket timeout in
            milliseconds. Defaults to 30*1000.
        auto_offset_reset (str, optional): A policy for resetting offsets on
            OffsetOutOfRange errors. 'smallest' will move to the oldest
            available message, 'largest' will move to the most recent. Any
            other value will raise the exception. Defaults to 'largest'.
        deserializer_class (callable, optional): Any callable that takes a
            raw message value and returns a deserialized value. Defaults to
            lambda msg: msg.
        auto_commit_enable (bool, optional): Enabling auto-commit will cause
            the KafkaConsumer to periodically commit offsets without an
            explicit call to commit(). Defaults to False.
        auto_commit_interval_ms (int, optional): If auto_commit_enabled,
            the milliseconds between automatic offset commits. Defaults to
            60 * 1000.
        auto_commit_interval_messages (int, optional): If
            auto_commit_enabled, a number of messages consumed between
            automatic offset commits. Defaults to None (disabled).
        consumer_timeout_ms (int, optional): number of millisecond to throw
            a timeout exception to the consumer if no message is available
            for consumption. Defaults to -1 (dont throw exception).

    Configuration parameters are described in more detail at
    http://kafka.apache.org/documentation.html#highlevelconsumerapi
    """
    configs = self._deprecate_configs(**configs)
    self._config = {}

    # Fill every known key, falling back to the class-level default.
    for key in self.DEFAULT_CONFIG:
        self._config[key] = configs.pop(key, self.DEFAULT_CONFIG[key])

    # Anything left over was not a recognized configuration key.
    if configs:
        raise KafkaConfigurationError('Unknown configuration key(s): ' +
                                      str(list(configs.keys())))

    # Validate and set up auto-commit in a single pass (the original
    # tested the same flag in two separate blocks).
    if self._config['auto_commit_enable']:
        if not self._config['group_id']:
            raise KafkaConfigurationError(
                'KafkaConsumer configured to auto-commit '
                'without required consumer group (group_id)'
            )
        logger.info("Configuring consumer to auto-commit offsets")
        self._reset_auto_commit()

    if not self._config['bootstrap_servers']:
        raise KafkaConfigurationError(
            'bootstrap_servers required to configure KafkaConsumer'
        )

    self._client = KafkaClient(
        self._config['bootstrap_servers'],
        client_id=self._config['client_id'],
        timeout=(self._config['socket_timeout_ms'] / 1000.0)
    )
"""Configure the consumer instance
Configuration settings can be passed to constructor,
otherwise defaults will be used:
Keyword Arguments:
bootstrap_servers (list): List of initial broker nodes the consumer
should contact to bootstrap initial cluster metadata. This does
not have to be the full node list. It just needs to have at
least one broker that will respond to a Metadata API Request.
client_id (str): a unique name for this client. Defaults to
'kafka.consumer.kafka'.
group_id (str): the name of the consumer group to join,
Offsets are fetched / committed to this group name.
fetch_message_max_bytes (int, optional): Maximum bytes for each
topic/partition fetch request. Defaults to 1024*1024.
fetch_min_bytes (int, optional): Minimum amount of data the server
should return for a fetch request, otherwise wait up to
fetch_wait_max_ms for more data to accumulate. Defaults to 1.
fetch_wait_max_ms (int, optional): Maximum time for the server to
block waiting for fetch_min_bytes messages to accumulate.
Defaults to 100.
refresh_leader_backoff_ms (int, optional): Milliseconds to backoff
when refreshing metadata on errors (subject to random jitter).
Defaults to 200.
socket_timeout_ms (int, optional): TCP socket timeout in
milliseconds. Defaults to 30*1000.
auto_offset_reset (str, optional): A policy for resetting offsets on
OffsetOutOfRange errors. 'smallest' will move to the oldest
available message, 'largest' will move to the most recent. Any
            other value will raise the exception. Defaults to 'largest'.
deserializer_class (callable, optional): Any callable that takes a
raw message value and returns a deserialized value. Defaults to
lambda msg: msg.
auto_commit_enable (bool, optional): Enabling auto-commit will cause
the KafkaConsumer to periodically commit offsets without an
explicit call to commit(). Defaults to False.
auto_commit_interval_ms (int, optional): If auto_commit_enabled,
the milliseconds between automatic offset commits. Defaults to
60 * 1000.
auto_commit_interval_messages (int, optional): If
auto_commit_enabled, a number of messages consumed between
automatic offset commits. Defaults to None (disabled).
consumer_timeout_ms (int, optional): number of millisecond to throw
a timeout exception to the consumer if no message is available
for consumption. Defaults to -1 (dont throw exception).
Configuration parameters are described in more detail at
http://kafka.apache.org/documentation.html#highlevelconsumerapi
"""
configs = self._deprecate_configs(**configs)
self._config = {}
for key in self.DEFAULT_CONFIG:
self._config[key] = configs.pop(key, self.DEFAULT_CONFIG[key])
if configs:
raise KafkaConfigurationError('Unknown configuration key(s): ' +
str(list(configs.keys())))
if self._config['auto_commit_enable']:
if not self._config['group_id']:
raise KafkaConfigurationError(
'KafkaConsumer configured to auto-commit '
'without required consumer group (group_id)'
)
# Check auto-commit configuration
if self._config['auto_commit_enable']:
logger.info("Configuring consumer to auto-commit offsets")
self._reset_auto_commit()
if not self._config['bootstrap_servers']:
raise KafkaConfigurationError(
'bootstrap_servers required to configure KafkaConsumer'
)
self._client = KafkaClient(
self._config['bootstrap_servers'],
client_id=self._config['client_id'],
timeout=(self._config['socket_timeout_ms'] / 1000.0)
) | [
"def",
"configure",
"(",
"self",
",",
"*",
"*",
"configs",
")",
":",
"configs",
"=",
"self",
".",
"_deprecate_configs",
"(",
"*",
"*",
"configs",
")",
"self",
".",
"_config",
"=",
"{",
"}",
"for",
"key",
"in",
"self",
".",
"DEFAULT_CONFIG",
":",
"sel... | Configure the consumer instance
Configuration settings can be passed to constructor,
otherwise defaults will be used:
Keyword Arguments:
bootstrap_servers (list): List of initial broker nodes the consumer
should contact to bootstrap initial cluster metadata. This does
not have to be the full node list. It just needs to have at
least one broker that will respond to a Metadata API Request.
client_id (str): a unique name for this client. Defaults to
'kafka.consumer.kafka'.
group_id (str): the name of the consumer group to join,
Offsets are fetched / committed to this group name.
fetch_message_max_bytes (int, optional): Maximum bytes for each
topic/partition fetch request. Defaults to 1024*1024.
fetch_min_bytes (int, optional): Minimum amount of data the server
should return for a fetch request, otherwise wait up to
fetch_wait_max_ms for more data to accumulate. Defaults to 1.
fetch_wait_max_ms (int, optional): Maximum time for the server to
block waiting for fetch_min_bytes messages to accumulate.
Defaults to 100.
refresh_leader_backoff_ms (int, optional): Milliseconds to backoff
when refreshing metadata on errors (subject to random jitter).
Defaults to 200.
socket_timeout_ms (int, optional): TCP socket timeout in
milliseconds. Defaults to 30*1000.
auto_offset_reset (str, optional): A policy for resetting offsets on
OffsetOutOfRange errors. 'smallest' will move to the oldest
available message, 'largest' will move to the most recent. Any
            other value will raise the exception. Defaults to 'largest'.
deserializer_class (callable, optional): Any callable that takes a
raw message value and returns a deserialized value. Defaults to
lambda msg: msg.
auto_commit_enable (bool, optional): Enabling auto-commit will cause
the KafkaConsumer to periodically commit offsets without an
explicit call to commit(). Defaults to False.
auto_commit_interval_ms (int, optional): If auto_commit_enabled,
the milliseconds between automatic offset commits. Defaults to
60 * 1000.
auto_commit_interval_messages (int, optional): If
auto_commit_enabled, a number of messages consumed between
automatic offset commits. Defaults to None (disabled).
consumer_timeout_ms (int, optional): number of millisecond to throw
a timeout exception to the consumer if no message is available
for consumption. Defaults to -1 (dont throw exception).
Configuration parameters are described in more detail at
http://kafka.apache.org/documentation.html#highlevelconsumerapi | [
"Configure",
"the",
"consumer",
"instance"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/consumer/kafka.py#L75-L156 | train | 42,135 |
def next(self):
    """Return the next available message.

    Blocks indefinitely unless consumer_timeout_ms > 0.

    Returns:
        a single KafkaMessage from the message iterator

    Raises:
        ConsumerTimeout after consumer_timeout_ms and no message

    Note:
        This is also the method called internally during iteration
    """
    self._set_consumer_timeout_start()
    while True:
        try:
            return six.next(self._get_message_iterator())
        except StopIteration:
            # The current fetch batch is exhausted; build a fresh
            # iterator before checking the timeout below.
            self._reset_message_iterator()
        self._check_consumer_timeout()
"""Return the next available message
Blocks indefinitely unless consumer_timeout_ms > 0
Returns:
a single KafkaMessage from the message iterator
Raises:
ConsumerTimeout after consumer_timeout_ms and no message
Note:
This is also the method called internally during iteration
"""
self._set_consumer_timeout_start()
while True:
try:
return six.next(self._get_message_iterator())
# Handle batch completion
except StopIteration:
self._reset_message_iterator()
self._check_consumer_timeout() | [
"def",
"next",
"(",
"self",
")",
":",
"self",
".",
"_set_consumer_timeout_start",
"(",
")",
"while",
"True",
":",
"try",
":",
"return",
"six",
".",
"next",
"(",
"self",
".",
"_get_message_iterator",
"(",
")",
")",
"# Handle batch completion",
"except",
"Stop... | Return the next available message
Blocks indefinitely unless consumer_timeout_ms > 0
Returns:
a single KafkaMessage from the message iterator
Raises:
ConsumerTimeout after consumer_timeout_ms and no message
Note:
This is also the method called internally during iteration | [
"Return",
"the",
"next",
"available",
"message"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/consumer/kafka.py#L290-L315 | train | 42,136 |
def offsets(self, group=None):
    """Get internal consumer offset values.

    Keyword Arguments:
        group: Either "fetch", "commit", "task_done", or "highwater".
            If no group specified, returns all groups.

    Returns:
        A copy of internal offsets struct
    """
    if group:
        # Deep-copy so callers cannot mutate internal offset state.
        return dict(deepcopy(getattr(self._offsets, group)))
    # No group requested -- gather every group keyed by name.
    return {name: self.offsets(name)
            for name in ('fetch', 'commit', 'task_done', 'highwater')}
"""Get internal consumer offset values
Keyword Arguments:
group: Either "fetch", "commit", "task_done", or "highwater".
If no group specified, returns all groups.
Returns:
A copy of internal offsets struct
"""
if not group:
return {
'fetch': self.offsets('fetch'),
'commit': self.offsets('commit'),
'task_done': self.offsets('task_done'),
'highwater': self.offsets('highwater')
}
else:
return dict(deepcopy(getattr(self._offsets, group))) | [
"def",
"offsets",
"(",
"self",
",",
"group",
"=",
"None",
")",
":",
"if",
"not",
"group",
":",
"return",
"{",
"'fetch'",
":",
"self",
".",
"offsets",
"(",
"'fetch'",
")",
",",
"'commit'",
":",
"self",
".",
"offsets",
"(",
"'commit'",
")",
",",
"'ta... | Get internal consumer offset values
Keyword Arguments:
group: Either "fetch", "commit", "task_done", or "highwater".
If no group specified, returns all groups.
Returns:
A copy of internal offsets struct | [
"Get",
"internal",
"consumer",
"offset",
"values"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/consumer/kafka.py#L455-L473 | train | 42,137 |
def task_done(self, message):
    """Mark a fetched message as consumed.

    Offsets for messages marked as "task_done" will be stored back
    to the kafka cluster for this consumer group on commit()

    Arguments:
        message (KafkaMessage): the message to mark as complete

    Returns:
        True, unless the topic-partition for this message has not
        been configured for the consumer. In normal operation, this
        should not happen. But see github issue 364.
    """
    tp = (message.topic, message.partition)
    if tp not in self._topics:
        logger.warning('Unrecognized topic/partition in task_done message: '
                       '{0}:{1}'.format(*tp))
        return False

    offset = message.offset

    # Warn on non-contiguous offsets
    last_done = self._offsets.task_done[tp]
    if last_done is not None and offset != (last_done + 1):
        logger.warning('Marking task_done on a non-continuous offset: %d != %d + 1',
                       offset, last_done)

    # Warn on smaller offsets than previous commit
    # "commit" offsets are actually the offset of the next message to fetch.
    last_commit = self._offsets.commit[tp]
    if last_commit is not None and ((offset + 1) <= last_commit):
        logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d',
                       offset, last_commit)

    self._offsets.task_done[tp] = offset

    # Check for auto-commit
    if self._does_auto_commit_messages():
        self._incr_auto_commit_message_count()
        if self._should_auto_commit():
            self.commit()

    return True
"""Mark a fetched message as consumed.
Offsets for messages marked as "task_done" will be stored back
to the kafka cluster for this consumer group on commit()
Arguments:
message (KafkaMessage): the message to mark as complete
Returns:
True, unless the topic-partition for this message has not
been configured for the consumer. In normal operation, this
should not happen. But see github issue 364.
"""
topic_partition = (message.topic, message.partition)
if topic_partition not in self._topics:
logger.warning('Unrecognized topic/partition in task_done message: '
'{0}:{1}'.format(*topic_partition))
return False
offset = message.offset
# Warn on non-contiguous offsets
prev_done = self._offsets.task_done[topic_partition]
if prev_done is not None and offset != (prev_done + 1):
logger.warning('Marking task_done on a non-continuous offset: %d != %d + 1',
offset, prev_done)
# Warn on smaller offsets than previous commit
# "commit" offsets are actually the offset of the next message to fetch.
prev_commit = self._offsets.commit[topic_partition]
if prev_commit is not None and ((offset + 1) <= prev_commit):
logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d',
offset, prev_commit)
self._offsets.task_done[topic_partition] = offset
# Check for auto-commit
if self._does_auto_commit_messages():
self._incr_auto_commit_message_count()
if self._should_auto_commit():
self.commit()
return True | [
"def",
"task_done",
"(",
"self",
",",
"message",
")",
":",
"topic_partition",
"=",
"(",
"message",
".",
"topic",
",",
"message",
".",
"partition",
")",
"if",
"topic_partition",
"not",
"in",
"self",
".",
"_topics",
":",
"logger",
".",
"warning",
"(",
"'Un... | Mark a fetched message as consumed.
Offsets for messages marked as "task_done" will be stored back
to the kafka cluster for this consumer group on commit()
Arguments:
message (KafkaMessage): the message to mark as complete
Returns:
True, unless the topic-partition for this message has not
been configured for the consumer. In normal operation, this
should not happen. But see github issue 364. | [
"Mark",
"a",
"fetched",
"message",
"as",
"consumed",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/consumer/kafka.py#L475-L519 | train | 42,138 |
def as_json(data, **kwargs):
    """Writes data as json.

    :param dict data: data to convert to json
    :param kwargs kwargs: kwargs for json dumps

    :return: json string
    :rtype: str
    """
    # Apply the historical defaults -- unsorted keys and raw
    # (non-escaped) unicode output -- unless the caller overrides them.
    kwargs.setdefault('sort_keys', False)
    kwargs.setdefault('ensure_ascii', False)
    return json.dumps(data, **kwargs)
"""Writes data as json.
:param dict data: data to convert to json
:param kwargs kwargs: kwargs for json dumps
:return: json string
:rtype: str
"""
if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = False
if 'ensure_ascii' not in kwargs:
kwargs['ensure_ascii'] = False
data = json.dumps(data, **kwargs)
return data | [
"def",
"as_json",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'sort_keys'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'sort_keys'",
"]",
"=",
"False",
"if",
"'ensure_ascii'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'ensure_ascii'",
"]",
... | Writes data as json.
:param dict data: data to convert to json
:param kwargs kwargs: kwargs for json dumps
:return: json string
:rtype: str | [
"Writes",
"data",
"as",
"json",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/rest/utils.py#L39-L55 | train | 42,139 |
def read_body(payload, content_type=JSON_CONTENT_TYPE):
    """Reads HTTP payload according to given content_type.

    Function is capable of reading from payload stream.
    Read data is then processed according to content_type.

    Note:
        Content-Type is validated. It means that if read_body
        is not capable of reading data in the requested type,
        it will throw an exception.

    If read data was empty, None is returned to indicate that.

    Note:
        There is no transformation if content type is equal to
        'text/plain'. What has been read is returned.

    :param stream payload: payload to read, payload should have read method
    :param str content_type: payload content type, default to application/json
    :return: read data, returned type depends on content_type, or None
        if empty
    :exception: :py:class:`.UnreadableContentError` - in case of any failure
        when reading data
    """
    if content_type not in _READABLE_CONTENT_TYPES:
        msg = ('Cannot read %s, not in %s' %
               (content_type, _READABLE_CONTENT_TYPES))
        raise exceptions.UnsupportedContentTypeException(msg)

    try:
        raw = payload.read()
        if not raw:
            return None
    except Exception as ex:
        raise exceptions.UnreadableContentError(str(ex))

    return _READABLE_CONTENT_TYPES[content_type](raw)
"""Reads HTTP payload according to given content_type.
Function is capable of reading from payload stream.
Read data is then processed according to content_type.
Note:
Content-Type is validated. It means that if read_body
body is not capable of reading data in requested type,
it will throw an exception.
If read data was empty method will return false boolean
value to indicate that.
Note:
There is no transformation if content type is equal to
'text/plain'. What has been read is returned.
:param stream payload: payload to read, payload should have read method
:param str content_type: payload content type, default to application/json
:return: read data, returned type depends on content_type or False
if empty
:exception: :py:class:`.UnreadableBody` - in case of any failure when
reading data
"""
if content_type not in _READABLE_CONTENT_TYPES:
msg = ('Cannot read %s, not in %s' %
(content_type, _READABLE_CONTENT_TYPES))
raise exceptions.UnsupportedContentTypeException(msg)
try:
content = payload.read()
if not content:
return None
except Exception as ex:
raise exceptions.UnreadableContentError(str(ex))
return _READABLE_CONTENT_TYPES[content_type](content) | [
"def",
"read_body",
"(",
"payload",
",",
"content_type",
"=",
"JSON_CONTENT_TYPE",
")",
":",
"if",
"content_type",
"not",
"in",
"_READABLE_CONTENT_TYPES",
":",
"msg",
"=",
"(",
"'Cannot read %s, not in %s'",
"%",
"(",
"content_type",
",",
"_READABLE_CONTENT_TYPES",
... | Reads HTTP payload according to given content_type.
Function is capable of reading from payload stream.
Read data is then processed according to content_type.
Note:
Content-Type is validated. It means that if read_body
body is not capable of reading data in requested type,
it will throw an exception.
If read data was empty method will return false boolean
value to indicate that.
Note:
There is no transformation if content type is equal to
'text/plain'. What has been read is returned.
:param stream payload: payload to read, payload should have read method
:param str content_type: payload content type, default to application/json
:return: read data, returned type depends on content_type or False
if empty
:exception: :py:class:`.UnreadableBody` - in case of any failure when
reading data | [
"Reads",
"HTTP",
"payload",
"according",
"to",
"given",
"content_type",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/rest/utils.py#L76-L115 | train | 42,140 |
def __process_idle_events(self):
    """This should never be called directly, it is called via an
    event, and should always be on the GUI thread"""
    # Drain the queue without blocking. The local is named 'func'
    # because the original shadowed the builtin callable().
    while True:
        try:
            func, args = self.queue.get(block=False)
        except queue.Empty:
            break
        func(*args)
"""This should never be called directly, it is called via an
event, and should always be on the GUI thread"""
while True:
try:
callable, args = self.queue.get(block=False)
except queue.Empty:
break
callable(*args) | [
"def",
"__process_idle_events",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"callable",
",",
"args",
"=",
"self",
".",
"queue",
".",
"get",
"(",
"block",
"=",
"False",
")",
"except",
"queue",
".",
"Empty",
":",
"break",
"callable",
"(",
... | This should never be called directly, it is called via an
event, and should always be on the GUI thread | [
"This",
"should",
"never",
"be",
"called",
"directly",
"it",
"is",
"called",
"via",
"an",
"event",
"and",
"should",
"always",
"be",
"on",
"the",
"GUI",
"thread"
] | 7672ea3f17c8d4b702a9f18a7372d95feee7e37d | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/ui.py#L568-L576 | train | 42,141 |
def timer_fired(self):
    """Polling loop for events from other threads"""
    self.__process_idle_events()

    # Grab the simulation lock, gather all of the wpilib objects,
    # and display them on the screen.
    self.update_widgets()

    # Reschedule ourselves in 100 ms -- without this, timer_fired
    # would never run again.
    self.root.after(100, self.timer_fired)
"""Polling loop for events from other threads"""
self.__process_idle_events()
# grab the simulation lock, gather all of the
# wpilib objects, and display them on the screen
self.update_widgets()
# call next timer_fired (or we'll never call timer_fired again!)
delay = 100 # milliseconds
self.root.after(delay, self.timer_fired) | [
"def",
"timer_fired",
"(",
"self",
")",
":",
"self",
".",
"__process_idle_events",
"(",
")",
"# grab the simulation lock, gather all of the",
"# wpilib objects, and display them on the screen",
"self",
".",
"update_widgets",
"(",
")",
"# call next timer_fired (or we'll never call... | Polling loop for events from other threads | [
"Polling",
"loop",
"for",
"events",
"from",
"other",
"threads"
] | 7672ea3f17c8d4b702a9f18a7372d95feee7e37d | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/ui.py#L582-L592 | train | 42,142 |
def snappy_encode(payload, xerial_compatible=False, xerial_blocksize=32 * 1024):
    """Encodes the given data with snappy.

    If xerial_compatible is set, the stream is encoded in a fashion
    compatible with the xerial snappy library. The block size
    (xerial_blocksize) controls how frequently the blocking occurs;
    32k is the default in the xerial library.

    The format winds up being

        +-------------+------------+--------------+------------+--------------+
        |   Header    | Block1 len | Block1 data  | Blockn len | Blockn data  |
        |-------------+------------+--------------+------------+--------------|
        |  16 bytes   |  BE int32  | snappy bytes |  BE int32  | snappy bytes |
        +-------------+------------+--------------+------------+--------------+

    It is important to note that the blocksize is the amount of
    uncompressed data presented to snappy at each block, whereas the
    blocklen is the number of bytes that will be present in the stream;
    that is, the length will always be <= blocksize.
    """
    if not has_snappy():
        raise NotImplementedError("Snappy codec is not available")

    if not xerial_compatible:
        return snappy.compress(payload)

    out = BytesIO()

    # 16-byte xerial magic header.
    out.write(b''.join(struct.pack('!' + fmt, dat)
                       for fmt, dat in zip(_XERIAL_V1_FORMAT, _XERIAL_V1_HEADER)))

    # Compress one xerial_blocksize chunk of *uncompressed* input at a
    # time, prefixing each compressed block with its big-endian length.
    for start in xrange(0, len(payload), xerial_blocksize):
        block = snappy.compress(payload[start:start + xerial_blocksize])
        out.write(struct.pack('!i', len(block)))
        out.write(block)

    out.seek(0)
    return out.read()
"""Encodes the given data with snappy if xerial_compatible is set then the
stream is encoded in a fashion compatible with the xerial snappy library
The block size (xerial_blocksize) controls how frequent the blocking
occurs 32k is the default in the xerial library.
The format winds up being
+-------------+------------+--------------+------------+--------------+
| Header | Block1 len | Block1 data | Blockn len | Blockn data |
|-------------+------------+--------------+------------+--------------|
| 16 bytes | BE int32 | snappy bytes | BE int32 | snappy bytes |
+-------------+------------+--------------+------------+--------------+
    It is important to note that the blocksize is the amount of uncompressed
data presented to snappy at each block, whereas the blocklen is the
number of bytes that will be present in the stream, that is the
length will always be <= blocksize.
"""
if not has_snappy():
raise NotImplementedError("Snappy codec is not available")
if xerial_compatible:
def _chunker():
for i in xrange(0, len(payload), xerial_blocksize):
yield payload[i:i + xerial_blocksize]
out = BytesIO()
header = b''.join([struct.pack('!' + fmt, dat) for fmt, dat
in zip(_XERIAL_V1_FORMAT, _XERIAL_V1_HEADER)])
out.write(header)
for chunk in _chunker():
block = snappy.compress(chunk)
block_size = len(block)
out.write(struct.pack('!i', block_size))
out.write(block)
out.seek(0)
return out.read()
else:
return snappy.compress(payload) | [
"def",
"snappy_encode",
"(",
"payload",
",",
"xerial_compatible",
"=",
"False",
",",
"xerial_blocksize",
"=",
"32",
"*",
"1024",
")",
":",
"if",
"not",
"has_snappy",
"(",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Snappy codec is not available\"",
")",
"i... | Encodes the given data with snappy if xerial_compatible is set then the
stream is encoded in a fashion compatible with the xerial snappy library
The block size (xerial_blocksize) controls how frequent the blocking
occurs 32k is the default in the xerial library.
The format winds up being
+-------------+------------+--------------+------------+--------------+
| Header | Block1 len | Block1 data | Blockn len | Blockn data |
|-------------+------------+--------------+------------+--------------|
| 16 bytes | BE int32 | snappy bytes | BE int32 | snappy bytes |
+-------------+------------+--------------+------------+--------------+
It is important to not that the blocksize is the amount of uncompressed
data presented to snappy at each block, whereas the blocklen is the
number of bytes that will be present in the stream, that is the
length will always be <= blocksize. | [
"Encodes",
"the",
"given",
"data",
"with",
"snappy",
"if",
"xerial_compatible",
"is",
"set",
"then",
"the",
"stream",
"is",
"encoded",
"in",
"a",
"fashion",
"compatible",
"with",
"the",
"xerial",
"snappy",
"library"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/codec.py#L70-L114 | train | 42,143 |
robotpy/pyfrc | lib/pyfrc/mains/cli_deploy.py | relpath | def relpath(path):
"""Path helper, gives you a path relative to this file"""
return os.path.normpath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
) | python | def relpath(path):
"""Path helper, gives you a path relative to this file"""
return os.path.normpath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
) | [
"def",
"relpath",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
",",
"path",
... | Path helper, gives you a path relative to this file | [
"Path",
"helper",
"gives",
"you",
"a",
"path",
"relative",
"to",
"this",
"file"
] | 7672ea3f17c8d4b702a9f18a7372d95feee7e37d | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/mains/cli_deploy.py#L22-L26 | train | 42,144 |
openstack/monasca-common | monasca_common/policy/policy_engine.py | init | def init(policy_file=None, rules=None, default_rule=None, use_conf=True):
"""Init an Enforcer class.
:param policy_file: Custom policy file to use, if none is specified,
`CONF.policy_file` will be used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from config file.
"""
global _ENFORCER
global saved_file_rules
if not _ENFORCER:
_ENFORCER = policy.Enforcer(CONF,
policy_file=policy_file,
rules=rules,
default_rule=default_rule,
use_conf=use_conf
)
register_rules(_ENFORCER)
_ENFORCER.load_rules()
# Only the rules which are loaded from file may be changed
current_file_rules = _ENFORCER.file_rules
current_file_rules = _serialize_rules(current_file_rules)
if saved_file_rules != current_file_rules:
_warning_for_deprecated_user_based_rules(current_file_rules)
saved_file_rules = copy.deepcopy(current_file_rules) | python | def init(policy_file=None, rules=None, default_rule=None, use_conf=True):
"""Init an Enforcer class.
:param policy_file: Custom policy file to use, if none is specified,
`CONF.policy_file` will be used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from config file.
"""
global _ENFORCER
global saved_file_rules
if not _ENFORCER:
_ENFORCER = policy.Enforcer(CONF,
policy_file=policy_file,
rules=rules,
default_rule=default_rule,
use_conf=use_conf
)
register_rules(_ENFORCER)
_ENFORCER.load_rules()
# Only the rules which are loaded from file may be changed
current_file_rules = _ENFORCER.file_rules
current_file_rules = _serialize_rules(current_file_rules)
if saved_file_rules != current_file_rules:
_warning_for_deprecated_user_based_rules(current_file_rules)
saved_file_rules = copy.deepcopy(current_file_rules) | [
"def",
"init",
"(",
"policy_file",
"=",
"None",
",",
"rules",
"=",
"None",
",",
"default_rule",
"=",
"None",
",",
"use_conf",
"=",
"True",
")",
":",
"global",
"_ENFORCER",
"global",
"saved_file_rules",
"if",
"not",
"_ENFORCER",
":",
"_ENFORCER",
"=",
"poli... | Init an Enforcer class.
:param policy_file: Custom policy file to use, if none is specified,
`CONF.policy_file` will be used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from config file. | [
"Init",
"an",
"Enforcer",
"class",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/policy/policy_engine.py#L52-L82 | train | 42,145 |
openstack/monasca-common | monasca_common/policy/policy_engine.py | _serialize_rules | def _serialize_rules(rules):
"""Serialize all the Rule object as string.
New string is used to compare the rules list.
"""
result = [(rule_name, str(rule)) for rule_name, rule in rules.items()]
return sorted(result, key=lambda rule: rule[0]) | python | def _serialize_rules(rules):
"""Serialize all the Rule object as string.
New string is used to compare the rules list.
"""
result = [(rule_name, str(rule)) for rule_name, rule in rules.items()]
return sorted(result, key=lambda rule: rule[0]) | [
"def",
"_serialize_rules",
"(",
"rules",
")",
":",
"result",
"=",
"[",
"(",
"rule_name",
",",
"str",
"(",
"rule",
")",
")",
"for",
"rule_name",
",",
"rule",
"in",
"rules",
".",
"items",
"(",
")",
"]",
"return",
"sorted",
"(",
"result",
",",
"key",
... | Serialize all the Rule object as string.
New string is used to compare the rules list. | [
"Serialize",
"all",
"the",
"Rule",
"object",
"as",
"string",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/policy/policy_engine.py#L85-L91 | train | 42,146 |
openstack/monasca-common | monasca_common/policy/policy_engine.py | _warning_for_deprecated_user_based_rules | def _warning_for_deprecated_user_based_rules(rules):
"""Warning user based policy enforcement used in the rule but the rule
doesn't support it.
"""
for rule in rules:
# We will skip the warning for the resources which support user based
# policy enforcement.
if [resource for resource in USER_BASED_RESOURCES
if resource in rule[0]]:
continue
if 'user_id' in KEY_EXPR.findall(rule[1]):
LOG.warning(_LW("The user_id attribute isn't supported in the "
"rule '%s'. All the user_id based policy "
"enforcement will be removed in the "
"future."), rule[0]) | python | def _warning_for_deprecated_user_based_rules(rules):
"""Warning user based policy enforcement used in the rule but the rule
doesn't support it.
"""
for rule in rules:
# We will skip the warning for the resources which support user based
# policy enforcement.
if [resource for resource in USER_BASED_RESOURCES
if resource in rule[0]]:
continue
if 'user_id' in KEY_EXPR.findall(rule[1]):
LOG.warning(_LW("The user_id attribute isn't supported in the "
"rule '%s'. All the user_id based policy "
"enforcement will be removed in the "
"future."), rule[0]) | [
"def",
"_warning_for_deprecated_user_based_rules",
"(",
"rules",
")",
":",
"for",
"rule",
"in",
"rules",
":",
"# We will skip the warning for the resources which support user based",
"# policy enforcement.",
"if",
"[",
"resource",
"for",
"resource",
"in",
"USER_BASED_RESOURCES"... | Warning user based policy enforcement used in the rule but the rule
doesn't support it. | [
"Warning",
"user",
"based",
"policy",
"enforcement",
"used",
"in",
"the",
"rule",
"but",
"the",
"rule",
"doesn",
"t",
"support",
"it",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/policy/policy_engine.py#L94-L108 | train | 42,147 |
openstack/monasca-common | monasca_common/policy/policy_engine.py | authorize | def authorize(context, action, target, do_raise=True):
"""Verify that the action is valid on the target in this context.
:param context: monasca project context
:param action: String representing the action to be checked. This
should be colon separated for clarity.
:param target: Dictionary representing the object of the action for
object creation. This should be a dictionary representing
the location of the object e.g.
``{'project_id': 'context.project_id'}``
:param do_raise: if True (the default), raises PolicyNotAuthorized,
if False returns False
:type context: object
:type action: str
:type target: dict
:type do_raise: bool
:return: returns a non-False value (not necessarily True) if authorized,
and the False if not authorized and do_raise if False
:raises oslo_policy.policy.PolicyNotAuthorized: if verification fails
"""
init()
credentials = context.to_policy_values()
try:
result = _ENFORCER.authorize(action, target, credentials,
do_raise=do_raise, action=action)
return result
except policy.PolicyNotRegistered:
LOG.exception('Policy not registered')
raise
except Exception:
LOG.debug('Policy check for %(action)s failed with credentials '
'%(credentials)s',
{'action': action, 'credentials': credentials})
raise | python | def authorize(context, action, target, do_raise=True):
"""Verify that the action is valid on the target in this context.
:param context: monasca project context
:param action: String representing the action to be checked. This
should be colon separated for clarity.
:param target: Dictionary representing the object of the action for
object creation. This should be a dictionary representing
the location of the object e.g.
``{'project_id': 'context.project_id'}``
:param do_raise: if True (the default), raises PolicyNotAuthorized,
if False returns False
:type context: object
:type action: str
:type target: dict
:type do_raise: bool
:return: returns a non-False value (not necessarily True) if authorized,
and the False if not authorized and do_raise if False
:raises oslo_policy.policy.PolicyNotAuthorized: if verification fails
"""
init()
credentials = context.to_policy_values()
try:
result = _ENFORCER.authorize(action, target, credentials,
do_raise=do_raise, action=action)
return result
except policy.PolicyNotRegistered:
LOG.exception('Policy not registered')
raise
except Exception:
LOG.debug('Policy check for %(action)s failed with credentials '
'%(credentials)s',
{'action': action, 'credentials': credentials})
raise | [
"def",
"authorize",
"(",
"context",
",",
"action",
",",
"target",
",",
"do_raise",
"=",
"True",
")",
":",
"init",
"(",
")",
"credentials",
"=",
"context",
".",
"to_policy_values",
"(",
")",
"try",
":",
"result",
"=",
"_ENFORCER",
".",
"authorize",
"(",
... | Verify that the action is valid on the target in this context.
:param context: monasca project context
:param action: String representing the action to be checked. This
should be colon separated for clarity.
:param target: Dictionary representing the object of the action for
object creation. This should be a dictionary representing
the location of the object e.g.
``{'project_id': 'context.project_id'}``
:param do_raise: if True (the default), raises PolicyNotAuthorized,
if False returns False
:type context: object
:type action: str
:type target: dict
:type do_raise: bool
:return: returns a non-False value (not necessarily True) if authorized,
and the False if not authorized and do_raise if False
:raises oslo_policy.policy.PolicyNotAuthorized: if verification fails | [
"Verify",
"that",
"the",
"action",
"is",
"valid",
"on",
"the",
"target",
"in",
"this",
"context",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/policy/policy_engine.py#L117-L151 | train | 42,148 |
openstack/monasca-common | monasca_common/policy/policy_engine.py | check_is_admin | def check_is_admin(context):
"""Check if roles contains 'admin' role according to policy settings."""
init()
credentials = context.to_policy_values()
target = credentials
return _ENFORCER.authorize('admin_required', target, credentials) | python | def check_is_admin(context):
"""Check if roles contains 'admin' role according to policy settings."""
init()
credentials = context.to_policy_values()
target = credentials
return _ENFORCER.authorize('admin_required', target, credentials) | [
"def",
"check_is_admin",
"(",
"context",
")",
":",
"init",
"(",
")",
"credentials",
"=",
"context",
".",
"to_policy_values",
"(",
")",
"target",
"=",
"credentials",
"return",
"_ENFORCER",
".",
"authorize",
"(",
"'admin_required'",
",",
"target",
",",
"credenti... | Check if roles contains 'admin' role according to policy settings. | [
"Check",
"if",
"roles",
"contains",
"admin",
"role",
"according",
"to",
"policy",
"settings",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/policy/policy_engine.py#L154-L159 | train | 42,149 |
openstack/monasca-common | monasca_common/policy/policy_engine.py | set_rules | def set_rules(rules, overwrite=True, use_conf=False): # pragma: no cover
"""Set rules based on the provided dict of rules.
Note:
Used in tests only.
:param rules: New rules to use. It should be an instance of dict
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from config file.
"""
init(use_conf=False)
_ENFORCER.set_rules(rules, overwrite, use_conf) | python | def set_rules(rules, overwrite=True, use_conf=False): # pragma: no cover
"""Set rules based on the provided dict of rules.
Note:
Used in tests only.
:param rules: New rules to use. It should be an instance of dict
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from config file.
"""
init(use_conf=False)
_ENFORCER.set_rules(rules, overwrite, use_conf) | [
"def",
"set_rules",
"(",
"rules",
",",
"overwrite",
"=",
"True",
",",
"use_conf",
"=",
"False",
")",
":",
"# pragma: no cover",
"init",
"(",
"use_conf",
"=",
"False",
")",
"_ENFORCER",
".",
"set_rules",
"(",
"rules",
",",
"overwrite",
",",
"use_conf",
")"
... | Set rules based on the provided dict of rules.
Note:
Used in tests only.
:param rules: New rules to use. It should be an instance of dict
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from config file. | [
"Set",
"rules",
"based",
"on",
"the",
"provided",
"dict",
"of",
"rules",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/policy/policy_engine.py#L162-L174 | train | 42,150 |
openstack/monasca-common | monasca_common/policy/policy_engine.py | verify_deprecated_policy | def verify_deprecated_policy(old_policy, new_policy, default_rule, context):
"""Check the rule of the deprecated policy action
If the current rule of the deprecated policy action is set to a non-default
value, then a warning message is logged stating that the new policy
action should be used to dictate permissions as the old policy action is
being deprecated.
:param old_policy: policy action that is being deprecated
:param new_policy: policy action that is replacing old_policy
:param default_rule: the old_policy action default rule value
:param context: the monasca context
"""
if _ENFORCER:
current_rule = str(_ENFORCER.rules[old_policy])
else:
current_rule = None
if current_rule != default_rule:
LOG.warning("Start using the new action '{0}'. The existing "
"action '{1}' is being deprecated and will be "
"removed in future release.".format(new_policy,
old_policy))
target = {'project_id': context.project_id,
'user_id': context.user_id}
return authorize(context=context, action=old_policy, target=target)
else:
return False | python | def verify_deprecated_policy(old_policy, new_policy, default_rule, context):
"""Check the rule of the deprecated policy action
If the current rule of the deprecated policy action is set to a non-default
value, then a warning message is logged stating that the new policy
action should be used to dictate permissions as the old policy action is
being deprecated.
:param old_policy: policy action that is being deprecated
:param new_policy: policy action that is replacing old_policy
:param default_rule: the old_policy action default rule value
:param context: the monasca context
"""
if _ENFORCER:
current_rule = str(_ENFORCER.rules[old_policy])
else:
current_rule = None
if current_rule != default_rule:
LOG.warning("Start using the new action '{0}'. The existing "
"action '{1}' is being deprecated and will be "
"removed in future release.".format(new_policy,
old_policy))
target = {'project_id': context.project_id,
'user_id': context.user_id}
return authorize(context=context, action=old_policy, target=target)
else:
return False | [
"def",
"verify_deprecated_policy",
"(",
"old_policy",
",",
"new_policy",
",",
"default_rule",
",",
"context",
")",
":",
"if",
"_ENFORCER",
":",
"current_rule",
"=",
"str",
"(",
"_ENFORCER",
".",
"rules",
"[",
"old_policy",
"]",
")",
"else",
":",
"current_rule"... | Check the rule of the deprecated policy action
If the current rule of the deprecated policy action is set to a non-default
value, then a warning message is logged stating that the new policy
action should be used to dictate permissions as the old policy action is
being deprecated.
:param old_policy: policy action that is being deprecated
:param new_policy: policy action that is replacing old_policy
:param default_rule: the old_policy action default rule value
:param context: the monasca context | [
"Check",
"the",
"rule",
"of",
"the",
"deprecated",
"policy",
"action"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/policy/policy_engine.py#L177-L206 | train | 42,151 |
partofthething/ace | ace/samples/wang04.py | build_sample_ace_problem_wang04 | def build_sample_ace_problem_wang04(N=100):
"""Build sample problem from Wang 2004."""
x = [numpy.random.uniform(-1, 1, size=N)
for _i in range(0, 5)]
noise = numpy.random.standard_normal(N)
y = numpy.log(4.0 + numpy.sin(4 * x[0]) + numpy.abs(x[1]) + x[2] ** 2 +
x[3] ** 3 + x[4] + 0.1 * noise)
return x, y | python | def build_sample_ace_problem_wang04(N=100):
"""Build sample problem from Wang 2004."""
x = [numpy.random.uniform(-1, 1, size=N)
for _i in range(0, 5)]
noise = numpy.random.standard_normal(N)
y = numpy.log(4.0 + numpy.sin(4 * x[0]) + numpy.abs(x[1]) + x[2] ** 2 +
x[3] ** 3 + x[4] + 0.1 * noise)
return x, y | [
"def",
"build_sample_ace_problem_wang04",
"(",
"N",
"=",
"100",
")",
":",
"x",
"=",
"[",
"numpy",
".",
"random",
".",
"uniform",
"(",
"-",
"1",
",",
"1",
",",
"size",
"=",
"N",
")",
"for",
"_i",
"in",
"range",
"(",
"0",
",",
"5",
")",
"]",
"noi... | Build sample problem from Wang 2004. | [
"Build",
"sample",
"problem",
"from",
"Wang",
"2004",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/samples/wang04.py#L7-L14 | train | 42,152 |
partofthething/ace | ace/samples/wang04.py | run_wang04 | def run_wang04():
"""Run sample problem."""
x, y = build_sample_ace_problem_wang04(N=200)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
ace.plot_transforms(ace_solver, 'ace_transforms_wang04.png')
ace.plot_input(ace_solver, 'ace_input_wang04.png')
except ImportError:
pass
return ace_solver | python | def run_wang04():
"""Run sample problem."""
x, y = build_sample_ace_problem_wang04(N=200)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
ace.plot_transforms(ace_solver, 'ace_transforms_wang04.png')
ace.plot_input(ace_solver, 'ace_input_wang04.png')
except ImportError:
pass
return ace_solver | [
"def",
"run_wang04",
"(",
")",
":",
"x",
",",
"y",
"=",
"build_sample_ace_problem_wang04",
"(",
"N",
"=",
"200",
")",
"ace_solver",
"=",
"ace",
".",
"ACESolver",
"(",
")",
"ace_solver",
".",
"specify_data_set",
"(",
"x",
",",
"y",
")",
"ace_solver",
".",... | Run sample problem. | [
"Run",
"sample",
"problem",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/samples/wang04.py#L16-L28 | train | 42,153 |
robotpy/pyfrc | lib/pyfrc/sim/sim_manager.py | SimManager.add_robot | def add_robot(self, controller):
"""Add a robot controller"""
# connect to the controller
# -> this is to support module robots
controller.on_mode_change(self._on_robot_mode_change)
self.robots.append(controller) | python | def add_robot(self, controller):
"""Add a robot controller"""
# connect to the controller
# -> this is to support module robots
controller.on_mode_change(self._on_robot_mode_change)
self.robots.append(controller) | [
"def",
"add_robot",
"(",
"self",
",",
"controller",
")",
":",
"# connect to the controller",
"# -> this is to support module robots",
"controller",
".",
"on_mode_change",
"(",
"self",
".",
"_on_robot_mode_change",
")",
"self",
".",
"robots",
".",
"append",
"(",
"contr... | Add a robot controller | [
"Add",
"a",
"robot",
"controller"
] | 7672ea3f17c8d4b702a9f18a7372d95feee7e37d | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/sim_manager.py#L42-L48 | train | 42,154 |
robotpy/pyfrc | lib/pyfrc/sim/sim_manager.py | SimManager.set_joystick | def set_joystick(self, x, y, n):
"""
Receives joystick values from the SnakeBoard
x,y Coordinates
n Robot number to give it to
"""
self.robots[n].set_joystick(x, y) | python | def set_joystick(self, x, y, n):
"""
Receives joystick values from the SnakeBoard
x,y Coordinates
n Robot number to give it to
"""
self.robots[n].set_joystick(x, y) | [
"def",
"set_joystick",
"(",
"self",
",",
"x",
",",
"y",
",",
"n",
")",
":",
"self",
".",
"robots",
"[",
"n",
"]",
".",
"set_joystick",
"(",
"x",
",",
"y",
")"
] | Receives joystick values from the SnakeBoard
x,y Coordinates
n Robot number to give it to | [
"Receives",
"joystick",
"values",
"from",
"the",
"SnakeBoard",
"x",
"y",
"Coordinates",
"n",
"Robot",
"number",
"to",
"give",
"it",
"to"
] | 7672ea3f17c8d4b702a9f18a7372d95feee7e37d | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/sim_manager.py#L74-L81 | train | 42,155 |
igorcoding/asynctnt | asynctnt/connection.py | Connection.close | def close(self):
"""
Same as disconnect, but not a coroutine, i.e. it does not wait
for disconnect to finish.
"""
if self._state == ConnectionState.DISCONNECTED:
return
self._set_state(ConnectionState.DISCONNECTING)
logger.info('%s Disconnecting...', self.fingerprint)
if self._reconnect_task and not self._reconnect_task.done():
self._reconnect_task.cancel()
self._reconnect_task = None
if self._ping_task and not self._ping_task.done():
self._ping_task.cancel()
self._ping_task = None
if self._transport:
self._transport.close()
self._transport = None
self._protocol = None
self._disconnect_waiter = None
self._db = _DbMock()
self._set_state(ConnectionState.DISCONNECTED) | python | def close(self):
"""
Same as disconnect, but not a coroutine, i.e. it does not wait
for disconnect to finish.
"""
if self._state == ConnectionState.DISCONNECTED:
return
self._set_state(ConnectionState.DISCONNECTING)
logger.info('%s Disconnecting...', self.fingerprint)
if self._reconnect_task and not self._reconnect_task.done():
self._reconnect_task.cancel()
self._reconnect_task = None
if self._ping_task and not self._ping_task.done():
self._ping_task.cancel()
self._ping_task = None
if self._transport:
self._transport.close()
self._transport = None
self._protocol = None
self._disconnect_waiter = None
self._db = _DbMock()
self._set_state(ConnectionState.DISCONNECTED) | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_state",
"==",
"ConnectionState",
".",
"DISCONNECTED",
":",
"return",
"self",
".",
"_set_state",
"(",
"ConnectionState",
".",
"DISCONNECTING",
")",
"logger",
".",
"info",
"(",
"'%s Disconnecting...'",
... | Same as disconnect, but not a coroutine, i.e. it does not wait
for disconnect to finish. | [
"Same",
"as",
"disconnect",
"but",
"not",
"a",
"coroutine",
"i",
".",
"e",
".",
"it",
"does",
"not",
"wait",
"for",
"disconnect",
"to",
"finish",
"."
] | 6a25833ed6ab4831aeefac596539171693f846fe | https://github.com/igorcoding/asynctnt/blob/6a25833ed6ab4831aeefac596539171693f846fe/asynctnt/connection.py#L401-L428 | train | 42,156 |
igorcoding/asynctnt | asynctnt/connection.py | Connection.eval | def eval(self, expression, args=None, *,
timeout=-1.0, push_subscribe=False) -> _MethodRet:
"""
Eval request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.eval('return 42')
<Response sync=3 rowcount=1 data=[42]>
>>> await conn.eval('return box.info.version')
<Response sync=3 rowcount=1 data=['2.1.1-7-gd381a45b6']>
:param expression: expression to execute
:param args: arguments to pass to the function, that will
execute your expression (list object)
:param timeout: Request timeout
:param push_subscribe: Subscribe to push messages
:returns: :class:`asynctnt.Response` instance
"""
return self._db.eval(expression, args,
timeout=timeout, push_subscribe=push_subscribe) | python | def eval(self, expression, args=None, *,
timeout=-1.0, push_subscribe=False) -> _MethodRet:
"""
Eval request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.eval('return 42')
<Response sync=3 rowcount=1 data=[42]>
>>> await conn.eval('return box.info.version')
<Response sync=3 rowcount=1 data=['2.1.1-7-gd381a45b6']>
:param expression: expression to execute
:param args: arguments to pass to the function, that will
execute your expression (list object)
:param timeout: Request timeout
:param push_subscribe: Subscribe to push messages
:returns: :class:`asynctnt.Response` instance
"""
return self._db.eval(expression, args,
timeout=timeout, push_subscribe=push_subscribe) | [
"def",
"eval",
"(",
"self",
",",
"expression",
",",
"args",
"=",
"None",
",",
"*",
",",
"timeout",
"=",
"-",
"1.0",
",",
"push_subscribe",
"=",
"False",
")",
"->",
"_MethodRet",
":",
"return",
"self",
".",
"_db",
".",
"eval",
"(",
"expression",
",",
... | Eval request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.eval('return 42')
<Response sync=3 rowcount=1 data=[42]>
>>> await conn.eval('return box.info.version')
<Response sync=3 rowcount=1 data=['2.1.1-7-gd381a45b6']>
:param expression: expression to execute
:param args: arguments to pass to the function, that will
execute your expression (list object)
:param timeout: Request timeout
:param push_subscribe: Subscribe to push messages
:returns: :class:`asynctnt.Response` instance | [
"Eval",
"request",
"coroutine",
"."
] | 6a25833ed6ab4831aeefac596539171693f846fe | https://github.com/igorcoding/asynctnt/blob/6a25833ed6ab4831aeefac596539171693f846fe/asynctnt/connection.py#L654-L679 | train | 42,157 |
igorcoding/asynctnt | asynctnt/connection.py | Connection.select | def select(self, space, key=None, **kwargs) -> _MethodRet:
"""
Select request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.select('tester')
<Response sync=3 rowcount=2 data=[
<TarantoolTuple id=1 name='one'>,
<TarantoolTuple id=2 name='two'>
]>
>>> res = await conn.select('_space', ['tester'], index='name')
>>> res.data
[<TarantoolTuple id=512
owner=1
name='tester'
engine='memtx'
field_count=0
flags={}
format=[
{'name': 'id', 'type': 'unsigned'},
{'name': 'name', 'type': 'string'}
]>]
:param space: space id or space name.
:param key: key to select
:param offset: offset to use
:param limit: limit to use
:param index: index id or name
:param iterator: one of the following
* iterator id (int number),
* :class:`asynctnt.Iterator` object
* string with an iterator name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.select(space, key, **kwargs) | python | def select(self, space, key=None, **kwargs) -> _MethodRet:
"""
Select request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.select('tester')
<Response sync=3 rowcount=2 data=[
<TarantoolTuple id=1 name='one'>,
<TarantoolTuple id=2 name='two'>
]>
>>> res = await conn.select('_space', ['tester'], index='name')
>>> res.data
[<TarantoolTuple id=512
owner=1
name='tester'
engine='memtx'
field_count=0
flags={}
format=[
{'name': 'id', 'type': 'unsigned'},
{'name': 'name', 'type': 'string'}
]>]
:param space: space id or space name.
:param key: key to select
:param offset: offset to use
:param limit: limit to use
:param index: index id or name
:param iterator: one of the following
* iterator id (int number),
* :class:`asynctnt.Iterator` object
* string with an iterator name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.select(space, key, **kwargs) | [
"def",
"select",
"(",
"self",
",",
"space",
",",
"key",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"_MethodRet",
":",
"return",
"self",
".",
"_db",
".",
"select",
"(",
"space",
",",
"key",
",",
"*",
"*",
"kwargs",
")"
] | Select request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.select('tester')
<Response sync=3 rowcount=2 data=[
<TarantoolTuple id=1 name='one'>,
<TarantoolTuple id=2 name='two'>
]>
>>> res = await conn.select('_space', ['tester'], index='name')
>>> res.data
[<TarantoolTuple id=512
owner=1
name='tester'
engine='memtx'
field_count=0
flags={}
format=[
{'name': 'id', 'type': 'unsigned'},
{'name': 'name', 'type': 'string'}
]>]
:param space: space id or space name.
:param key: key to select
:param offset: offset to use
:param limit: limit to use
:param index: index id or name
:param iterator: one of the following
* iterator id (int number),
* :class:`asynctnt.Iterator` object
* string with an iterator name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance | [
"Select",
"request",
"coroutine",
"."
] | 6a25833ed6ab4831aeefac596539171693f846fe | https://github.com/igorcoding/asynctnt/blob/6a25833ed6ab4831aeefac596539171693f846fe/asynctnt/connection.py#L681-L724 | train | 42,158 |
igorcoding/asynctnt | asynctnt/connection.py | Connection.insert | def insert(self, space, t, *, replace=False, timeout=-1) -> _MethodRet:
"""
Insert request coroutine.
Examples:
.. code-block:: pycon
# Basic usage
>>> await conn.insert('tester', [0, 'hello'])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
# Using dict as an argument tuple
>>> await conn.insert('tester', {
... 'id': 0
... 'text': 'hell0'
... })
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
:param space: space id or space name.
:param t: tuple to insert (list object)
:param replace: performs replace request instead of insert
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.insert(space, t,
replace=replace,
timeout=timeout) | python | def insert(self, space, t, *, replace=False, timeout=-1) -> _MethodRet:
"""
Insert request coroutine.
Examples:
.. code-block:: pycon
# Basic usage
>>> await conn.insert('tester', [0, 'hello'])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
# Using dict as an argument tuple
>>> await conn.insert('tester', {
... 'id': 0
... 'text': 'hell0'
... })
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
:param space: space id or space name.
:param t: tuple to insert (list object)
:param replace: performs replace request instead of insert
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.insert(space, t,
replace=replace,
timeout=timeout) | [
"def",
"insert",
"(",
"self",
",",
"space",
",",
"t",
",",
"*",
",",
"replace",
"=",
"False",
",",
"timeout",
"=",
"-",
"1",
")",
"->",
"_MethodRet",
":",
"return",
"self",
".",
"_db",
".",
"insert",
"(",
"space",
",",
"t",
",",
"replace",
"=",
... | Insert request coroutine.
Examples:
.. code-block:: pycon
# Basic usage
>>> await conn.insert('tester', [0, 'hello'])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
# Using dict as an argument tuple
>>> await conn.insert('tester', {
... 'id': 0
... 'text': 'hell0'
... })
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
:param space: space id or space name.
:param t: tuple to insert (list object)
:param replace: performs replace request instead of insert
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance | [
"Insert",
"request",
"coroutine",
"."
] | 6a25833ed6ab4831aeefac596539171693f846fe | https://github.com/igorcoding/asynctnt/blob/6a25833ed6ab4831aeefac596539171693f846fe/asynctnt/connection.py#L726-L758 | train | 42,159 |
igorcoding/asynctnt | asynctnt/connection.py | Connection.replace | def replace(self, space, t, *, timeout=-1.0) -> _MethodRet:
"""
Replace request coroutine. Same as insert, but replace.
:param space: space id or space name.
:param t: tuple to insert (list object)
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.replace(space, t, timeout=timeout) | python | def replace(self, space, t, *, timeout=-1.0) -> _MethodRet:
"""
Replace request coroutine. Same as insert, but replace.
:param space: space id or space name.
:param t: tuple to insert (list object)
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.replace(space, t, timeout=timeout) | [
"def",
"replace",
"(",
"self",
",",
"space",
",",
"t",
",",
"*",
",",
"timeout",
"=",
"-",
"1.0",
")",
"->",
"_MethodRet",
":",
"return",
"self",
".",
"_db",
".",
"replace",
"(",
"space",
",",
"t",
",",
"timeout",
"=",
"timeout",
")"
] | Replace request coroutine. Same as insert, but replace.
:param space: space id or space name.
:param t: tuple to insert (list object)
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance | [
"Replace",
"request",
"coroutine",
".",
"Same",
"as",
"insert",
"but",
"replace",
"."
] | 6a25833ed6ab4831aeefac596539171693f846fe | https://github.com/igorcoding/asynctnt/blob/6a25833ed6ab4831aeefac596539171693f846fe/asynctnt/connection.py#L760-L770 | train | 42,160 |
igorcoding/asynctnt | asynctnt/connection.py | Connection.delete | def delete(self, space, key, **kwargs) -> _MethodRet:
"""
Delete request coroutine.
Examples:
.. code-block:: pycon
# Assuming tuple [0, 'hello'] is in space tester
>>> await conn.delete('tester', [0])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
:param space: space id or space name.
:param key: key to delete
:param index: index id or name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.delete(space, key, **kwargs) | python | def delete(self, space, key, **kwargs) -> _MethodRet:
"""
Delete request coroutine.
Examples:
.. code-block:: pycon
# Assuming tuple [0, 'hello'] is in space tester
>>> await conn.delete('tester', [0])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
:param space: space id or space name.
:param key: key to delete
:param index: index id or name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.delete(space, key, **kwargs) | [
"def",
"delete",
"(",
"self",
",",
"space",
",",
"key",
",",
"*",
"*",
"kwargs",
")",
"->",
"_MethodRet",
":",
"return",
"self",
".",
"_db",
".",
"delete",
"(",
"space",
",",
"key",
",",
"*",
"*",
"kwargs",
")"
] | Delete request coroutine.
Examples:
.. code-block:: pycon
# Assuming tuple [0, 'hello'] is in space tester
>>> await conn.delete('tester', [0])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hello'>
]>
:param space: space id or space name.
:param key: key to delete
:param index: index id or name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance | [
"Delete",
"request",
"coroutine",
"."
] | 6a25833ed6ab4831aeefac596539171693f846fe | https://github.com/igorcoding/asynctnt/blob/6a25833ed6ab4831aeefac596539171693f846fe/asynctnt/connection.py#L772-L794 | train | 42,161 |
igorcoding/asynctnt | asynctnt/connection.py | Connection.update | def update(self, space, key, operations, **kwargs) -> _MethodRet:
"""
Update request coroutine.
Examples:
.. code-block:: pycon
# Assuming tuple [0, 'hello'] is in space tester
>>> await conn.update('tester', [0], [ ['=', 1, 'hi!'] ])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hi!'>
]>
# you can use fields names as well
>>> res = await conn.update('tester', [0],
... [ ['=', 'text', 'hola'] ])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hola'>
]>
:param space: space id or space name.
:param key: key to update
:param operations:
Operations list of the following format:
[ [op_type, field_no, ...], ... ]. Please refer to
https://tarantool.org/doc/book/box/box_space.html?highlight=update#lua-function.space_object.update
You can use field numbers as well as their names in space
format as a field_no (if only fetch_schema is True).
If field is unknown then TarantoolSchemaError is raised.
:param index: index id or name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.update(space, key, operations, **kwargs) | python | def update(self, space, key, operations, **kwargs) -> _MethodRet:
"""
Update request coroutine.
Examples:
.. code-block:: pycon
# Assuming tuple [0, 'hello'] is in space tester
>>> await conn.update('tester', [0], [ ['=', 1, 'hi!'] ])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hi!'>
]>
# you can use fields names as well
>>> res = await conn.update('tester', [0],
... [ ['=', 'text', 'hola'] ])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hola'>
]>
:param space: space id or space name.
:param key: key to update
:param operations:
Operations list of the following format:
[ [op_type, field_no, ...], ... ]. Please refer to
https://tarantool.org/doc/book/box/box_space.html?highlight=update#lua-function.space_object.update
You can use field numbers as well as their names in space
format as a field_no (if only fetch_schema is True).
If field is unknown then TarantoolSchemaError is raised.
:param index: index id or name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.update(space, key, operations, **kwargs) | [
"def",
"update",
"(",
"self",
",",
"space",
",",
"key",
",",
"operations",
",",
"*",
"*",
"kwargs",
")",
"->",
"_MethodRet",
":",
"return",
"self",
".",
"_db",
".",
"update",
"(",
"space",
",",
"key",
",",
"operations",
",",
"*",
"*",
"kwargs",
")"... | Update request coroutine.
Examples:
.. code-block:: pycon
# Assuming tuple [0, 'hello'] is in space tester
>>> await conn.update('tester', [0], [ ['=', 1, 'hi!'] ])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hi!'>
]>
# you can use fields names as well
>>> res = await conn.update('tester', [0],
... [ ['=', 'text', 'hola'] ])
<Response sync=3 rowcount=1 data=[
<TarantoolTuple id=0 name='hola'>
]>
:param space: space id or space name.
:param key: key to update
:param operations:
Operations list of the following format:
[ [op_type, field_no, ...], ... ]. Please refer to
https://tarantool.org/doc/book/box/box_space.html?highlight=update#lua-function.space_object.update
You can use field numbers as well as their names in space
format as a field_no (if only fetch_schema is True).
If field is unknown then TarantoolSchemaError is raised.
:param index: index id or name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance | [
"Update",
"request",
"coroutine",
"."
] | 6a25833ed6ab4831aeefac596539171693f846fe | https://github.com/igorcoding/asynctnt/blob/6a25833ed6ab4831aeefac596539171693f846fe/asynctnt/connection.py#L796-L832 | train | 42,162 |
partofthething/ace | ace/supersmoother.py | SuperSmoother.compute | def compute(self):
"""Run the SuperSmoother."""
self._compute_primary_smooths()
self._smooth_the_residuals()
self._select_best_smooth_at_each_point()
self._enhance_bass()
self._smooth_best_span_estimates()
self._apply_best_spans_to_primaries()
self._smooth_interpolated_smooth()
self._store_unsorted_results(self.smooth_result, numpy.zeros(len(self.smooth_result))) | python | def compute(self):
"""Run the SuperSmoother."""
self._compute_primary_smooths()
self._smooth_the_residuals()
self._select_best_smooth_at_each_point()
self._enhance_bass()
self._smooth_best_span_estimates()
self._apply_best_spans_to_primaries()
self._smooth_interpolated_smooth()
self._store_unsorted_results(self.smooth_result, numpy.zeros(len(self.smooth_result))) | [
"def",
"compute",
"(",
"self",
")",
":",
"self",
".",
"_compute_primary_smooths",
"(",
")",
"self",
".",
"_smooth_the_residuals",
"(",
")",
"self",
".",
"_select_best_smooth_at_each_point",
"(",
")",
"self",
".",
"_enhance_bass",
"(",
")",
"self",
".",
"_smoot... | Run the SuperSmoother. | [
"Run",
"the",
"SuperSmoother",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/supersmoother.py#L53-L62 | train | 42,163 |
partofthething/ace | ace/supersmoother.py | SuperSmoother._compute_primary_smooths | def _compute_primary_smooths(self):
"""Compute fixed-span smooths with all of the default spans."""
for span in DEFAULT_SPANS:
smooth = smoother.perform_smooth(self.x, self.y, span)
self._primary_smooths.append(smooth) | python | def _compute_primary_smooths(self):
"""Compute fixed-span smooths with all of the default spans."""
for span in DEFAULT_SPANS:
smooth = smoother.perform_smooth(self.x, self.y, span)
self._primary_smooths.append(smooth) | [
"def",
"_compute_primary_smooths",
"(",
"self",
")",
":",
"for",
"span",
"in",
"DEFAULT_SPANS",
":",
"smooth",
"=",
"smoother",
".",
"perform_smooth",
"(",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"span",
")",
"self",
".",
"_primary_smooths",
".",
"... | Compute fixed-span smooths with all of the default spans. | [
"Compute",
"fixed",
"-",
"span",
"smooths",
"with",
"all",
"of",
"the",
"default",
"spans",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/supersmoother.py#L64-L68 | train | 42,164 |
partofthething/ace | ace/supersmoother.py | SuperSmoother._smooth_the_residuals | def _smooth_the_residuals(self):
"""
Apply the MID_SPAN to the residuals of the primary smooths.
"For stability reasons, it turns out to be a little better to smooth
|r_{i}(J)| against xi" - [1]
"""
for primary_smooth in self._primary_smooths:
smooth = smoother.perform_smooth(self.x,
primary_smooth.cross_validated_residual,
MID_SPAN)
self._residual_smooths.append(smooth.smooth_result) | python | def _smooth_the_residuals(self):
"""
Apply the MID_SPAN to the residuals of the primary smooths.
"For stability reasons, it turns out to be a little better to smooth
|r_{i}(J)| against xi" - [1]
"""
for primary_smooth in self._primary_smooths:
smooth = smoother.perform_smooth(self.x,
primary_smooth.cross_validated_residual,
MID_SPAN)
self._residual_smooths.append(smooth.smooth_result) | [
"def",
"_smooth_the_residuals",
"(",
"self",
")",
":",
"for",
"primary_smooth",
"in",
"self",
".",
"_primary_smooths",
":",
"smooth",
"=",
"smoother",
".",
"perform_smooth",
"(",
"self",
".",
"x",
",",
"primary_smooth",
".",
"cross_validated_residual",
",",
"MID... | Apply the MID_SPAN to the residuals of the primary smooths.
"For stability reasons, it turns out to be a little better to smooth
|r_{i}(J)| against xi" - [1] | [
"Apply",
"the",
"MID_SPAN",
"to",
"the",
"residuals",
"of",
"the",
"primary",
"smooths",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/supersmoother.py#L70-L81 | train | 42,165 |
partofthething/ace | ace/supersmoother.py | SuperSmoother._smooth_best_span_estimates | def _smooth_best_span_estimates(self):
"""Apply a MID_SPAN smooth to the best span estimates at each observation."""
self._smoothed_best_spans = smoother.perform_smooth(self.x,
self._best_span_at_each_point,
MID_SPAN) | python | def _smooth_best_span_estimates(self):
"""Apply a MID_SPAN smooth to the best span estimates at each observation."""
self._smoothed_best_spans = smoother.perform_smooth(self.x,
self._best_span_at_each_point,
MID_SPAN) | [
"def",
"_smooth_best_span_estimates",
"(",
"self",
")",
":",
"self",
".",
"_smoothed_best_spans",
"=",
"smoother",
".",
"perform_smooth",
"(",
"self",
".",
"x",
",",
"self",
".",
"_best_span_at_each_point",
",",
"MID_SPAN",
")"
] | Apply a MID_SPAN smooth to the best span estimates at each observation. | [
"Apply",
"a",
"MID_SPAN",
"smooth",
"to",
"the",
"best",
"span",
"estimates",
"at",
"each",
"observation",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/supersmoother.py#L113-L117 | train | 42,166 |
partofthething/ace | ace/supersmoother.py | SuperSmoother._apply_best_spans_to_primaries | def _apply_best_spans_to_primaries(self):
"""
Apply best spans.
Given the best span, interpolate to compute the best smoothed value
at each observation.
"""
self.smooth_result = []
for xi, best_span in enumerate(self._smoothed_best_spans.smooth_result):
primary_values = [s.smooth_result[xi] for s in self._primary_smooths]
# pylint: disable=no-member
best_value = numpy.interp(best_span, DEFAULT_SPANS, primary_values)
self.smooth_result.append(best_value) | python | def _apply_best_spans_to_primaries(self):
"""
Apply best spans.
Given the best span, interpolate to compute the best smoothed value
at each observation.
"""
self.smooth_result = []
for xi, best_span in enumerate(self._smoothed_best_spans.smooth_result):
primary_values = [s.smooth_result[xi] for s in self._primary_smooths]
# pylint: disable=no-member
best_value = numpy.interp(best_span, DEFAULT_SPANS, primary_values)
self.smooth_result.append(best_value) | [
"def",
"_apply_best_spans_to_primaries",
"(",
"self",
")",
":",
"self",
".",
"smooth_result",
"=",
"[",
"]",
"for",
"xi",
",",
"best_span",
"in",
"enumerate",
"(",
"self",
".",
"_smoothed_best_spans",
".",
"smooth_result",
")",
":",
"primary_values",
"=",
"[",... | Apply best spans.
Given the best span, interpolate to compute the best smoothed value
at each observation. | [
"Apply",
"best",
"spans",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/supersmoother.py#L119-L131 | train | 42,167 |
partofthething/ace | ace/supersmoother.py | SuperSmoother._smooth_interpolated_smooth | def _smooth_interpolated_smooth(self):
"""
Smooth interpolated results with tweeter span.
A final step of the supersmoother is to smooth the interpolated values with
the tweeter span. This is done in Breiman's supsmu.f but is not explicitly
discussed in the publication. This step is necessary to match
the FORTRAN version perfectly.
"""
smoothed_results = smoother.perform_smooth(self.x,
self.smooth_result,
TWEETER_SPAN)
self.smooth_result = smoothed_results.smooth_result | python | def _smooth_interpolated_smooth(self):
"""
Smooth interpolated results with tweeter span.
A final step of the supersmoother is to smooth the interpolated values with
the tweeter span. This is done in Breiman's supsmu.f but is not explicitly
discussed in the publication. This step is necessary to match
the FORTRAN version perfectly.
"""
smoothed_results = smoother.perform_smooth(self.x,
self.smooth_result,
TWEETER_SPAN)
self.smooth_result = smoothed_results.smooth_result | [
"def",
"_smooth_interpolated_smooth",
"(",
"self",
")",
":",
"smoothed_results",
"=",
"smoother",
".",
"perform_smooth",
"(",
"self",
".",
"x",
",",
"self",
".",
"smooth_result",
",",
"TWEETER_SPAN",
")",
"self",
".",
"smooth_result",
"=",
"smoothed_results",
".... | Smooth interpolated results with tweeter span.
A final step of the supersmoother is to smooth the interpolated values with
the tweeter span. This is done in Breiman's supsmu.f but is not explicitly
discussed in the publication. This step is necessary to match
the FORTRAN version perfectly. | [
"Smooth",
"interpolated",
"results",
"with",
"tweeter",
"span",
"."
] | 1593a49f3c2e845514323e9c36ee253fe77bac3c | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/supersmoother.py#L133-L145 | train | 42,168 |
openstack/monasca-common | monasca_common/kafka_lib/consumer/multiprocess.py | _mp_consume | def _mp_consume(client, group, topic, queue, size, events, **consumer_options):
"""
A child process worker which consumes messages based on the
notifications given by the controller process
NOTE: Ideally, this should have been a method inside the Consumer
class. However, multiprocessing module has issues in windows. The
functionality breaks unless this function is kept outside of a class
"""
# Initial interval for retries in seconds.
interval = 1
while not events.exit.is_set():
try:
# Make the child processes open separate socket connections
client.reinit()
# We will start consumers without auto-commit. Auto-commit will be
# done by the master controller process.
consumer = SimpleConsumer(client, group, topic,
auto_commit=False,
auto_commit_every_n=None,
auto_commit_every_t=None,
**consumer_options)
# Ensure that the consumer provides the partition information
consumer.provide_partition_info()
while True:
# Wait till the controller indicates us to start consumption
events.start.wait()
# If we are asked to quit, do so
if events.exit.is_set():
break
# Consume messages and add them to the queue. If the controller
# indicates a specific number of messages, follow that advice
count = 0
message = consumer.get_message()
if message:
while True:
try:
queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)
break
except queue.Full:
if events.exit.is_set():
break
count += 1
# We have reached the required size. The controller might have
# more than what he needs. Wait for a while.
# Without this logic, it is possible that we run into a big
# loop consuming all available messages before the controller
# can reset the 'start' event
if count == size.value:
events.pause.wait()
else:
# In case we did not receive any message, give up the CPU for
# a while before we try again
time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
consumer.stop()
except KafkaError as e:
# Retry with exponential backoff
log.error(
"Problem communicating with Kafka (%s), retrying in %d seconds..." % (e, interval))
time.sleep(interval)
interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS | python | def _mp_consume(client, group, topic, queue, size, events, **consumer_options):
"""
A child process worker which consumes messages based on the
notifications given by the controller process
NOTE: Ideally, this should have been a method inside the Consumer
class. However, multiprocessing module has issues in windows. The
functionality breaks unless this function is kept outside of a class
"""
# Initial interval for retries in seconds.
interval = 1
while not events.exit.is_set():
try:
# Make the child processes open separate socket connections
client.reinit()
# We will start consumers without auto-commit. Auto-commit will be
# done by the master controller process.
consumer = SimpleConsumer(client, group, topic,
auto_commit=False,
auto_commit_every_n=None,
auto_commit_every_t=None,
**consumer_options)
# Ensure that the consumer provides the partition information
consumer.provide_partition_info()
while True:
# Wait till the controller indicates us to start consumption
events.start.wait()
# If we are asked to quit, do so
if events.exit.is_set():
break
# Consume messages and add them to the queue. If the controller
# indicates a specific number of messages, follow that advice
count = 0
message = consumer.get_message()
if message:
while True:
try:
queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)
break
except queue.Full:
if events.exit.is_set():
break
count += 1
# We have reached the required size. The controller might have
# more than what he needs. Wait for a while.
# Without this logic, it is possible that we run into a big
# loop consuming all available messages before the controller
# can reset the 'start' event
if count == size.value:
events.pause.wait()
else:
# In case we did not receive any message, give up the CPU for
# a while before we try again
time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
consumer.stop()
except KafkaError as e:
# Retry with exponential backoff
log.error(
"Problem communicating with Kafka (%s), retrying in %d seconds..." % (e, interval))
time.sleep(interval)
interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS | [
"def",
"_mp_consume",
"(",
"client",
",",
"group",
",",
"topic",
",",
"queue",
",",
"size",
",",
"events",
",",
"*",
"*",
"consumer_options",
")",
":",
"# Initial interval for retries in seconds.",
"interval",
"=",
"1",
"while",
"not",
"events",
".",
"exit",
... | A child process worker which consumes messages based on the
notifications given by the controller process
NOTE: Ideally, this should have been a method inside the Consumer
class. However, multiprocessing module has issues in windows. The
functionality breaks unless this function is kept outside of a class | [
"A",
"child",
"process",
"worker",
"which",
"consumes",
"messages",
"based",
"on",
"the",
"notifications",
"given",
"by",
"the",
"controller",
"process"
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/consumer/multiprocess.py#L40-L112 | train | 42,169 |
robotpy/pyfrc | lib/pyfrc/sim/field/elements.py | DrawableElement.rotate | def rotate(self, angle):
"""
This works. Rotates the object about its center.
Angle is specified in radians
"""
self.angle = (self.angle + angle) % (math.pi * 2.0)
# precalculate these parameters
c = math.cos(angle)
s = math.sin(angle)
px, py = self.center
def _rotate_point(xy):
x, y = xy
x = x - px
y = y - py
return (x * c - y * s) + px, (x * s + y * c) + py
# calculate rotation for each point
self.pts = [p for p in map(lambda x: _rotate_point(x), self.pts)] | python | def rotate(self, angle):
"""
This works. Rotates the object about its center.
Angle is specified in radians
"""
self.angle = (self.angle + angle) % (math.pi * 2.0)
# precalculate these parameters
c = math.cos(angle)
s = math.sin(angle)
px, py = self.center
def _rotate_point(xy):
x, y = xy
x = x - px
y = y - py
return (x * c - y * s) + px, (x * s + y * c) + py
# calculate rotation for each point
self.pts = [p for p in map(lambda x: _rotate_point(x), self.pts)] | [
"def",
"rotate",
"(",
"self",
",",
"angle",
")",
":",
"self",
".",
"angle",
"=",
"(",
"self",
".",
"angle",
"+",
"angle",
")",
"%",
"(",
"math",
".",
"pi",
"*",
"2.0",
")",
"# precalculate these parameters",
"c",
"=",
"math",
".",
"cos",
"(",
"angl... | This works. Rotates the object about its center.
Angle is specified in radians | [
"This",
"works",
".",
"Rotates",
"the",
"object",
"about",
"its",
"center",
".",
"Angle",
"is",
"specified",
"in",
"radians"
] | 7672ea3f17c8d4b702a9f18a7372d95feee7e37d | https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/field/elements.py#L55-L79 | train | 42,170 |
openstack/monasca-common | monasca_common/confluent_kafka/producer.py | KafkaProducer.publish | def publish(self, topic, messages, key=None, timeout=2):
"""
Publish messages to the topic.
:param str topic: Topic to produce messages to.
:param list(str) messages: List of message payloads.
:param str key: Message key.
:param float timeout: Maximum time to block in seconds.
:returns: Number of messages still in queue.
:rtype int
"""
if not isinstance(messages, list):
messages = [messages]
try:
for m in messages:
m = encodeutils.safe_encode(m, incoming='utf-8')
self._producer.produce(topic, m, key,
callback=KafkaProducer.delivery_report)
self._producer.poll(0)
return self._producer.flush(timeout)
except (BufferError, confluent_kafka.KafkaException,
NotImplementedError):
log.exception(u'Error publishing to {} topic.'.format(topic))
raise | python | def publish(self, topic, messages, key=None, timeout=2):
"""
Publish messages to the topic.
:param str topic: Topic to produce messages to.
:param list(str) messages: List of message payloads.
:param str key: Message key.
:param float timeout: Maximum time to block in seconds.
:returns: Number of messages still in queue.
:rtype int
"""
if not isinstance(messages, list):
messages = [messages]
try:
for m in messages:
m = encodeutils.safe_encode(m, incoming='utf-8')
self._producer.produce(topic, m, key,
callback=KafkaProducer.delivery_report)
self._producer.poll(0)
return self._producer.flush(timeout)
except (BufferError, confluent_kafka.KafkaException,
NotImplementedError):
log.exception(u'Error publishing to {} topic.'.format(topic))
raise | [
"def",
"publish",
"(",
"self",
",",
"topic",
",",
"messages",
",",
"key",
"=",
"None",
",",
"timeout",
"=",
"2",
")",
":",
"if",
"not",
"isinstance",
"(",
"messages",
",",
"list",
")",
":",
"messages",
"=",
"[",
"messages",
"]",
"try",
":",
"for",
... | Publish messages to the topic.
:param str topic: Topic to produce messages to.
:param list(str) messages: List of message payloads.
:param str key: Message key.
:param float timeout: Maximum time to block in seconds.
:returns: Number of messages still in queue.
:rtype int | [
"Publish",
"messages",
"to",
"the",
"topic",
"."
] | 61e2e00454734e2881611abec8df0d85bf7655ac | https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/confluent_kafka/producer.py#L57-L84 | train | 42,171 |
StagPython/StagPy | stagpy/misc.py | out_name | def out_name(stem, timestep=None):
"""Return StagPy out file name.
Args:
stem (str): short description of file content.
timestep (int): timestep if relevant.
Returns:
str: the output file name.
Other Parameters:
conf.core.outname (str): the generic name stem, defaults to
``'stagpy'``.
"""
if timestep is not None:
stem = (stem + INT_FMT).format(timestep)
return conf.core.outname + '_' + stem | python | def out_name(stem, timestep=None):
"""Return StagPy out file name.
Args:
stem (str): short description of file content.
timestep (int): timestep if relevant.
Returns:
str: the output file name.
Other Parameters:
conf.core.outname (str): the generic name stem, defaults to
``'stagpy'``.
"""
if timestep is not None:
stem = (stem + INT_FMT).format(timestep)
return conf.core.outname + '_' + stem | [
"def",
"out_name",
"(",
"stem",
",",
"timestep",
"=",
"None",
")",
":",
"if",
"timestep",
"is",
"not",
"None",
":",
"stem",
"=",
"(",
"stem",
"+",
"INT_FMT",
")",
".",
"format",
"(",
"timestep",
")",
"return",
"conf",
".",
"core",
".",
"outname",
"... | Return StagPy out file name.
Args:
stem (str): short description of file content.
timestep (int): timestep if relevant.
Returns:
str: the output file name.
Other Parameters:
conf.core.outname (str): the generic name stem, defaults to
``'stagpy'``. | [
"Return",
"StagPy",
"out",
"file",
"name",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L15-L31 | train | 42,172 |
StagPython/StagPy | stagpy/misc.py | saveplot | def saveplot(fig, *name_args, close=True, **name_kwargs):
"""Save matplotlib figure.
You need to provide :data:`stem` as a positional or keyword argument (see
:func:`out_name`).
Args:
fig (:class:`matplotlib.figure.Figure`): matplotlib figure.
close (bool): whether to close the figure.
name_args: positional arguments passed on to :func:`out_name`.
name_kwargs: keyword arguments passed on to :func:`out_name`.
"""
oname = out_name(*name_args, **name_kwargs)
fig.savefig('{}.{}'.format(oname, conf.plot.format),
format=conf.plot.format, bbox_inches='tight')
if close:
plt.close(fig) | python | def saveplot(fig, *name_args, close=True, **name_kwargs):
"""Save matplotlib figure.
You need to provide :data:`stem` as a positional or keyword argument (see
:func:`out_name`).
Args:
fig (:class:`matplotlib.figure.Figure`): matplotlib figure.
close (bool): whether to close the figure.
name_args: positional arguments passed on to :func:`out_name`.
name_kwargs: keyword arguments passed on to :func:`out_name`.
"""
oname = out_name(*name_args, **name_kwargs)
fig.savefig('{}.{}'.format(oname, conf.plot.format),
format=conf.plot.format, bbox_inches='tight')
if close:
plt.close(fig) | [
"def",
"saveplot",
"(",
"fig",
",",
"*",
"name_args",
",",
"close",
"=",
"True",
",",
"*",
"*",
"name_kwargs",
")",
":",
"oname",
"=",
"out_name",
"(",
"*",
"name_args",
",",
"*",
"*",
"name_kwargs",
")",
"fig",
".",
"savefig",
"(",
"'{}.{}'",
".",
... | Save matplotlib figure.
You need to provide :data:`stem` as a positional or keyword argument (see
:func:`out_name`).
Args:
fig (:class:`matplotlib.figure.Figure`): matplotlib figure.
close (bool): whether to close the figure.
name_args: positional arguments passed on to :func:`out_name`.
name_kwargs: keyword arguments passed on to :func:`out_name`. | [
"Save",
"matplotlib",
"figure",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L34-L50 | train | 42,173 |
StagPython/StagPy | stagpy/misc.py | baredoc | def baredoc(obj):
"""Return the first line of the docstring of an object.
Trailing periods and spaces as well as leading spaces are removed from the
output.
Args:
obj: any Python object.
Returns:
str: the first line of the docstring of obj.
"""
doc = getdoc(obj)
if not doc:
return ''
doc = doc.splitlines()[0]
return doc.rstrip(' .').lstrip() | python | def baredoc(obj):
"""Return the first line of the docstring of an object.
Trailing periods and spaces as well as leading spaces are removed from the
output.
Args:
obj: any Python object.
Returns:
str: the first line of the docstring of obj.
"""
doc = getdoc(obj)
if not doc:
return ''
doc = doc.splitlines()[0]
return doc.rstrip(' .').lstrip() | [
"def",
"baredoc",
"(",
"obj",
")",
":",
"doc",
"=",
"getdoc",
"(",
"obj",
")",
"if",
"not",
"doc",
":",
"return",
"''",
"doc",
"=",
"doc",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"return",
"doc",
".",
"rstrip",
"(",
"' .'",
")",
".",
"lstri... | Return the first line of the docstring of an object.
Trailing periods and spaces as well as leading spaces are removed from the
output.
Args:
obj: any Python object.
Returns:
str: the first line of the docstring of obj. | [
"Return",
"the",
"first",
"line",
"of",
"the",
"docstring",
"of",
"an",
"object",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L53-L68 | train | 42,174 |
StagPython/StagPy | stagpy/misc.py | fmttime | def fmttime(tin):
"""Return LaTeX expression with time in scientific notation.
Args:
tin (float): the time.
Returns:
str: the LaTeX expression.
"""
aaa, bbb = '{:.2e}'.format(tin).split('e')
bbb = int(bbb)
return r'$t={} \times 10^{{{}}}$'.format(aaa, bbb) | python | def fmttime(tin):
"""Return LaTeX expression with time in scientific notation.
Args:
tin (float): the time.
Returns:
str: the LaTeX expression.
"""
aaa, bbb = '{:.2e}'.format(tin).split('e')
bbb = int(bbb)
return r'$t={} \times 10^{{{}}}$'.format(aaa, bbb) | [
"def",
"fmttime",
"(",
"tin",
")",
":",
"aaa",
",",
"bbb",
"=",
"'{:.2e}'",
".",
"format",
"(",
"tin",
")",
".",
"split",
"(",
"'e'",
")",
"bbb",
"=",
"int",
"(",
"bbb",
")",
"return",
"r'$t={} \\times 10^{{{}}}$'",
".",
"format",
"(",
"aaa",
",",
... | Return LaTeX expression with time in scientific notation.
Args:
tin (float): the time.
Returns:
str: the LaTeX expression. | [
"Return",
"LaTeX",
"expression",
"with",
"time",
"in",
"scientific",
"notation",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L71-L81 | train | 42,175 |
StagPython/StagPy | stagpy/misc.py | list_of_vars | def list_of_vars(arg_plot):
"""Construct list of variables per plot.
Args:
arg_plot (str): string with variable names separated with
``_`` (figures), ``.`` (subplots) and ``,`` (same subplot).
Returns:
three nested lists of str
- variables on the same subplot;
- subplots on the same figure;
- figures.
"""
lovs = [[[var for var in svars.split(',') if var]
for svars in pvars.split('.') if svars]
for pvars in arg_plot.split('-') if pvars]
lovs = [[slov for slov in lov if slov] for lov in lovs if lov]
return [lov for lov in lovs if lov] | python | def list_of_vars(arg_plot):
"""Construct list of variables per plot.
Args:
arg_plot (str): string with variable names separated with
``_`` (figures), ``.`` (subplots) and ``,`` (same subplot).
Returns:
three nested lists of str
- variables on the same subplot;
- subplots on the same figure;
- figures.
"""
lovs = [[[var for var in svars.split(',') if var]
for svars in pvars.split('.') if svars]
for pvars in arg_plot.split('-') if pvars]
lovs = [[slov for slov in lov if slov] for lov in lovs if lov]
return [lov for lov in lovs if lov] | [
"def",
"list_of_vars",
"(",
"arg_plot",
")",
":",
"lovs",
"=",
"[",
"[",
"[",
"var",
"for",
"var",
"in",
"svars",
".",
"split",
"(",
"','",
")",
"if",
"var",
"]",
"for",
"svars",
"in",
"pvars",
".",
"split",
"(",
"'.'",
")",
"if",
"svars",
"]",
... | Construct list of variables per plot.
Args:
arg_plot (str): string with variable names separated with
``_`` (figures), ``.`` (subplots) and ``,`` (same subplot).
Returns:
three nested lists of str
- variables on the same subplot;
- subplots on the same figure;
- figures. | [
"Construct",
"list",
"of",
"variables",
"per",
"plot",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L84-L101 | train | 42,176 |
StagPython/StagPy | stagpy/misc.py | set_of_vars | def set_of_vars(lovs):
"""Build set of variables from list.
Args:
lovs: nested lists of variables such as the one produced by
:func:`list_of_vars`.
Returns:
set of str: flattened set of all the variables present in the
nested lists.
"""
return set(var for pvars in lovs for svars in pvars for var in svars) | python | def set_of_vars(lovs):
"""Build set of variables from list.
Args:
lovs: nested lists of variables such as the one produced by
:func:`list_of_vars`.
Returns:
set of str: flattened set of all the variables present in the
nested lists.
"""
return set(var for pvars in lovs for svars in pvars for var in svars) | [
"def",
"set_of_vars",
"(",
"lovs",
")",
":",
"return",
"set",
"(",
"var",
"for",
"pvars",
"in",
"lovs",
"for",
"svars",
"in",
"pvars",
"for",
"var",
"in",
"svars",
")"
] | Build set of variables from list.
Args:
lovs: nested lists of variables such as the one produced by
:func:`list_of_vars`.
Returns:
set of str: flattened set of all the variables present in the
nested lists. | [
"Build",
"set",
"of",
"variables",
"from",
"list",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L104-L114 | train | 42,177 |
StagPython/StagPy | stagpy/misc.py | get_rbounds | def get_rbounds(step):
"""Radial or vertical position of boundaries.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of floats: radial or vertical positions of boundaries of the
domain.
"""
if step.geom is not None:
rcmb = step.geom.rcmb
else:
rcmb = step.sdat.par['geometry']['r_cmb']
if step.sdat.par['geometry']['shape'].lower() == 'cartesian':
rcmb = 0
rcmb = max(rcmb, 0)
return rcmb, rcmb + 1 | python | def get_rbounds(step):
"""Radial or vertical position of boundaries.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of floats: radial or vertical positions of boundaries of the
domain.
"""
if step.geom is not None:
rcmb = step.geom.rcmb
else:
rcmb = step.sdat.par['geometry']['r_cmb']
if step.sdat.par['geometry']['shape'].lower() == 'cartesian':
rcmb = 0
rcmb = max(rcmb, 0)
return rcmb, rcmb + 1 | [
"def",
"get_rbounds",
"(",
"step",
")",
":",
"if",
"step",
".",
"geom",
"is",
"not",
"None",
":",
"rcmb",
"=",
"step",
".",
"geom",
".",
"rcmb",
"else",
":",
"rcmb",
"=",
"step",
".",
"sdat",
".",
"par",
"[",
"'geometry'",
"]",
"[",
"'r_cmb'",
"]... | Radial or vertical position of boundaries.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of floats: radial or vertical positions of boundaries of the
domain. | [
"Radial",
"or",
"vertical",
"position",
"of",
"boundaries",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L117-L134 | train | 42,178 |
StagPython/StagPy | stagpy/misc.py | InchoateFiles.fnames | def fnames(self, names):
"""Ensure constant size of fnames"""
names = list(names[:len(self._fnames)])
self._fnames = names + self._fnames[len(names):] | python | def fnames(self, names):
"""Ensure constant size of fnames"""
names = list(names[:len(self._fnames)])
self._fnames = names + self._fnames[len(names):] | [
"def",
"fnames",
"(",
"self",
",",
"names",
")",
":",
"names",
"=",
"list",
"(",
"names",
"[",
":",
"len",
"(",
"self",
".",
"_fnames",
")",
"]",
")",
"self",
".",
"_fnames",
"=",
"names",
"+",
"self",
".",
"_fnames",
"[",
"len",
"(",
"names",
... | Ensure constant size of fnames | [
"Ensure",
"constant",
"size",
"of",
"fnames"
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L191-L194 | train | 42,179 |
StagPython/StagPy | stagpy/time_series.py | get_time_series | def get_time_series(sdat, var, tstart, tend):
"""Extract or compute and rescale a time series.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
var (str): time series name, a key of :data:`stagpy.phyvars.TIME`
or :data:`stagpy.phyvars.TIME_EXTRA`.
tstart (float): starting time of desired series. Set to None to start
at the beginning of available data.
tend (float): ending time of desired series. Set to None to stop at the
end of available data.
Returns:
tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`:
series, time, meta
series is the requested time series, time the time at which it
is evaluated (set to None if it is the one of time series output
by StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance
holding metadata of the requested variable.
"""
tseries = sdat.tseries_between(tstart, tend)
if var in tseries.columns:
series = tseries[var]
time = None
if var in phyvars.TIME:
meta = phyvars.TIME[var]
else:
meta = phyvars.Vart(var, None, '1')
elif var in phyvars.TIME_EXTRA:
meta = phyvars.TIME_EXTRA[var]
series, time = meta.description(sdat, tstart, tend)
meta = phyvars.Vart(misc.baredoc(meta.description),
meta.kind, meta.dim)
else:
raise UnknownTimeVarError(var)
series, _ = sdat.scale(series, meta.dim)
if time is not None:
time, _ = sdat.scale(time, 's')
return series, time, meta | python | def get_time_series(sdat, var, tstart, tend):
"""Extract or compute and rescale a time series.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
var (str): time series name, a key of :data:`stagpy.phyvars.TIME`
or :data:`stagpy.phyvars.TIME_EXTRA`.
tstart (float): starting time of desired series. Set to None to start
at the beginning of available data.
tend (float): ending time of desired series. Set to None to stop at the
end of available data.
Returns:
tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`:
series, time, meta
series is the requested time series, time the time at which it
is evaluated (set to None if it is the one of time series output
by StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance
holding metadata of the requested variable.
"""
tseries = sdat.tseries_between(tstart, tend)
if var in tseries.columns:
series = tseries[var]
time = None
if var in phyvars.TIME:
meta = phyvars.TIME[var]
else:
meta = phyvars.Vart(var, None, '1')
elif var in phyvars.TIME_EXTRA:
meta = phyvars.TIME_EXTRA[var]
series, time = meta.description(sdat, tstart, tend)
meta = phyvars.Vart(misc.baredoc(meta.description),
meta.kind, meta.dim)
else:
raise UnknownTimeVarError(var)
series, _ = sdat.scale(series, meta.dim)
if time is not None:
time, _ = sdat.scale(time, 's')
return series, time, meta | [
"def",
"get_time_series",
"(",
"sdat",
",",
"var",
",",
"tstart",
",",
"tend",
")",
":",
"tseries",
"=",
"sdat",
".",
"tseries_between",
"(",
"tstart",
",",
"tend",
")",
"if",
"var",
"in",
"tseries",
".",
"columns",
":",
"series",
"=",
"tseries",
"[",
... | Extract or compute and rescale a time series.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
var (str): time series name, a key of :data:`stagpy.phyvars.TIME`
or :data:`stagpy.phyvars.TIME_EXTRA`.
tstart (float): starting time of desired series. Set to None to start
at the beginning of available data.
tend (float): ending time of desired series. Set to None to stop at the
end of available data.
Returns:
tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`:
series, time, meta
series is the requested time series, time the time at which it
is evaluated (set to None if it is the one of time series output
by StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance
holding metadata of the requested variable. | [
"Extract",
"or",
"compute",
"and",
"rescale",
"a",
"time",
"series",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/time_series.py#L55-L93 | train | 42,180 |
StagPython/StagPy | stagpy/time_series.py | plot_time_series | def plot_time_series(sdat, lovs):
"""Plot requested time series.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of series names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.time.tstart: the starting time.
conf.time.tend: the ending time.
"""
sovs = misc.set_of_vars(lovs)
tseries = {}
times = {}
metas = {}
for tvar in sovs:
series, time, meta = get_time_series(
sdat, tvar, conf.time.tstart, conf.time.tend)
tseries[tvar] = series
metas[tvar] = meta
if time is not None:
times[tvar] = time
tseries['t'] = get_time_series(
sdat, 't', conf.time.tstart, conf.time.tend)[0]
_plot_time_list(sdat, lovs, tseries, metas, times) | python | def plot_time_series(sdat, lovs):
"""Plot requested time series.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of series names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.time.tstart: the starting time.
conf.time.tend: the ending time.
"""
sovs = misc.set_of_vars(lovs)
tseries = {}
times = {}
metas = {}
for tvar in sovs:
series, time, meta = get_time_series(
sdat, tvar, conf.time.tstart, conf.time.tend)
tseries[tvar] = series
metas[tvar] = meta
if time is not None:
times[tvar] = time
tseries['t'] = get_time_series(
sdat, 't', conf.time.tstart, conf.time.tend)[0]
_plot_time_list(sdat, lovs, tseries, metas, times) | [
"def",
"plot_time_series",
"(",
"sdat",
",",
"lovs",
")",
":",
"sovs",
"=",
"misc",
".",
"set_of_vars",
"(",
"lovs",
")",
"tseries",
"=",
"{",
"}",
"times",
"=",
"{",
"}",
"metas",
"=",
"{",
"}",
"for",
"tvar",
"in",
"sovs",
":",
"series",
",",
"... | Plot requested time series.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of series names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.time.tstart: the starting time.
conf.time.tend: the ending time. | [
"Plot",
"requested",
"time",
"series",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/time_series.py#L96-L122 | train | 42,181 |
StagPython/StagPy | stagpy/time_series.py | compstat | def compstat(sdat, tstart=None, tend=None):
"""Compute statistics from series output by StagYY.
Create a file 'statistics.dat' containing the mean and standard deviation
of each series on the requested time span.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): starting time. Set to None to start at the beginning of
available data.
tend (float): ending time. Set to None to stop at the end of available
data.
"""
data = sdat.tseries_between(tstart, tend)
time = data['t'].values
delta_time = time[-1] - time[0]
data = data.iloc[:, 1:].values # assume t is first column
mean = np.trapz(data, x=time, axis=0) / delta_time
rms = np.sqrt(np.trapz((data - mean)**2, x=time, axis=0) / delta_time)
with open(misc.out_name('statistics.dat'), 'w') as out_file:
mean.tofile(out_file, sep=' ', format="%10.5e")
out_file.write('\n')
rms.tofile(out_file, sep=' ', format="%10.5e")
out_file.write('\n') | python | def compstat(sdat, tstart=None, tend=None):
"""Compute statistics from series output by StagYY.
Create a file 'statistics.dat' containing the mean and standard deviation
of each series on the requested time span.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): starting time. Set to None to start at the beginning of
available data.
tend (float): ending time. Set to None to stop at the end of available
data.
"""
data = sdat.tseries_between(tstart, tend)
time = data['t'].values
delta_time = time[-1] - time[0]
data = data.iloc[:, 1:].values # assume t is first column
mean = np.trapz(data, x=time, axis=0) / delta_time
rms = np.sqrt(np.trapz((data - mean)**2, x=time, axis=0) / delta_time)
with open(misc.out_name('statistics.dat'), 'w') as out_file:
mean.tofile(out_file, sep=' ', format="%10.5e")
out_file.write('\n')
rms.tofile(out_file, sep=' ', format="%10.5e")
out_file.write('\n') | [
"def",
"compstat",
"(",
"sdat",
",",
"tstart",
"=",
"None",
",",
"tend",
"=",
"None",
")",
":",
"data",
"=",
"sdat",
".",
"tseries_between",
"(",
"tstart",
",",
"tend",
")",
"time",
"=",
"data",
"[",
"'t'",
"]",
".",
"values",
"delta_time",
"=",
"t... | Compute statistics from series output by StagYY.
Create a file 'statistics.dat' containing the mean and standard deviation
of each series on the requested time span.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): starting time. Set to None to start at the beginning of
available data.
tend (float): ending time. Set to None to stop at the end of available
data. | [
"Compute",
"statistics",
"from",
"series",
"output",
"by",
"StagYY",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/time_series.py#L125-L150 | train | 42,182 |
StagPython/StagPy | stagpy/time_series.py | cmd | def cmd():
"""Implementation of time subcommand.
Other Parameters:
conf.time
conf.core
"""
sdat = StagyyData(conf.core.path)
if sdat.tseries is None:
return
if conf.time.fraction is not None:
if not 0 < conf.time.fraction <= 1:
raise InvalidTimeFractionError(conf.time.fraction)
conf.time.tend = None
t_0 = sdat.tseries.iloc[0].loc['t']
t_f = sdat.tseries.iloc[-1].loc['t']
conf.time.tstart = (t_0 * conf.time.fraction +
t_f * (1 - conf.time.fraction))
lovs = misc.list_of_vars(conf.time.plot)
if lovs:
plot_time_series(sdat, lovs)
if conf.time.compstat:
compstat(sdat, conf.time.tstart, conf.time.tend) | python | def cmd():
"""Implementation of time subcommand.
Other Parameters:
conf.time
conf.core
"""
sdat = StagyyData(conf.core.path)
if sdat.tseries is None:
return
if conf.time.fraction is not None:
if not 0 < conf.time.fraction <= 1:
raise InvalidTimeFractionError(conf.time.fraction)
conf.time.tend = None
t_0 = sdat.tseries.iloc[0].loc['t']
t_f = sdat.tseries.iloc[-1].loc['t']
conf.time.tstart = (t_0 * conf.time.fraction +
t_f * (1 - conf.time.fraction))
lovs = misc.list_of_vars(conf.time.plot)
if lovs:
plot_time_series(sdat, lovs)
if conf.time.compstat:
compstat(sdat, conf.time.tstart, conf.time.tend) | [
"def",
"cmd",
"(",
")",
":",
"sdat",
"=",
"StagyyData",
"(",
"conf",
".",
"core",
".",
"path",
")",
"if",
"sdat",
".",
"tseries",
"is",
"None",
":",
"return",
"if",
"conf",
".",
"time",
".",
"fraction",
"is",
"not",
"None",
":",
"if",
"not",
"0",... | Implementation of time subcommand.
Other Parameters:
conf.time
conf.core | [
"Implementation",
"of",
"time",
"subcommand",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/time_series.py#L153-L178 | train | 42,183 |
StagPython/StagPy | stagpy/commands.py | info_cmd | def info_cmd():
"""Print basic information about StagYY run."""
sdat = stagyydata.StagyyData(conf.core.path)
lsnap = sdat.snaps.last
lstep = sdat.steps.last
print('StagYY run in {}'.format(sdat.path))
if lsnap.geom.threed:
dimension = '{} x {} x {}'.format(lsnap.geom.nxtot,
lsnap.geom.nytot,
lsnap.geom.nztot)
elif lsnap.geom.twod_xz:
dimension = '{} x {}'.format(lsnap.geom.nxtot,
lsnap.geom.nztot)
else:
dimension = '{} x {}'.format(lsnap.geom.nytot,
lsnap.geom.nztot)
if lsnap.geom.cartesian:
print('Cartesian', dimension)
elif lsnap.geom.cylindrical:
print('Cylindrical', dimension)
else:
print('Spherical', dimension)
print('Last timestep:',
' istep: {}'.format(lstep.istep),
' time: {}'.format(lstep.timeinfo['t']),
' <T>: {}'.format(lstep.timeinfo['Tmean']),
sep='\n')
print('Last snapshot (istep {}):'.format(lsnap.istep),
' isnap: {}'.format(lsnap.isnap),
' time: {}'.format(lsnap.timeinfo['t']),
' output fields: {}'.format(','.join(lsnap.fields)),
sep='\n') | python | def info_cmd():
"""Print basic information about StagYY run."""
sdat = stagyydata.StagyyData(conf.core.path)
lsnap = sdat.snaps.last
lstep = sdat.steps.last
print('StagYY run in {}'.format(sdat.path))
if lsnap.geom.threed:
dimension = '{} x {} x {}'.format(lsnap.geom.nxtot,
lsnap.geom.nytot,
lsnap.geom.nztot)
elif lsnap.geom.twod_xz:
dimension = '{} x {}'.format(lsnap.geom.nxtot,
lsnap.geom.nztot)
else:
dimension = '{} x {}'.format(lsnap.geom.nytot,
lsnap.geom.nztot)
if lsnap.geom.cartesian:
print('Cartesian', dimension)
elif lsnap.geom.cylindrical:
print('Cylindrical', dimension)
else:
print('Spherical', dimension)
print('Last timestep:',
' istep: {}'.format(lstep.istep),
' time: {}'.format(lstep.timeinfo['t']),
' <T>: {}'.format(lstep.timeinfo['Tmean']),
sep='\n')
print('Last snapshot (istep {}):'.format(lsnap.istep),
' isnap: {}'.format(lsnap.isnap),
' time: {}'.format(lsnap.timeinfo['t']),
' output fields: {}'.format(','.join(lsnap.fields)),
sep='\n') | [
"def",
"info_cmd",
"(",
")",
":",
"sdat",
"=",
"stagyydata",
".",
"StagyyData",
"(",
"conf",
".",
"core",
".",
"path",
")",
"lsnap",
"=",
"sdat",
".",
"snaps",
".",
"last",
"lstep",
"=",
"sdat",
".",
"steps",
".",
"last",
"print",
"(",
"'StagYY run i... | Print basic information about StagYY run. | [
"Print",
"basic",
"information",
"about",
"StagYY",
"run",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/commands.py#L17-L48 | train | 42,184 |
StagPython/StagPy | stagpy/commands.py | var_cmd | def var_cmd():
"""Print a list of available variables.
See :mod:`stagpy.phyvars` where the lists of variables organized by command
are defined.
"""
print_all = not any(val for _, val in conf.var.opt_vals_())
if print_all or conf.var.field:
print('field:')
_layout(phyvars.FIELD, phyvars.FIELD_EXTRA)
print()
if print_all or conf.var.sfield:
print('surface field:')
_layout(phyvars.SFIELD, {})
print()
if print_all or conf.var.rprof:
print('rprof:')
_layout(phyvars.RPROF, phyvars.RPROF_EXTRA)
print()
if print_all or conf.var.time:
print('time:')
_layout(phyvars.TIME, phyvars.TIME_EXTRA)
print()
if print_all or conf.var.plates:
print('plates:')
_layout(phyvars.PLATES, {}) | python | def var_cmd():
"""Print a list of available variables.
See :mod:`stagpy.phyvars` where the lists of variables organized by command
are defined.
"""
print_all = not any(val for _, val in conf.var.opt_vals_())
if print_all or conf.var.field:
print('field:')
_layout(phyvars.FIELD, phyvars.FIELD_EXTRA)
print()
if print_all or conf.var.sfield:
print('surface field:')
_layout(phyvars.SFIELD, {})
print()
if print_all or conf.var.rprof:
print('rprof:')
_layout(phyvars.RPROF, phyvars.RPROF_EXTRA)
print()
if print_all or conf.var.time:
print('time:')
_layout(phyvars.TIME, phyvars.TIME_EXTRA)
print()
if print_all or conf.var.plates:
print('plates:')
_layout(phyvars.PLATES, {}) | [
"def",
"var_cmd",
"(",
")",
":",
"print_all",
"=",
"not",
"any",
"(",
"val",
"for",
"_",
",",
"val",
"in",
"conf",
".",
"var",
".",
"opt_vals_",
"(",
")",
")",
"if",
"print_all",
"or",
"conf",
".",
"var",
".",
"field",
":",
"print",
"(",
"'field:... | Print a list of available variables.
See :mod:`stagpy.phyvars` where the lists of variables organized by command
are defined. | [
"Print",
"a",
"list",
"of",
"available",
"variables",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/commands.py#L101-L126 | train | 42,185 |
StagPython/StagPy | stagpy/commands.py | report_parsing_problems | def report_parsing_problems(parsing_out):
"""Output message about potential parsing problems."""
_, empty, faulty = parsing_out
if CONFIG_FILE in empty or CONFIG_FILE in faulty:
print('Unable to read global config file', CONFIG_FILE,
file=sys.stderr)
print('Please run stagpy config --create',
sep='\n', end='\n\n', file=sys.stderr)
if CONFIG_LOCAL in faulty:
print('Unable to read local config file', CONFIG_LOCAL,
file=sys.stderr)
print('Please run stagpy config --create_local',
sep='\n', end='\n\n', file=sys.stderr) | python | def report_parsing_problems(parsing_out):
"""Output message about potential parsing problems."""
_, empty, faulty = parsing_out
if CONFIG_FILE in empty or CONFIG_FILE in faulty:
print('Unable to read global config file', CONFIG_FILE,
file=sys.stderr)
print('Please run stagpy config --create',
sep='\n', end='\n\n', file=sys.stderr)
if CONFIG_LOCAL in faulty:
print('Unable to read local config file', CONFIG_LOCAL,
file=sys.stderr)
print('Please run stagpy config --create_local',
sep='\n', end='\n\n', file=sys.stderr) | [
"def",
"report_parsing_problems",
"(",
"parsing_out",
")",
":",
"_",
",",
"empty",
",",
"faulty",
"=",
"parsing_out",
"if",
"CONFIG_FILE",
"in",
"empty",
"or",
"CONFIG_FILE",
"in",
"faulty",
":",
"print",
"(",
"'Unable to read global config file'",
",",
"CONFIG_FI... | Output message about potential parsing problems. | [
"Output",
"message",
"about",
"potential",
"parsing",
"problems",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/commands.py#L137-L149 | train | 42,186 |
StagPython/StagPy | stagpy/commands.py | config_pp | def config_pp(subs):
"""Pretty print of configuration options.
Args:
subs (iterable of str): iterable with the list of conf sections to
print.
"""
print('(c|f): available only as CLI argument/in the config file',
end='\n\n')
for sub in subs:
hlp_lst = []
for opt, meta in conf[sub].defaults_():
if meta.cmd_arg ^ meta.conf_arg:
opt += ' (c)' if meta.cmd_arg else ' (f)'
hlp_lst.append((opt, meta.help))
if hlp_lst:
print('{}:'.format(sub))
_pretty_print(hlp_lst, sep=' -- ',
text_width=min(get_terminal_size().columns, 100))
print() | python | def config_pp(subs):
"""Pretty print of configuration options.
Args:
subs (iterable of str): iterable with the list of conf sections to
print.
"""
print('(c|f): available only as CLI argument/in the config file',
end='\n\n')
for sub in subs:
hlp_lst = []
for opt, meta in conf[sub].defaults_():
if meta.cmd_arg ^ meta.conf_arg:
opt += ' (c)' if meta.cmd_arg else ' (f)'
hlp_lst.append((opt, meta.help))
if hlp_lst:
print('{}:'.format(sub))
_pretty_print(hlp_lst, sep=' -- ',
text_width=min(get_terminal_size().columns, 100))
print() | [
"def",
"config_pp",
"(",
"subs",
")",
":",
"print",
"(",
"'(c|f): available only as CLI argument/in the config file'",
",",
"end",
"=",
"'\\n\\n'",
")",
"for",
"sub",
"in",
"subs",
":",
"hlp_lst",
"=",
"[",
"]",
"for",
"opt",
",",
"meta",
"in",
"conf",
"[",
... | Pretty print of configuration options.
Args:
subs (iterable of str): iterable with the list of conf sections to
print. | [
"Pretty",
"print",
"of",
"configuration",
"options",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/commands.py#L152-L171 | train | 42,187 |
StagPython/StagPy | stagpy/commands.py | config_cmd | def config_cmd():
"""Configuration handling.
Other Parameters:
conf.config
"""
if not (conf.common.config or conf.config.create or
conf.config.create_local or conf.config.update or
conf.config.edit):
config_pp(conf.sections_())
loam.tools.config_cmd_handler(conf) | python | def config_cmd():
"""Configuration handling.
Other Parameters:
conf.config
"""
if not (conf.common.config or conf.config.create or
conf.config.create_local or conf.config.update or
conf.config.edit):
config_pp(conf.sections_())
loam.tools.config_cmd_handler(conf) | [
"def",
"config_cmd",
"(",
")",
":",
"if",
"not",
"(",
"conf",
".",
"common",
".",
"config",
"or",
"conf",
".",
"config",
".",
"create",
"or",
"conf",
".",
"config",
".",
"create_local",
"or",
"conf",
".",
"config",
".",
"update",
"or",
"conf",
".",
... | Configuration handling.
Other Parameters:
conf.config | [
"Configuration",
"handling",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/commands.py#L174-L184 | train | 42,188 |
StagPython/StagPy | stagpy/rprof.py | get_rprof | def get_rprof(step, var):
"""Extract or compute and rescale requested radial profile.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): radial profile name, a key of :data:`stagpy.phyvars.RPROF`
or :data:`stagpy.phyvars.RPROF_EXTRA`.
Returns:
tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Varr`:
rprof, rad, meta
rprof is the requested profile, rad the radial position at which it
is evaluated (set to None if it is the position of profiles output
by StagYY), and meta is a :class:`stagpy.phyvars.Varr` instance
holding metadata of the requested variable.
"""
if var in step.rprof.columns:
rprof = step.rprof[var]
rad = None
if var in phyvars.RPROF:
meta = phyvars.RPROF[var]
else:
meta = phyvars.Varr(var, None, '1')
elif var in phyvars.RPROF_EXTRA:
meta = phyvars.RPROF_EXTRA[var]
rprof, rad = meta.description(step)
meta = phyvars.Varr(misc.baredoc(meta.description),
meta.kind, meta.dim)
else:
raise UnknownRprofVarError(var)
rprof, _ = step.sdat.scale(rprof, meta.dim)
if rad is not None:
rad, _ = step.sdat.scale(rad, 'm')
return rprof, rad, meta | python | def get_rprof(step, var):
"""Extract or compute and rescale requested radial profile.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): radial profile name, a key of :data:`stagpy.phyvars.RPROF`
or :data:`stagpy.phyvars.RPROF_EXTRA`.
Returns:
tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Varr`:
rprof, rad, meta
rprof is the requested profile, rad the radial position at which it
is evaluated (set to None if it is the position of profiles output
by StagYY), and meta is a :class:`stagpy.phyvars.Varr` instance
holding metadata of the requested variable.
"""
if var in step.rprof.columns:
rprof = step.rprof[var]
rad = None
if var in phyvars.RPROF:
meta = phyvars.RPROF[var]
else:
meta = phyvars.Varr(var, None, '1')
elif var in phyvars.RPROF_EXTRA:
meta = phyvars.RPROF_EXTRA[var]
rprof, rad = meta.description(step)
meta = phyvars.Varr(misc.baredoc(meta.description),
meta.kind, meta.dim)
else:
raise UnknownRprofVarError(var)
rprof, _ = step.sdat.scale(rprof, meta.dim)
if rad is not None:
rad, _ = step.sdat.scale(rad, 'm')
return rprof, rad, meta | [
"def",
"get_rprof",
"(",
"step",
",",
"var",
")",
":",
"if",
"var",
"in",
"step",
".",
"rprof",
".",
"columns",
":",
"rprof",
"=",
"step",
".",
"rprof",
"[",
"var",
"]",
"rad",
"=",
"None",
"if",
"var",
"in",
"phyvars",
".",
"RPROF",
":",
"meta",... | Extract or compute and rescale requested radial profile.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): radial profile name, a key of :data:`stagpy.phyvars.RPROF`
or :data:`stagpy.phyvars.RPROF_EXTRA`.
Returns:
tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Varr`:
rprof, rad, meta
rprof is the requested profile, rad the radial position at which it
is evaluated (set to None if it is the position of profiles output
by StagYY), and meta is a :class:`stagpy.phyvars.Varr` instance
holding metadata of the requested variable. | [
"Extract",
"or",
"compute",
"and",
"rescale",
"requested",
"radial",
"profile",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/rprof.py#L54-L88 | train | 42,189 |
StagPython/StagPy | stagpy/rprof.py | plot_grid | def plot_grid(step):
"""Plot cell position and thickness.
The figure is call grid_N.pdf where N is replace by the step index.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
"""
rad = get_rprof(step, 'r')[0]
drad = get_rprof(step, 'dr')[0]
_, unit = step.sdat.scale(1, 'm')
if unit:
unit = ' ({})'.format(unit)
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(rad, '-ko')
ax1.set_ylabel('$r$' + unit)
ax2.plot(drad, '-ko')
ax2.set_ylabel('$dr$' + unit)
ax2.set_xlim([-0.5, len(rad) - 0.5])
ax2.set_xlabel('Cell number')
misc.saveplot(fig, 'grid', step.istep) | python | def plot_grid(step):
"""Plot cell position and thickness.
The figure is call grid_N.pdf where N is replace by the step index.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
"""
rad = get_rprof(step, 'r')[0]
drad = get_rprof(step, 'dr')[0]
_, unit = step.sdat.scale(1, 'm')
if unit:
unit = ' ({})'.format(unit)
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(rad, '-ko')
ax1.set_ylabel('$r$' + unit)
ax2.plot(drad, '-ko')
ax2.set_ylabel('$dr$' + unit)
ax2.set_xlim([-0.5, len(rad) - 0.5])
ax2.set_xlabel('Cell number')
misc.saveplot(fig, 'grid', step.istep) | [
"def",
"plot_grid",
"(",
"step",
")",
":",
"rad",
"=",
"get_rprof",
"(",
"step",
",",
"'r'",
")",
"[",
"0",
"]",
"drad",
"=",
"get_rprof",
"(",
"step",
",",
"'dr'",
")",
"[",
"0",
"]",
"_",
",",
"unit",
"=",
"step",
".",
"sdat",
".",
"scale",
... | Plot cell position and thickness.
The figure is call grid_N.pdf where N is replace by the step index.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance. | [
"Plot",
"cell",
"position",
"and",
"thickness",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/rprof.py#L91-L112 | train | 42,190 |
StagPython/StagPy | stagpy/rprof.py | plot_average | def plot_average(sdat, lovs):
"""Plot time averaged profiles.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps.
"""
steps_iter = iter(sdat.walk.filter(rprof=True))
try:
step = next(steps_iter)
except StopIteration:
return
sovs = misc.set_of_vars(lovs)
istart = step.istep
nprofs = 1
rprof_averaged = {}
rads = {}
metas = {}
# assume constant z spacing for the moment
for rvar in sovs:
rprof_averaged[rvar], rad, metas[rvar] = get_rprof(step, rvar)
if rad is not None:
rads[rvar] = rad
for step in steps_iter:
nprofs += 1
for rvar in sovs:
rprof_averaged[rvar] += get_rprof(step, rvar)[0]
ilast = step.istep
for rvar in sovs:
rprof_averaged[rvar] /= nprofs
rcmb, rsurf = misc.get_rbounds(step)
rprof_averaged['bounds'] = (step.sdat.scale(rcmb, 'm')[0],
step.sdat.scale(rsurf, 'm')[0])
rprof_averaged['r'] = get_rprof(step, 'r')[0] + rprof_averaged['bounds'][0]
stepstr = '{}_{}'.format(istart, ilast)
_plot_rprof_list(sdat, lovs, rprof_averaged, metas, stepstr, rads) | python | def plot_average(sdat, lovs):
"""Plot time averaged profiles.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps.
"""
steps_iter = iter(sdat.walk.filter(rprof=True))
try:
step = next(steps_iter)
except StopIteration:
return
sovs = misc.set_of_vars(lovs)
istart = step.istep
nprofs = 1
rprof_averaged = {}
rads = {}
metas = {}
# assume constant z spacing for the moment
for rvar in sovs:
rprof_averaged[rvar], rad, metas[rvar] = get_rprof(step, rvar)
if rad is not None:
rads[rvar] = rad
for step in steps_iter:
nprofs += 1
for rvar in sovs:
rprof_averaged[rvar] += get_rprof(step, rvar)[0]
ilast = step.istep
for rvar in sovs:
rprof_averaged[rvar] /= nprofs
rcmb, rsurf = misc.get_rbounds(step)
rprof_averaged['bounds'] = (step.sdat.scale(rcmb, 'm')[0],
step.sdat.scale(rsurf, 'm')[0])
rprof_averaged['r'] = get_rprof(step, 'r')[0] + rprof_averaged['bounds'][0]
stepstr = '{}_{}'.format(istart, ilast)
_plot_rprof_list(sdat, lovs, rprof_averaged, metas, stepstr, rads) | [
"def",
"plot_average",
"(",
"sdat",
",",
"lovs",
")",
":",
"steps_iter",
"=",
"iter",
"(",
"sdat",
".",
"walk",
".",
"filter",
"(",
"rprof",
"=",
"True",
")",
")",
"try",
":",
"step",
"=",
"next",
"(",
"steps_iter",
")",
"except",
"StopIteration",
":... | Plot time averaged profiles.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps. | [
"Plot",
"time",
"averaged",
"profiles",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/rprof.py#L115-L162 | train | 42,191 |
StagPython/StagPy | stagpy/rprof.py | plot_every_step | def plot_every_step(sdat, lovs):
"""Plot profiles at each time step.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps.
"""
sovs = misc.set_of_vars(lovs)
for step in sdat.walk.filter(rprof=True):
rprofs = {}
rads = {}
metas = {}
for rvar in sovs:
rprof, rad, meta = get_rprof(step, rvar)
rprofs[rvar] = rprof
metas[rvar] = meta
if rad is not None:
rads[rvar] = rad
rprofs['bounds'] = misc.get_rbounds(step)
rcmb, rsurf = misc.get_rbounds(step)
rprofs['bounds'] = (step.sdat.scale(rcmb, 'm')[0],
step.sdat.scale(rsurf, 'm')[0])
rprofs['r'] = get_rprof(step, 'r')[0] + rprofs['bounds'][0]
stepstr = str(step.istep)
_plot_rprof_list(sdat, lovs, rprofs, metas, stepstr, rads) | python | def plot_every_step(sdat, lovs):
"""Plot profiles at each time step.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps.
"""
sovs = misc.set_of_vars(lovs)
for step in sdat.walk.filter(rprof=True):
rprofs = {}
rads = {}
metas = {}
for rvar in sovs:
rprof, rad, meta = get_rprof(step, rvar)
rprofs[rvar] = rprof
metas[rvar] = meta
if rad is not None:
rads[rvar] = rad
rprofs['bounds'] = misc.get_rbounds(step)
rcmb, rsurf = misc.get_rbounds(step)
rprofs['bounds'] = (step.sdat.scale(rcmb, 'm')[0],
step.sdat.scale(rsurf, 'm')[0])
rprofs['r'] = get_rprof(step, 'r')[0] + rprofs['bounds'][0]
stepstr = str(step.istep)
_plot_rprof_list(sdat, lovs, rprofs, metas, stepstr, rads) | [
"def",
"plot_every_step",
"(",
"sdat",
",",
"lovs",
")",
":",
"sovs",
"=",
"misc",
".",
"set_of_vars",
"(",
"lovs",
")",
"for",
"step",
"in",
"sdat",
".",
"walk",
".",
"filter",
"(",
"rprof",
"=",
"True",
")",
":",
"rprofs",
"=",
"{",
"}",
"rads",
... | Plot profiles at each time step.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps. | [
"Plot",
"profiles",
"at",
"each",
"time",
"step",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/rprof.py#L165-L196 | train | 42,192 |
StagPython/StagPy | stagpy/rprof.py | cmd | def cmd():
"""Implementation of rprof subcommand.
Other Parameters:
conf.rprof
conf.core
"""
sdat = StagyyData(conf.core.path)
if sdat.rprof is None:
return
if conf.rprof.grid:
for step in sdat.walk.filter(rprof=True):
plot_grid(step)
lovs = misc.list_of_vars(conf.rprof.plot)
if not lovs:
return
if conf.rprof.average:
plot_average(sdat, lovs)
else:
plot_every_step(sdat, lovs) | python | def cmd():
"""Implementation of rprof subcommand.
Other Parameters:
conf.rprof
conf.core
"""
sdat = StagyyData(conf.core.path)
if sdat.rprof is None:
return
if conf.rprof.grid:
for step in sdat.walk.filter(rprof=True):
plot_grid(step)
lovs = misc.list_of_vars(conf.rprof.plot)
if not lovs:
return
if conf.rprof.average:
plot_average(sdat, lovs)
else:
plot_every_step(sdat, lovs) | [
"def",
"cmd",
"(",
")",
":",
"sdat",
"=",
"StagyyData",
"(",
"conf",
".",
"core",
".",
"path",
")",
"if",
"sdat",
".",
"rprof",
"is",
"None",
":",
"return",
"if",
"conf",
".",
"rprof",
".",
"grid",
":",
"for",
"step",
"in",
"sdat",
".",
"walk",
... | Implementation of rprof subcommand.
Other Parameters:
conf.rprof
conf.core | [
"Implementation",
"of",
"rprof",
"subcommand",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/rprof.py#L199-L221 | train | 42,193 |
StagPython/StagPy | stagpy/__main__.py | main | def main():
"""StagPy entry point"""
if not DEBUG:
signal.signal(signal.SIGINT, sigint_handler)
warnings.simplefilter('ignore')
args = importlib.import_module('stagpy.args')
error = importlib.import_module('stagpy.error')
try:
args.parse_args()()
except error.StagpyError as err:
if DEBUG:
raise
print('Oops! StagPy encountered the following problem while '
'processing your request.',
'Please check the path to your simulation and the command line '
'arguments.', '',
'{}: {}'.format(err.__class__.__name__, err),
sep='\n', file=sys.stderr)
sys.exit() | python | def main():
"""StagPy entry point"""
if not DEBUG:
signal.signal(signal.SIGINT, sigint_handler)
warnings.simplefilter('ignore')
args = importlib.import_module('stagpy.args')
error = importlib.import_module('stagpy.error')
try:
args.parse_args()()
except error.StagpyError as err:
if DEBUG:
raise
print('Oops! StagPy encountered the following problem while '
'processing your request.',
'Please check the path to your simulation and the command line '
'arguments.', '',
'{}: {}'.format(err.__class__.__name__, err),
sep='\n', file=sys.stderr)
sys.exit() | [
"def",
"main",
"(",
")",
":",
"if",
"not",
"DEBUG",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"sigint_handler",
")",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
")",
"args",
"=",
"importlib",
".",
"import_module",
"(",
"'stag... | StagPy entry point | [
"StagPy",
"entry",
"point"
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/__main__.py#L11-L29 | train | 42,194 |
StagPython/StagPy | stagpy/parfile.py | _enrich_with_par | def _enrich_with_par(par_nml, par_file):
"""Enrich a par namelist with the content of a file."""
par_new = f90nml.read(str(par_file))
for section, content in par_new.items():
if section not in par_nml:
par_nml[section] = {}
for par, value in content.items():
try:
content[par] = value.strip()
except AttributeError:
pass
par_nml[section].update(content) | python | def _enrich_with_par(par_nml, par_file):
"""Enrich a par namelist with the content of a file."""
par_new = f90nml.read(str(par_file))
for section, content in par_new.items():
if section not in par_nml:
par_nml[section] = {}
for par, value in content.items():
try:
content[par] = value.strip()
except AttributeError:
pass
par_nml[section].update(content) | [
"def",
"_enrich_with_par",
"(",
"par_nml",
",",
"par_file",
")",
":",
"par_new",
"=",
"f90nml",
".",
"read",
"(",
"str",
"(",
"par_file",
")",
")",
"for",
"section",
",",
"content",
"in",
"par_new",
".",
"items",
"(",
")",
":",
"if",
"section",
"not",
... | Enrich a par namelist with the content of a file. | [
"Enrich",
"a",
"par",
"namelist",
"with",
"the",
"content",
"of",
"a",
"file",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/parfile.py#L623-L634 | train | 42,195 |
StagPython/StagPy | stagpy/parfile.py | readpar | def readpar(par_file, root):
"""Read StagYY par file.
The namelist is populated in chronological order with:
- :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
- :data:`PAR_DFLT_FILE`, the global configuration par file;
- ``par_name_defaultparameters`` if it is defined in ``par_file``;
- ``par_file`` itself;
- ``parameters.dat`` if it can be found in the StagYY output directories.
Args:
par_file (:class:`pathlib.Path`): path of par file.
root (:class:`pathlib.Path`): path on which other paths are rooted.
This is usually par.parent.
Returns:
:class:`f90nml.namelist.Namelist`: case insensitive dict of dict of
values with first key being the namelist and second key the variables'
name.
"""
par_nml = deepcopy(PAR_DEFAULT)
if PAR_DFLT_FILE.is_file():
_enrich_with_par(par_nml, PAR_DFLT_FILE)
else:
PAR_DFLT_FILE.parent.mkdir(exist_ok=True)
f90nml.write(par_nml, str(PAR_DFLT_FILE))
if not par_file.is_file():
raise NoParFileError(par_file)
par_main = f90nml.read(str(par_file))
if 'default_parameters_parfile' in par_main:
par_dflt = par_main['default_parameters_parfile'].get(
'par_name_defaultparameters', 'par_defaults')
par_dflt = root / par_dflt
if not par_dflt.is_file():
raise NoParFileError(par_dflt)
_enrich_with_par(par_nml, par_dflt)
_enrich_with_par(par_nml, par_file)
par_out = root / par_nml['ioin']['output_file_stem'] / '_parameters.dat'
if par_out.is_file():
_enrich_with_par(par_nml, par_out)
par_out = root / par_nml['ioin']['hdf5_output_folder'] / 'parameters.dat'
if par_out.is_file():
_enrich_with_par(par_nml, par_out)
return par_nml | python | def readpar(par_file, root):
"""Read StagYY par file.
The namelist is populated in chronological order with:
- :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
- :data:`PAR_DFLT_FILE`, the global configuration par file;
- ``par_name_defaultparameters`` if it is defined in ``par_file``;
- ``par_file`` itself;
- ``parameters.dat`` if it can be found in the StagYY output directories.
Args:
par_file (:class:`pathlib.Path`): path of par file.
root (:class:`pathlib.Path`): path on which other paths are rooted.
This is usually par.parent.
Returns:
:class:`f90nml.namelist.Namelist`: case insensitive dict of dict of
values with first key being the namelist and second key the variables'
name.
"""
par_nml = deepcopy(PAR_DEFAULT)
if PAR_DFLT_FILE.is_file():
_enrich_with_par(par_nml, PAR_DFLT_FILE)
else:
PAR_DFLT_FILE.parent.mkdir(exist_ok=True)
f90nml.write(par_nml, str(PAR_DFLT_FILE))
if not par_file.is_file():
raise NoParFileError(par_file)
par_main = f90nml.read(str(par_file))
if 'default_parameters_parfile' in par_main:
par_dflt = par_main['default_parameters_parfile'].get(
'par_name_defaultparameters', 'par_defaults')
par_dflt = root / par_dflt
if not par_dflt.is_file():
raise NoParFileError(par_dflt)
_enrich_with_par(par_nml, par_dflt)
_enrich_with_par(par_nml, par_file)
par_out = root / par_nml['ioin']['output_file_stem'] / '_parameters.dat'
if par_out.is_file():
_enrich_with_par(par_nml, par_out)
par_out = root / par_nml['ioin']['hdf5_output_folder'] / 'parameters.dat'
if par_out.is_file():
_enrich_with_par(par_nml, par_out)
return par_nml | [
"def",
"readpar",
"(",
"par_file",
",",
"root",
")",
":",
"par_nml",
"=",
"deepcopy",
"(",
"PAR_DEFAULT",
")",
"if",
"PAR_DFLT_FILE",
".",
"is_file",
"(",
")",
":",
"_enrich_with_par",
"(",
"par_nml",
",",
"PAR_DFLT_FILE",
")",
"else",
":",
"PAR_DFLT_FILE",
... | Read StagYY par file.
The namelist is populated in chronological order with:
- :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
- :data:`PAR_DFLT_FILE`, the global configuration par file;
- ``par_name_defaultparameters`` if it is defined in ``par_file``;
- ``par_file`` itself;
- ``parameters.dat`` if it can be found in the StagYY output directories.
Args:
par_file (:class:`pathlib.Path`): path of par file.
root (:class:`pathlib.Path`): path on which other paths are rooted.
This is usually par.parent.
Returns:
:class:`f90nml.namelist.Namelist`: case insensitive dict of dict of
values with first key being the namelist and second key the variables'
name. | [
"Read",
"StagYY",
"par",
"file",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/parfile.py#L637-L685 | train | 42,196 |
StagPython/StagPy | stagpy/field.py | get_meshes_fld | def get_meshes_fld(step, var):
"""Return scalar field along with coordinates meshes.
Only works properly in 2D geometry.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): scalar field name.
Returns:
tuple of :class:`numpy.array`: xmesh, ymesh, fld
2D arrays containing respectively the x position, y position, and
the value of the requested field.
"""
fld = step.fields[var]
if step.geom.twod_xz:
xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]
fld = fld[:, 0, :, 0]
elif step.geom.cartesian and step.geom.twod_yz:
xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]
fld = fld[0, :, :, 0]
else: # spherical yz
xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]
fld = fld[0, :, :, 0]
return xmesh, ymesh, fld | python | def get_meshes_fld(step, var):
"""Return scalar field along with coordinates meshes.
Only works properly in 2D geometry.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): scalar field name.
Returns:
tuple of :class:`numpy.array`: xmesh, ymesh, fld
2D arrays containing respectively the x position, y position, and
the value of the requested field.
"""
fld = step.fields[var]
if step.geom.twod_xz:
xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]
fld = fld[:, 0, :, 0]
elif step.geom.cartesian and step.geom.twod_yz:
xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]
fld = fld[0, :, :, 0]
else: # spherical yz
xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]
fld = fld[0, :, :, 0]
return xmesh, ymesh, fld | [
"def",
"get_meshes_fld",
"(",
"step",
",",
"var",
")",
":",
"fld",
"=",
"step",
".",
"fields",
"[",
"var",
"]",
"if",
"step",
".",
"geom",
".",
"twod_xz",
":",
"xmesh",
",",
"ymesh",
"=",
"step",
".",
"geom",
".",
"x_mesh",
"[",
":",
",",
"0",
... | Return scalar field along with coordinates meshes.
Only works properly in 2D geometry.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): scalar field name.
Returns:
tuple of :class:`numpy.array`: xmesh, ymesh, fld
2D arrays containing respectively the x position, y position, and
the value of the requested field. | [
"Return",
"scalar",
"field",
"along",
"with",
"coordinates",
"meshes",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/field.py#L27-L51 | train | 42,197 |
StagPython/StagPy | stagpy/field.py | get_meshes_vec | def get_meshes_vec(step, var):
"""Return vector field components along with coordinates meshes.
Only works properly in 2D geometry.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): vector field name.
Returns:
tuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy
2D arrays containing respectively the x position, y position, x
component and y component of the requested vector field.
"""
if step.geom.twod_xz:
xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]
vec1 = step.fields[var + '1'][:, 0, :, 0]
vec2 = step.fields[var + '3'][:, 0, :, 0]
elif step.geom.cartesian and step.geom.twod_yz:
xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]
vec1 = step.fields[var + '2'][0, :, :, 0]
vec2 = step.fields[var + '3'][0, :, :, 0]
else: # spherical yz
xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]
pmesh = step.geom.p_mesh[0, :, :]
vec_phi = step.fields[var + '2'][0, :, :, 0]
vec_r = step.fields[var + '3'][0, :, :, 0]
vec1 = vec_r * np.cos(pmesh) - vec_phi * np.sin(pmesh)
vec2 = vec_phi * np.cos(pmesh) + vec_r * np.sin(pmesh)
return xmesh, ymesh, vec1, vec2 | python | def get_meshes_vec(step, var):
"""Return vector field components along with coordinates meshes.
Only works properly in 2D geometry.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): vector field name.
Returns:
tuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy
2D arrays containing respectively the x position, y position, x
component and y component of the requested vector field.
"""
if step.geom.twod_xz:
xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]
vec1 = step.fields[var + '1'][:, 0, :, 0]
vec2 = step.fields[var + '3'][:, 0, :, 0]
elif step.geom.cartesian and step.geom.twod_yz:
xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]
vec1 = step.fields[var + '2'][0, :, :, 0]
vec2 = step.fields[var + '3'][0, :, :, 0]
else: # spherical yz
xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]
pmesh = step.geom.p_mesh[0, :, :]
vec_phi = step.fields[var + '2'][0, :, :, 0]
vec_r = step.fields[var + '3'][0, :, :, 0]
vec1 = vec_r * np.cos(pmesh) - vec_phi * np.sin(pmesh)
vec2 = vec_phi * np.cos(pmesh) + vec_r * np.sin(pmesh)
return xmesh, ymesh, vec1, vec2 | [
"def",
"get_meshes_vec",
"(",
"step",
",",
"var",
")",
":",
"if",
"step",
".",
"geom",
".",
"twod_xz",
":",
"xmesh",
",",
"ymesh",
"=",
"step",
".",
"geom",
".",
"x_mesh",
"[",
":",
",",
"0",
",",
":",
"]",
",",
"step",
".",
"geom",
".",
"z_mes... | Return vector field components along with coordinates meshes.
Only works properly in 2D geometry.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): vector field name.
Returns:
tuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy
2D arrays containing respectively the x position, y position, x
component and y component of the requested vector field. | [
"Return",
"vector",
"field",
"components",
"along",
"with",
"coordinates",
"meshes",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/field.py#L54-L83 | train | 42,198 |
StagPython/StagPy | stagpy/field.py | set_of_vars | def set_of_vars(arg_plot):
"""Build set of needed field variables.
Each var is a tuple, first component is a scalar field, second component is
either:
- a scalar field, isocontours are added to the plot.
- a vector field (e.g. 'v' for the (v1,v2,v3) vector), arrows are added to
the plot.
Args:
arg_plot (str): string with variable names separated with
``,`` (figures), and ``+`` (same plot).
Returns:
set of str: set of needed field variables.
"""
sovs = set(tuple((var + '+').split('+')[:2])
for var in arg_plot.split(','))
sovs.discard(('', ''))
return sovs | python | def set_of_vars(arg_plot):
"""Build set of needed field variables.
Each var is a tuple, first component is a scalar field, second component is
either:
- a scalar field, isocontours are added to the plot.
- a vector field (e.g. 'v' for the (v1,v2,v3) vector), arrows are added to
the plot.
Args:
arg_plot (str): string with variable names separated with
``,`` (figures), and ``+`` (same plot).
Returns:
set of str: set of needed field variables.
"""
sovs = set(tuple((var + '+').split('+')[:2])
for var in arg_plot.split(','))
sovs.discard(('', ''))
return sovs | [
"def",
"set_of_vars",
"(",
"arg_plot",
")",
":",
"sovs",
"=",
"set",
"(",
"tuple",
"(",
"(",
"var",
"+",
"'+'",
")",
".",
"split",
"(",
"'+'",
")",
"[",
":",
"2",
"]",
")",
"for",
"var",
"in",
"arg_plot",
".",
"split",
"(",
"','",
")",
")",
"... | Build set of needed field variables.
Each var is a tuple, first component is a scalar field, second component is
either:
- a scalar field, isocontours are added to the plot.
- a vector field (e.g. 'v' for the (v1,v2,v3) vector), arrows are added to
the plot.
Args:
arg_plot (str): string with variable names separated with
``,`` (figures), and ``+`` (same plot).
Returns:
set of str: set of needed field variables. | [
"Build",
"set",
"of",
"needed",
"field",
"variables",
"."
] | 18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4 | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/field.py#L86-L105 | train | 42,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.