body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
bedf526d7b44648d1573305d4c50132168ecbe176beccc9c19d0648ddcc10396 | def remove_aresta(self, a):
'\n Remove uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice\n :param a: a aresta no formato correto\n :raise: lança uma exceção caso a aresta não estiver em um formato válido\n '
if self.arestaValida(a):
if self.existeAresta(a):
i_a1 = self.__indice_primeiro_vertice_aresta(a)
i_a2 = self.__indice_segundo_vertice_aresta(a)
if (i_a1 < i_a2):
self.M[i_a1][i_a2] -= 1
else:
self.M[i_a2][i_a1] -= 1
else:
raise ArestaInvalidaException('A aresta {} é inválida'.format(a)) | Remove uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice
:param a: a aresta no formato correto
:raise: lança uma exceção caso a aresta não estiver em um formato válido | Graphs/adjacent undirected graph/grafo_adj_nao_dir.py | remove_aresta | lucasEngdComp/graphs | 0 | python | def remove_aresta(self, a):
'\n Remove uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice\n :param a: a aresta no formato correto\n :raise: lança uma exceção caso a aresta não estiver em um formato válido\n '
if self.arestaValida(a):
if self.existeAresta(a):
i_a1 = self.__indice_primeiro_vertice_aresta(a)
i_a2 = self.__indice_segundo_vertice_aresta(a)
if (i_a1 < i_a2):
self.M[i_a1][i_a2] -= 1
else:
self.M[i_a2][i_a1] -= 1
else:
raise ArestaInvalidaException('A aresta {} é inválida'.format(a)) | def remove_aresta(self, a):
'\n Remove uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice\n :param a: a aresta no formato correto\n :raise: lança uma exceção caso a aresta não estiver em um formato válido\n '
if self.arestaValida(a):
if self.existeAresta(a):
i_a1 = self.__indice_primeiro_vertice_aresta(a)
i_a2 = self.__indice_segundo_vertice_aresta(a)
if (i_a1 < i_a2):
self.M[i_a1][i_a2] -= 1
else:
self.M[i_a2][i_a1] -= 1
else:
raise ArestaInvalidaException('A aresta {} é inválida'.format(a))<|docstring|>Remove uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice
:param a: a aresta no formato correto
:raise: lança uma exceção caso a aresta não estiver em um formato válido<|endoftext|> |
f160a8374fd9943aa32af1f4353a6df5c2b98d3958debc04d24354239a6b2cc6 | def __str__(self):
'\n Fornece uma representação do tipo String do grafo.\n O String contém um sequência dos vértices separados por vírgula, seguido de uma sequência das arestas no formato padrão.\n :return: Uma string que representa o grafo\n '
espaco = (' ' * self.__maior_vertice)
grafo_str = (espaco + ' ')
for v in range(len(self.N)):
grafo_str += self.N[v]
if (v < (len(self.N) - 1)):
grafo_str += ' '
grafo_str += '\n'
for l in range(len(self.M)):
grafo_str += (self.N[l] + ' ')
for c in range(len(self.M)):
grafo_str += (str(self.M[l][c]) + ' ')
grafo_str += '\n'
return grafo_str | Fornece uma representação do tipo String do grafo.
O String contém um sequência dos vértices separados por vírgula, seguido de uma sequência das arestas no formato padrão.
:return: Uma string que representa o grafo | Graphs/adjacent undirected graph/grafo_adj_nao_dir.py | __str__ | lucasEngdComp/graphs | 0 | python | def __str__(self):
'\n Fornece uma representação do tipo String do grafo.\n O String contém um sequência dos vértices separados por vírgula, seguido de uma sequência das arestas no formato padrão.\n :return: Uma string que representa o grafo\n '
espaco = (' ' * self.__maior_vertice)
grafo_str = (espaco + ' ')
for v in range(len(self.N)):
grafo_str += self.N[v]
if (v < (len(self.N) - 1)):
grafo_str += ' '
grafo_str += '\n'
for l in range(len(self.M)):
grafo_str += (self.N[l] + ' ')
for c in range(len(self.M)):
grafo_str += (str(self.M[l][c]) + ' ')
grafo_str += '\n'
return grafo_str | def __str__(self):
'\n Fornece uma representação do tipo String do grafo.\n O String contém um sequência dos vértices separados por vírgula, seguido de uma sequência das arestas no formato padrão.\n :return: Uma string que representa o grafo\n '
espaco = (' ' * self.__maior_vertice)
grafo_str = (espaco + ' ')
for v in range(len(self.N)):
grafo_str += self.N[v]
if (v < (len(self.N) - 1)):
grafo_str += ' '
grafo_str += '\n'
for l in range(len(self.M)):
grafo_str += (self.N[l] + ' ')
for c in range(len(self.M)):
grafo_str += (str(self.M[l][c]) + ' ')
grafo_str += '\n'
return grafo_str<|docstring|>Fornece uma representação do tipo String do grafo.
O String contém um sequência dos vértices separados por vírgula, seguido de uma sequência das arestas no formato padrão.
:return: Uma string que representa o grafo<|endoftext|> |
1f250bfc98bdce9b44ea9738ae1ad2586c55f90d0d601bf696208f452ec6f472 | def flownets33(data=None, args=None):
'FlowNetS model architecture from the\n "Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)\n\n Args:\n data : pretrained weights of the network. will create a new one if not set\n '
model = FlowNetS(batchNorm=False, bias=args.conv_no_bias)
if (data is not None):
model.load_state_dict(data['state_dict'], strict=False)
return model | FlowNetS model architecture from the
"Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)
Args:
data : pretrained weights of the network. will create a new one if not set | models/FlowNetS_3x3.py | flownets33 | kairx772/FlowNetPytorch | 2 | python | def flownets33(data=None, args=None):
'FlowNetS model architecture from the\n "Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)\n\n Args:\n data : pretrained weights of the network. will create a new one if not set\n '
model = FlowNetS(batchNorm=False, bias=args.conv_no_bias)
if (data is not None):
model.load_state_dict(data['state_dict'], strict=False)
return model | def flownets33(data=None, args=None):
'FlowNetS model architecture from the\n "Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)\n\n Args:\n data : pretrained weights of the network. will create a new one if not set\n '
model = FlowNetS(batchNorm=False, bias=args.conv_no_bias)
if (data is not None):
model.load_state_dict(data['state_dict'], strict=False)
return model<|docstring|>FlowNetS model architecture from the
"Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)
Args:
data : pretrained weights of the network. will create a new one if not set<|endoftext|> |
0ee5b0c1e26d79ec186b8082bcd74215b42f228493475d87021e71aee5378b8f | def flownets33_bn(data=None):
'FlowNetS model architecture from the\n "Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)\n\n Args:\n data : pretrained weights of the network. will create a new one if not set\n '
model = FlowNetS(batchNorm=True)
if (data is not None):
model.load_state_dict(data['state_dict'], strict=False)
return model | FlowNetS model architecture from the
"Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)
Args:
data : pretrained weights of the network. will create a new one if not set | models/FlowNetS_3x3.py | flownets33_bn | kairx772/FlowNetPytorch | 2 | python | def flownets33_bn(data=None):
'FlowNetS model architecture from the\n "Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)\n\n Args:\n data : pretrained weights of the network. will create a new one if not set\n '
model = FlowNetS(batchNorm=True)
if (data is not None):
model.load_state_dict(data['state_dict'], strict=False)
return model | def flownets33_bn(data=None):
'FlowNetS model architecture from the\n "Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)\n\n Args:\n data : pretrained weights of the network. will create a new one if not set\n '
model = FlowNetS(batchNorm=True)
if (data is not None):
model.load_state_dict(data['state_dict'], strict=False)
return model<|docstring|>FlowNetS model architecture from the
"Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)
Args:
data : pretrained weights of the network. will create a new one if not set<|endoftext|> |
9fb3c92d12d7096d43edc623824740099dc6738bf1179b4e32b274853ab2a208 | def handleForever(self):
'\n Delegates user input to the handling function when activated.\n '
self._logger.info("Starting to handle conversation with keyword '%s'.", self.persona)
while True:
threshold = self.mic.fetchThreshold()
input = self.mic.activeListenToAllOptions(threshold)
print(input)
if input:
self.brain.query(input)
else:
print('no words detected')
self._logger.info('Nothing has been said.') | Delegates user input to the handling function when activated. | client/conversation.py | handleForever | joisse1101/reverb | 0 | python | def handleForever(self):
'\n \n '
self._logger.info("Starting to handle conversation with keyword '%s'.", self.persona)
while True:
threshold = self.mic.fetchThreshold()
input = self.mic.activeListenToAllOptions(threshold)
print(input)
if input:
self.brain.query(input)
else:
print('no words detected')
self._logger.info('Nothing has been said.') | def handleForever(self):
'\n \n '
self._logger.info("Starting to handle conversation with keyword '%s'.", self.persona)
while True:
threshold = self.mic.fetchThreshold()
input = self.mic.activeListenToAllOptions(threshold)
print(input)
if input:
self.brain.query(input)
else:
print('no words detected')
self._logger.info('Nothing has been said.')<|docstring|>Delegates user input to the handling function when activated.<|endoftext|> |
9cb35a6349476011051cc141c3b4aa1fb104ca007270c95ccd0a04daaff39178 | def __init__(self, weighted_mode=True, sum_or_fraction='fraction', re_mask=True):
'\n Initialise class.\n\n Args:\n weighted_mode (bool):\n This is included to allow a standard interface for both the\n square and circular neighbourhood plugins.\n sum_or_fraction (str):\n Identifier for whether sum or fraction should be returned from\n neighbourhooding. The sum represents the sum of the\n neighbourhood. The fraction represents the sum of the\n neighbourhood divided by the neighbourhood area.\n Valid options are "sum" or "fraction".\n re_mask (bool):\n If re_mask is True, the original un-neighbourhood processed\n mask is applied to mask out the neighbourhood processed cube.\n If re_mask is False, the original un-neighbourhood processed\n mask is not applied. Therefore, the neighbourhood processing\n may result in values being present in areas that were\n originally masked.\n '
self.weighted_mode = weighted_mode
if (sum_or_fraction not in ['sum', 'fraction']):
msg = "The neighbourhood output can either be in the form of a sum of all the points in the neighbourhood or a fraction of the sum of the neighbourhood divided by the neighbourhood area. The {} option is invalid. Valid options are 'sum' or 'fraction'."
raise ValueError(msg)
self.sum_or_fraction = sum_or_fraction
self.re_mask = re_mask | Initialise class.
Args:
weighted_mode (bool):
This is included to allow a standard interface for both the
square and circular neighbourhood plugins.
sum_or_fraction (str):
Identifier for whether sum or fraction should be returned from
neighbourhooding. The sum represents the sum of the
neighbourhood. The fraction represents the sum of the
neighbourhood divided by the neighbourhood area.
Valid options are "sum" or "fraction".
re_mask (bool):
If re_mask is True, the original un-neighbourhood processed
mask is applied to mask out the neighbourhood processed cube.
If re_mask is False, the original un-neighbourhood processed
mask is not applied. Therefore, the neighbourhood processing
may result in values being present in areas that were
originally masked. | improver/nbhood/square_kernel.py | __init__ | ddlddl58/improver | 1 | python | def __init__(self, weighted_mode=True, sum_or_fraction='fraction', re_mask=True):
'\n Initialise class.\n\n Args:\n weighted_mode (bool):\n This is included to allow a standard interface for both the\n square and circular neighbourhood plugins.\n sum_or_fraction (str):\n Identifier for whether sum or fraction should be returned from\n neighbourhooding. The sum represents the sum of the\n neighbourhood. The fraction represents the sum of the\n neighbourhood divided by the neighbourhood area.\n Valid options are "sum" or "fraction".\n re_mask (bool):\n If re_mask is True, the original un-neighbourhood processed\n mask is applied to mask out the neighbourhood processed cube.\n If re_mask is False, the original un-neighbourhood processed\n mask is not applied. Therefore, the neighbourhood processing\n may result in values being present in areas that were\n originally masked.\n '
self.weighted_mode = weighted_mode
if (sum_or_fraction not in ['sum', 'fraction']):
msg = "The neighbourhood output can either be in the form of a sum of all the points in the neighbourhood or a fraction of the sum of the neighbourhood divided by the neighbourhood area. The {} option is invalid. Valid options are 'sum' or 'fraction'."
raise ValueError(msg)
self.sum_or_fraction = sum_or_fraction
self.re_mask = re_mask | def __init__(self, weighted_mode=True, sum_or_fraction='fraction', re_mask=True):
'\n Initialise class.\n\n Args:\n weighted_mode (bool):\n This is included to allow a standard interface for both the\n square and circular neighbourhood plugins.\n sum_or_fraction (str):\n Identifier for whether sum or fraction should be returned from\n neighbourhooding. The sum represents the sum of the\n neighbourhood. The fraction represents the sum of the\n neighbourhood divided by the neighbourhood area.\n Valid options are "sum" or "fraction".\n re_mask (bool):\n If re_mask is True, the original un-neighbourhood processed\n mask is applied to mask out the neighbourhood processed cube.\n If re_mask is False, the original un-neighbourhood processed\n mask is not applied. Therefore, the neighbourhood processing\n may result in values being present in areas that were\n originally masked.\n '
self.weighted_mode = weighted_mode
if (sum_or_fraction not in ['sum', 'fraction']):
msg = "The neighbourhood output can either be in the form of a sum of all the points in the neighbourhood or a fraction of the sum of the neighbourhood divided by the neighbourhood area. The {} option is invalid. Valid options are 'sum' or 'fraction'."
raise ValueError(msg)
self.sum_or_fraction = sum_or_fraction
self.re_mask = re_mask<|docstring|>Initialise class.
Args:
weighted_mode (bool):
This is included to allow a standard interface for both the
square and circular neighbourhood plugins.
sum_or_fraction (str):
Identifier for whether sum or fraction should be returned from
neighbourhooding. The sum represents the sum of the
neighbourhood. The fraction represents the sum of the
neighbourhood divided by the neighbourhood area.
Valid options are "sum" or "fraction".
re_mask (bool):
If re_mask is True, the original un-neighbourhood processed
mask is applied to mask out the neighbourhood processed cube.
If re_mask is False, the original un-neighbourhood processed
mask is not applied. Therefore, the neighbourhood processing
may result in values being present in areas that were
originally masked.<|endoftext|> |
58a5f6ae2512801458db16fdba6745b271f26754c8f1ecb07b37675508336c68 | def __repr__(self):
'Represent the configured plugin instance as a string.'
result = '<SquareNeighbourhood: weighted_mode: {}, sum_or_fraction: {}, re_mask: {}>'
return result.format(self.weighted_mode, self.sum_or_fraction, self.re_mask) | Represent the configured plugin instance as a string. | improver/nbhood/square_kernel.py | __repr__ | ddlddl58/improver | 1 | python | def __repr__(self):
result = '<SquareNeighbourhood: weighted_mode: {}, sum_or_fraction: {}, re_mask: {}>'
return result.format(self.weighted_mode, self.sum_or_fraction, self.re_mask) | def __repr__(self):
result = '<SquareNeighbourhood: weighted_mode: {}, sum_or_fraction: {}, re_mask: {}>'
return result.format(self.weighted_mode, self.sum_or_fraction, self.re_mask)<|docstring|>Represent the configured plugin instance as a string.<|endoftext|> |
6c33281be8fef6b789871aeaaf072eff7c05e080ffad8f424a5bf5f1222bd847 | @staticmethod
def cumulate_array(cube, iscomplex=False):
'\n Method to calculate the cumulative sum of an m x n array, by first\n cumulating along the y direction so that the largest values\n are in the nth row, and then cumulating along the x direction,\n so that the largest values are in the mth column. Each grid point\n will contain the cumulative sum from the origin to that grid point.\n\n Args:\n cube (iris.cube.Cube):\n Cube to which the cumulative summing along the y and x\n direction will be applied. The cube should contain only x and\n y dimensions, so will generally be a slice of a cube ordered\n so that y is first in the cube (i.e. axis=0).\n iscomplex (bool):\n Flag indicating whether cube.data contains complex values.\n\n Returns:\n iris.cube.Cube:\n Cube to which the cumulative summing\n along the y and x direction has been applied.\n '
summed_cube = cube.copy()
if iscomplex:
data = cube.data.astype(complex)
elif cube.name().startswith('probability_of'):
data = cube.data.astype(np.float32)
else:
data = cube.data.astype(np.longdouble)
data_summed_along_y = np.cumsum(data, axis=0)
data_summed_along_x = np.cumsum(data_summed_along_y, axis=1)
summed_cube.data = data_summed_along_x
return summed_cube | Method to calculate the cumulative sum of an m x n array, by first
cumulating along the y direction so that the largest values
are in the nth row, and then cumulating along the x direction,
so that the largest values are in the mth column. Each grid point
will contain the cumulative sum from the origin to that grid point.
Args:
cube (iris.cube.Cube):
Cube to which the cumulative summing along the y and x
direction will be applied. The cube should contain only x and
y dimensions, so will generally be a slice of a cube ordered
so that y is first in the cube (i.e. axis=0).
iscomplex (bool):
Flag indicating whether cube.data contains complex values.
Returns:
iris.cube.Cube:
Cube to which the cumulative summing
along the y and x direction has been applied. | improver/nbhood/square_kernel.py | cumulate_array | ddlddl58/improver | 1 | python | @staticmethod
def cumulate_array(cube, iscomplex=False):
'\n Method to calculate the cumulative sum of an m x n array, by first\n cumulating along the y direction so that the largest values\n are in the nth row, and then cumulating along the x direction,\n so that the largest values are in the mth column. Each grid point\n will contain the cumulative sum from the origin to that grid point.\n\n Args:\n cube (iris.cube.Cube):\n Cube to which the cumulative summing along the y and x\n direction will be applied. The cube should contain only x and\n y dimensions, so will generally be a slice of a cube ordered\n so that y is first in the cube (i.e. axis=0).\n iscomplex (bool):\n Flag indicating whether cube.data contains complex values.\n\n Returns:\n iris.cube.Cube:\n Cube to which the cumulative summing\n along the y and x direction has been applied.\n '
summed_cube = cube.copy()
if iscomplex:
data = cube.data.astype(complex)
elif cube.name().startswith('probability_of'):
data = cube.data.astype(np.float32)
else:
data = cube.data.astype(np.longdouble)
data_summed_along_y = np.cumsum(data, axis=0)
data_summed_along_x = np.cumsum(data_summed_along_y, axis=1)
summed_cube.data = data_summed_along_x
return summed_cube | @staticmethod
def cumulate_array(cube, iscomplex=False):
'\n Method to calculate the cumulative sum of an m x n array, by first\n cumulating along the y direction so that the largest values\n are in the nth row, and then cumulating along the x direction,\n so that the largest values are in the mth column. Each grid point\n will contain the cumulative sum from the origin to that grid point.\n\n Args:\n cube (iris.cube.Cube):\n Cube to which the cumulative summing along the y and x\n direction will be applied. The cube should contain only x and\n y dimensions, so will generally be a slice of a cube ordered\n so that y is first in the cube (i.e. axis=0).\n iscomplex (bool):\n Flag indicating whether cube.data contains complex values.\n\n Returns:\n iris.cube.Cube:\n Cube to which the cumulative summing\n along the y and x direction has been applied.\n '
summed_cube = cube.copy()
if iscomplex:
data = cube.data.astype(complex)
elif cube.name().startswith('probability_of'):
data = cube.data.astype(np.float32)
else:
data = cube.data.astype(np.longdouble)
data_summed_along_y = np.cumsum(data, axis=0)
data_summed_along_x = np.cumsum(data_summed_along_y, axis=1)
summed_cube.data = data_summed_along_x
return summed_cube<|docstring|>Method to calculate the cumulative sum of an m x n array, by first
cumulating along the y direction so that the largest values
are in the nth row, and then cumulating along the x direction,
so that the largest values are in the mth column. Each grid point
will contain the cumulative sum from the origin to that grid point.
Args:
cube (iris.cube.Cube):
Cube to which the cumulative summing along the y and x
direction will be applied. The cube should contain only x and
y dimensions, so will generally be a slice of a cube ordered
so that y is first in the cube (i.e. axis=0).
iscomplex (bool):
Flag indicating whether cube.data contains complex values.
Returns:
iris.cube.Cube:
Cube to which the cumulative summing
along the y and x direction has been applied.<|endoftext|> |
876866951414b9aa106db2a1388b512b56c27387f86b333994143d6727146765 | @staticmethod
def calculate_neighbourhood(summed_cube, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp, ymax_xmin_disp, n_rows, n_columns):
'\n Fast vectorised approach to calculating neighbourhood totals.\n\n Displacements are calculated as follows for the following input array,\n where the accumulation has occurred from top to\n bottom and left to right::\n\n | 1 | 2 | 2 | 2 |\n | 1 | 3 | 4 | 4 |\n | 2 | 4 | 5 | 6 |\n | 2 | 4 | 6 | 7 |\n\n\n For a 3x3 neighbourhood centred around the point with a value of 0000:0000:0000:0000:0000:0000:0000:0000\n\n | 1 (C) | 2 | 2 | 2 (D) |\n | 1 | 3 | 4 | 4 |\n | 2 | 4 | 5 (Central point) | 6 |\n | 2 (A) | 4 | 6 | 7 (B) |\n\n To calculate the value for the neighbourhood sum at the "Central point"\n with a value of 5, calculate::\n\n Neighbourhood sum = B - A - D + C\n\n At the central point, this will yield::\n\n Neighbourhood sum = 7 - 2 - 2 +1 => 4\n\n Args:\n summed_cube (iris.cube.Cube):\n cube on which to calculate the neighbourhood total.\n ymax_xmax_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point B in the docstring example.\n ymax_xmin_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point A in the docstring example.\n ymin_xmax_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point D in the docstring example.\n ymin_xmin_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point C in the docstring example.\n n_rows (int):\n Number of rows\n n_columns (int):\n Number of columns\n\n Returns:\n numpy.ndarray:\n Array containing the calculated neighbourhood total.\n '
flattened = summed_cube.data.flatten()
ymax_xmax_array = np.roll(flattened, (- ymax_xmax_disp))
ymin_xmax_array = np.roll(flattened, (- ymin_xmax_disp))
ymin_xmin_array = np.roll(flattened, (- ymin_xmin_disp))
ymax_xmin_array = np.roll(flattened, (- ymax_xmin_disp))
neighbourhood_total = (((ymax_xmax_array - ymin_xmax_array) + ymin_xmin_array) - ymax_xmin_array)
neighbourhood_total.resize(n_rows, n_columns)
return neighbourhood_total | Fast vectorised approach to calculating neighbourhood totals.
Displacements are calculated as follows for the following input array,
where the accumulation has occurred from top to
bottom and left to right::
| 1 | 2 | 2 | 2 |
| 1 | 3 | 4 | 4 |
| 2 | 4 | 5 | 6 |
| 2 | 4 | 6 | 7 |
For a 3x3 neighbourhood centred around the point with a value of 0000:0000:0000:0000:0000:0000:0000:0000
| 1 (C) | 2 | 2 | 2 (D) |
| 1 | 3 | 4 | 4 |
| 2 | 4 | 5 (Central point) | 6 |
| 2 (A) | 4 | 6 | 7 (B) |
To calculate the value for the neighbourhood sum at the "Central point"
with a value of 5, calculate::
Neighbourhood sum = B - A - D + C
At the central point, this will yield::
Neighbourhood sum = 7 - 2 - 2 +1 => 4
Args:
summed_cube (iris.cube.Cube):
cube on which to calculate the neighbourhood total.
ymax_xmax_disp (int):
Displacement from the point at the centre
of the neighbourhood.
Equivalent to point B in the docstring example.
ymax_xmin_disp (int):
Displacement from the point at the centre
of the neighbourhood.
Equivalent to point A in the docstring example.
ymin_xmax_disp (int):
Displacement from the point at the centre
of the neighbourhood.
Equivalent to point D in the docstring example.
ymin_xmin_disp (int):
Displacement from the point at the centre
of the neighbourhood.
Equivalent to point C in the docstring example.
n_rows (int):
Number of rows
n_columns (int):
Number of columns
Returns:
numpy.ndarray:
Array containing the calculated neighbourhood total. | improver/nbhood/square_kernel.py | calculate_neighbourhood | ddlddl58/improver | 1 | python | @staticmethod
def calculate_neighbourhood(summed_cube, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp, ymax_xmin_disp, n_rows, n_columns):
'\n Fast vectorised approach to calculating neighbourhood totals.\n\n Displacements are calculated as follows for the following input array,\n where the accumulation has occurred from top to\n bottom and left to right::\n\n | 1 | 2 | 2 | 2 |\n | 1 | 3 | 4 | 4 |\n | 2 | 4 | 5 | 6 |\n | 2 | 4 | 6 | 7 |\n\n\n For a 3x3 neighbourhood centred around the point with a value of 0000:0000:0000:0000:0000:0000:0000:0000\n\n | 1 (C) | 2 | 2 | 2 (D) |\n | 1 | 3 | 4 | 4 |\n | 2 | 4 | 5 (Central point) | 6 |\n | 2 (A) | 4 | 6 | 7 (B) |\n\n To calculate the value for the neighbourhood sum at the "Central point"\n with a value of 5, calculate::\n\n Neighbourhood sum = B - A - D + C\n\n At the central point, this will yield::\n\n Neighbourhood sum = 7 - 2 - 2 +1 => 4\n\n Args:\n summed_cube (iris.cube.Cube):\n cube on which to calculate the neighbourhood total.\n ymax_xmax_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point B in the docstring example.\n ymax_xmin_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point A in the docstring example.\n ymin_xmax_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point D in the docstring example.\n ymin_xmin_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point C in the docstring example.\n n_rows (int):\n Number of rows\n n_columns (int):\n Number of columns\n\n Returns:\n numpy.ndarray:\n Array containing the calculated neighbourhood total.\n '
flattened = summed_cube.data.flatten()
ymax_xmax_array = np.roll(flattened, (- ymax_xmax_disp))
ymin_xmax_array = np.roll(flattened, (- ymin_xmax_disp))
ymin_xmin_array = np.roll(flattened, (- ymin_xmin_disp))
ymax_xmin_array = np.roll(flattened, (- ymax_xmin_disp))
neighbourhood_total = (((ymax_xmax_array - ymin_xmax_array) + ymin_xmin_array) - ymax_xmin_array)
neighbourhood_total.resize(n_rows, n_columns)
return neighbourhood_total | @staticmethod
def calculate_neighbourhood(summed_cube, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp, ymax_xmin_disp, n_rows, n_columns):
'\n Fast vectorised approach to calculating neighbourhood totals.\n\n Displacements are calculated as follows for the following input array,\n where the accumulation has occurred from top to\n bottom and left to right::\n\n | 1 | 2 | 2 | 2 |\n | 1 | 3 | 4 | 4 |\n | 2 | 4 | 5 | 6 |\n | 2 | 4 | 6 | 7 |\n\n\n For a 3x3 neighbourhood centred around the point with a value of 0000:0000:0000:0000:0000:0000:0000:0000\n\n | 1 (C) | 2 | 2 | 2 (D) |\n | 1 | 3 | 4 | 4 |\n | 2 | 4 | 5 (Central point) | 6 |\n | 2 (A) | 4 | 6 | 7 (B) |\n\n To calculate the value for the neighbourhood sum at the "Central point"\n with a value of 5, calculate::\n\n Neighbourhood sum = B - A - D + C\n\n At the central point, this will yield::\n\n Neighbourhood sum = 7 - 2 - 2 +1 => 4\n\n Args:\n summed_cube (iris.cube.Cube):\n cube on which to calculate the neighbourhood total.\n ymax_xmax_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point B in the docstring example.\n ymax_xmin_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point A in the docstring example.\n ymin_xmax_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point D in the docstring example.\n ymin_xmin_disp (int):\n Displacement from the point at the centre\n of the neighbourhood.\n Equivalent to point C in the docstring example.\n n_rows (int):\n Number of rows\n n_columns (int):\n Number of columns\n\n Returns:\n numpy.ndarray:\n Array containing the calculated neighbourhood total.\n '
flattened = summed_cube.data.flatten()
ymax_xmax_array = np.roll(flattened, (- ymax_xmax_disp))
ymin_xmax_array = np.roll(flattened, (- ymin_xmax_disp))
ymin_xmin_array = np.roll(flattened, (- ymin_xmin_disp))
ymax_xmin_array = np.roll(flattened, (- ymax_xmin_disp))
neighbourhood_total = (((ymax_xmax_array - ymin_xmax_array) + ymin_xmin_array) - ymax_xmin_array)
neighbourhood_total.resize(n_rows, n_columns)
return neighbourhood_total<|docstring|>Fast vectorised approach to calculating neighbourhood totals.
Displacements are calculated as follows for the following input array,
where the accumulation has occurred from top to
bottom and left to right::
| 1 | 2 | 2 | 2 |
| 1 | 3 | 4 | 4 |
| 2 | 4 | 5 | 6 |
| 2 | 4 | 6 | 7 |
For a 3x3 neighbourhood centred around the point with a value of 5::
| 1 (C) | 2 | 2 | 2 (D) |
| 1 | 3 | 4 | 4 |
| 2 | 4 | 5 (Central point) | 6 |
| 2 (A) | 4 | 6 | 7 (B) |
To calculate the value for the neighbourhood sum at the "Central point"
with a value of 5, calculate::
Neighbourhood sum = B - A - D + C
At the central point, this will yield::
Neighbourhood sum = 7 - 2 - 2 + 1 => 4
Args:
summed_cube (iris.cube.Cube):
cube on which to calculate the neighbourhood total.
ymax_xmax_disp (int):
Displacement from the point at the centre
of the neighbourhood.
Equivalent to point B in the docstring example.
ymax_xmin_disp (int):
Displacement from the point at the centre
of the neighbourhood.
Equivalent to point A in the docstring example.
ymin_xmax_disp (int):
Displacement from the point at the centre
of the neighbourhood.
Equivalent to point D in the docstring example.
ymin_xmin_disp (int):
Displacement from the point at the centre
of the neighbourhood.
Equivalent to point C in the docstring example.
n_rows (int):
Number of rows
n_columns (int):
Number of columns
Returns:
numpy.ndarray:
Array containing the calculated neighbourhood total.<|endoftext|> |
def mean_over_neighbourhood(self, summed_cube, summed_mask, cells, iscomplex=False):
    """Calculate the neighbourhood average using the 4-point algorithm.

    The cumulative sums produced by cumulate_array allow the total over
    a (2 * cells + 1)**2 square neighbourhood to be found from just four
    array look-ups per point (see calculate_neighbourhood).  Depending
    on the configured mode the result is either that raw neighbourhood
    sum, or the sum divided by the neighbourhood area (the mean over the
    valid points), where the area comes from summing the mask in the
    same way.

    Args:
        summed_cube (iris.cube.Cube):
            Cumulatively-summed data cube; must contain only x and y
            dimensions.  Modified in place and returned.
        summed_mask (iris.cube.Cube):
            Cumulatively-summed mask cube used to determine the valid
            neighbourhood area.  Must contain only x and y dimensions.
        cells (int):
            Radius of the neighbourhood in grid points along the x axis
            (excluding the central grid point).
        iscomplex (bool):
            True if the cube data contains complex values.

    Returns:
        iris.cube.Cube:
            Cube to which the square neighbourhood has been applied.
    """
    result_cube = summed_cube
    check_for_x_and_y_axes(summed_cube)
    n_rows = len(result_cube.coord(axis="y").points)
    n_columns = len(result_cube.coord(axis="x").points)
    # Flattened-index offsets of the four neighbourhood corners relative
    # to the central grid point.
    bottom = cells * n_columns
    top = -(cells + 1) * n_columns
    ymax_xmax_disp = bottom + cells  # corner B (bottom-right)
    ymax_xmin_disp = bottom - cells - 1  # corner A (bottom-left)
    ymin_xmax_disp = top + cells  # corner D (top-right)
    ymin_xmin_disp = top - cells - 1  # corner C (top-left)
    neighbourhood_total = self.calculate_neighbourhood(
        summed_cube, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp,
        ymax_xmin_disp, n_rows, n_columns)
    target_dtype = complex if iscomplex else float
    if self.sum_or_fraction == "sum":
        result_cube.data = neighbourhood_total.astype(target_dtype)
    elif self.sum_or_fraction == "fraction":
        neighbourhood_area = self.calculate_neighbourhood(
            summed_mask, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp,
            ymax_xmin_disp, n_rows, n_columns)
        # Fully-masked regions give a zero area: suppress the divide
        # warnings and convert the resulting non-finite values to NaN.
        with np.errstate(invalid="ignore", divide="ignore"):
            result_cube.data = (
                neighbourhood_total.astype(target_dtype)
                / neighbourhood_area.astype(target_dtype))
            result_cube.data[~np.isfinite(result_cube.data)] = np.nan
    return result_cube
the 4-point algorithm to find the total sum over the neighbourhood.
The output from the cumulate_array method can be used to
calculate the sum over a neighbourhood of size
(2*cells+1)**2. This sum is then divided by the area of
the neighbourhood to calculate the mean value in the neighbourhood.
For all points, a fast vectorised approach is taken:
1. The displacements between the four points used to calculate the
neighbourhood total sum and the central grid point are calculated.
2. Within the function calculate_neighbourhood...
Four copies of the cumulate array output are flattened and rolled
by these displacements to align the four terms used in the
neighbourhood total sum calculation.
3. The neighbourhood total at all points can then be calculated
simultaneously in a single vector sum.
Neighbourhood mean = Neighbourhood sum / Neighbourhood area
Neighbourhood area = (2 * nb_width +1)^2 if there are no missing
points, nb_width is the neighbourhood width, which is equal to 1 for a
3x3 neighbourhood.
Args:
summed_cube (iris.cube.Cube):
Summed Cube to which neighbourhood processing is being
applied. Must be passed through cumulate_array method first.
The cube should contain only x and y dimensions,
so will generally be a slice of a cube.
summed_mask (iris.cube.Cube):
Summed Mask used to calculate neighbourhood size.
Must be passed through cumulate_array method first.
The cube should contain only x and y dimensions,
so will generally be a slice of a cube.
cells (int):
The radius of the neighbourhood in grid points, in the x
direction (excluding the central grid point).
iscomplex (bool):
Flag indicating whether cube.data contains complex values.
Returns:
iris.cube.Cube:
Cube to which square neighbourhood has been applied. | improver/nbhood/square_kernel.py | mean_over_neighbourhood | ddlddl58/improver | 1 | python | def mean_over_neighbourhood(self, summed_cube, summed_mask, cells, iscomplex=False):
'\n Method to calculate the average value in a square neighbourhood using\n the 4-point algorithm to find the total sum over the neighbourhood.\n\n The output from the cumulate_array method can be used to\n calculate the sum over a neighbourhood of size\n (2*cells+1)**2. This sum is then divided by the area of\n the neighbourhood to calculate the mean value in the neighbourhood.\n\n For all points, a fast vectorised approach is taken:\n\n 1. The displacements between the four points used to calculate the\n neighbourhood total sum and the central grid point are calculated.\n 2. Within the function calculate_neighbourhood...\n Four copies of the cumulate array output are flattened and rolled\n by these displacements to align the four terms used in the\n neighbourhood total sum calculation.\n 3. The neighbourhood total at all points can then be calculated\n simultaneously in a single vector sum.\n\n Neighbourhood mean = Neighbourhood sum / Neighbourhood area\n\n Neighbourhood area = (2 * nb_width +1)^2 if there are no missing\n points, nb_width is the neighbourhood width, which is equal to 1 for a\n 3x3 neighbourhood.\n\n Args:\n summed_cube (iris.cube.Cube):\n Summed Cube to which neighbourhood processing is being\n applied. Must be passed through cumulate_array method first.\n The cube should contain only x and y dimensions,\n so will generally be a slice of a cube.\n summed_mask (iris.cube.Cube):\n Summed Mask used to calculate neighbourhood size.\n Must be passed through cumulate_array method first.\n The cube should contain only x and y dimensions,\n so will generally be a slice of a cube.\n cells (int):\n The radius of the neighbourhood in grid points, in the x\n direction (excluding the central grid point).\n iscomplex (bool):\n Flag indicating whether cube.data contains complex values.\n\n Returns:\n iris.cube.Cube:\n Cube to which square neighbourhood has been applied.\n '
cube = summed_cube
check_for_x_and_y_axes(summed_cube)
n_rows = len(cube.coord(axis='y').points)
n_columns = len(cube.coord(axis='x').points)
ymax_xmax_disp = ((cells * n_columns) + cells)
ymax_xmin_disp = (((cells * n_columns) - cells) - 1)
ymin_xmax_disp = ((((- 1) * (cells + 1)) * n_columns) + cells)
ymin_xmin_disp = (((((- 1) * (cells + 1)) * n_columns) - cells) - 1)
neighbourhood_total = self.calculate_neighbourhood(summed_cube, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp, ymax_xmin_disp, n_rows, n_columns)
if (self.sum_or_fraction == 'fraction'):
neighbourhood_area = self.calculate_neighbourhood(summed_mask, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp, ymax_xmin_disp, n_rows, n_columns)
with np.errstate(invalid='ignore', divide='ignore'):
if iscomplex:
cube.data = (neighbourhood_total.astype(complex) / neighbourhood_area.astype(complex))
else:
cube.data = (neighbourhood_total.astype(float) / neighbourhood_area.astype(float))
cube.data[(~ np.isfinite(cube.data))] = np.nan
elif (self.sum_or_fraction == 'sum'):
if iscomplex:
cube.data = neighbourhood_total.astype(complex)
else:
cube.data = neighbourhood_total.astype(float)
return cube | def mean_over_neighbourhood(self, summed_cube, summed_mask, cells, iscomplex=False):
'\n Method to calculate the average value in a square neighbourhood using\n the 4-point algorithm to find the total sum over the neighbourhood.\n\n The output from the cumulate_array method can be used to\n calculate the sum over a neighbourhood of size\n (2*cells+1)**2. This sum is then divided by the area of\n the neighbourhood to calculate the mean value in the neighbourhood.\n\n For all points, a fast vectorised approach is taken:\n\n 1. The displacements between the four points used to calculate the\n neighbourhood total sum and the central grid point are calculated.\n 2. Within the function calculate_neighbourhood...\n Four copies of the cumulate array output are flattened and rolled\n by these displacements to align the four terms used in the\n neighbourhood total sum calculation.\n 3. The neighbourhood total at all points can then be calculated\n simultaneously in a single vector sum.\n\n Neighbourhood mean = Neighbourhood sum / Neighbourhood area\n\n Neighbourhood area = (2 * nb_width +1)^2 if there are no missing\n points, nb_width is the neighbourhood width, which is equal to 1 for a\n 3x3 neighbourhood.\n\n Args:\n summed_cube (iris.cube.Cube):\n Summed Cube to which neighbourhood processing is being\n applied. Must be passed through cumulate_array method first.\n The cube should contain only x and y dimensions,\n so will generally be a slice of a cube.\n summed_mask (iris.cube.Cube):\n Summed Mask used to calculate neighbourhood size.\n Must be passed through cumulate_array method first.\n The cube should contain only x and y dimensions,\n so will generally be a slice of a cube.\n cells (int):\n The radius of the neighbourhood in grid points, in the x\n direction (excluding the central grid point).\n iscomplex (bool):\n Flag indicating whether cube.data contains complex values.\n\n Returns:\n iris.cube.Cube:\n Cube to which square neighbourhood has been applied.\n '
cube = summed_cube
check_for_x_and_y_axes(summed_cube)
n_rows = len(cube.coord(axis='y').points)
n_columns = len(cube.coord(axis='x').points)
ymax_xmax_disp = ((cells * n_columns) + cells)
ymax_xmin_disp = (((cells * n_columns) - cells) - 1)
ymin_xmax_disp = ((((- 1) * (cells + 1)) * n_columns) + cells)
ymin_xmin_disp = (((((- 1) * (cells + 1)) * n_columns) - cells) - 1)
neighbourhood_total = self.calculate_neighbourhood(summed_cube, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp, ymax_xmin_disp, n_rows, n_columns)
if (self.sum_or_fraction == 'fraction'):
neighbourhood_area = self.calculate_neighbourhood(summed_mask, ymax_xmax_disp, ymin_xmax_disp, ymin_xmin_disp, ymax_xmin_disp, n_rows, n_columns)
with np.errstate(invalid='ignore', divide='ignore'):
if iscomplex:
cube.data = (neighbourhood_total.astype(complex) / neighbourhood_area.astype(complex))
else:
cube.data = (neighbourhood_total.astype(float) / neighbourhood_area.astype(float))
cube.data[(~ np.isfinite(cube.data))] = np.nan
elif (self.sum_or_fraction == 'sum'):
if iscomplex:
cube.data = neighbourhood_total.astype(complex)
else:
cube.data = neighbourhood_total.astype(float)
return cube<|docstring|>Method to calculate the average value in a square neighbourhood using
the 4-point algorithm to find the total sum over the neighbourhood.
The output from the cumulate_array method can be used to
calculate the sum over a neighbourhood of size
(2*cells+1)**2. This sum is then divided by the area of
the neighbourhood to calculate the mean value in the neighbourhood.
For all points, a fast vectorised approach is taken:
1. The displacements between the four points used to calculate the
neighbourhood total sum and the central grid point are calculated.
2. Within the function calculate_neighbourhood...
Four copies of the cumulate array output are flattened and rolled
by these displacements to align the four terms used in the
neighbourhood total sum calculation.
3. The neighbourhood total at all points can then be calculated
simultaneously in a single vector sum.
Neighbourhood mean = Neighbourhood sum / Neighbourhood area
Neighbourhood area = (2 * nb_width +1)^2 if there are no missing
points, nb_width is the neighbourhood width, which is equal to 1 for a
3x3 neighbourhood.
Args:
summed_cube (iris.cube.Cube):
Summed Cube to which neighbourhood processing is being
applied. Must be passed through cumulate_array method first.
The cube should contain only x and y dimensions,
so will generally be a slice of a cube.
summed_mask (iris.cube.Cube):
Summed Mask used to calculate neighbourhood size.
Must be passed through cumulate_array method first.
The cube should contain only x and y dimensions,
so will generally be a slice of a cube.
cells (int):
The radius of the neighbourhood in grid points, in the x
direction (excluding the central grid point).
iscomplex (bool):
Flag indicating whether cube.data contains complex values.
Returns:
iris.cube.Cube:
Cube to which square neighbourhood has been applied.<|endoftext|> |
def set_up_cubes_to_be_neighbourhooded(cube, mask_cube=None):
    """Prepare a cube (and matching mask) for neighbourhooding.

    Masked and NaN points in the input are zeroed out so that the
    cumulative-sum approach can be applied, and a mask cube recording
    the valid points is produced alongside.

    Args:
        cube (iris.cube.Cube):
            x/y slice to be checked for masked or NaN data; modified in
            place.
        mask_cube (iris.cube.Cube):
            Optional cube whose data is used as the mask.  If not
            supplied an all-valid (all ones) mask is generated.

    Returns:
        (tuple): tuple containing:
            **cube** (iris.cube.Cube):
                Cube with masked or NaN values set to 0.0.
            **mask** (iris.cube.Cube):
                Mask cube with invalid points set to 0.0.
            **nan_array** (numpy.ndarray):
                Boolean array marking where the input data was NaN, used
                later to restore NaN values in the output.
    """
    if mask_cube:
        mask = mask_cube
    else:
        # No mask supplied: start from an all-valid mask of ones.
        mask = cube.copy()
        mask.data = np.real(np.ones_like(mask.data))
    if isinstance(cube.data, np.ma.MaskedArray):
        # Transfer the numpy mask onto the mask cube, then strip it from
        # the data so plain array arithmetic can be used.
        masked_points = np.where(cube.data.mask.astype(int) == 1)
        mask.data[masked_points] = 0.0
        cube.data = cube.data.data
    mask.rename("mask_data")
    cube = iris.util.squeeze(cube)
    mask = iris.util.squeeze(mask)
    nan_array = np.isnan(cube.data)
    mask.data[nan_array] = 0.0
    cube.data[nan_array] = 0.0
    cube.data = (cube.data * mask.data).astype(cube.data.dtype)
    return cube, mask, nan_array
Args:
cube (iris.cube.Cube):
Cube that will be checked for whether the data is masked
or nan. The cube should contain only x and y dimensions,
so will generally be a slice of a cube.
mask_cube (iris.cube.Cube):
Input Cube containing the array to be used as a mask.
Returns:
(tuple): tuple containing:
**cube** (iris.cube.Cube):
Cube with masked or NaN values set to 0.0
**mask** (iris.cube.Cube):
Cube with masked or NaN values set to 0.0
**nan_array** (numpy.ndarray):
numpy array to be used to set the values within
the data of the output cube to be NaN. | improver/nbhood/square_kernel.py | set_up_cubes_to_be_neighbourhooded | ddlddl58/improver | 1 | python | @staticmethod
def set_up_cubes_to_be_neighbourhooded(cube, mask_cube=None):
'\n Set up a cube ready for neighourhooding the data.\n\n Args:\n cube (iris.cube.Cube):\n Cube that will be checked for whether the data is masked\n or nan. The cube should contain only x and y dimensions,\n so will generally be a slice of a cube.\n mask_cube (iris.cube.Cube):\n Input Cube containing the array to be used as a mask.\n\n Returns:\n (tuple): tuple containing:\n **cube** (iris.cube.Cube):\n Cube with masked or NaN values set to 0.0\n **mask** (iris.cube.Cube):\n Cube with masked or NaN values set to 0.0\n **nan_array** (numpy.ndarray):\n numpy array to be used to set the values within\n the data of the output cube to be NaN.\n\n '
if (not mask_cube):
mask = cube.copy()
mask.data = np.real(np.ones_like(mask.data))
else:
mask = mask_cube
if isinstance(cube.data, np.ma.MaskedArray):
index = np.where((cube.data.mask.astype(int) == 1))
mask.data[index] = 0.0
cube.data = cube.data.data
mask.rename('mask_data')
cube = iris.util.squeeze(cube)
mask = iris.util.squeeze(mask)
nan_array = np.isnan(cube.data)
mask.data[nan_array] = 0.0
cube.data[nan_array] = 0.0
cube.data = (cube.data * mask.data).astype(cube.data.dtype)
return (cube, mask, nan_array) | @staticmethod
def set_up_cubes_to_be_neighbourhooded(cube, mask_cube=None):
'\n Set up a cube ready for neighourhooding the data.\n\n Args:\n cube (iris.cube.Cube):\n Cube that will be checked for whether the data is masked\n or nan. The cube should contain only x and y dimensions,\n so will generally be a slice of a cube.\n mask_cube (iris.cube.Cube):\n Input Cube containing the array to be used as a mask.\n\n Returns:\n (tuple): tuple containing:\n **cube** (iris.cube.Cube):\n Cube with masked or NaN values set to 0.0\n **mask** (iris.cube.Cube):\n Cube with masked or NaN values set to 0.0\n **nan_array** (numpy.ndarray):\n numpy array to be used to set the values within\n the data of the output cube to be NaN.\n\n '
if (not mask_cube):
mask = cube.copy()
mask.data = np.real(np.ones_like(mask.data))
else:
mask = mask_cube
if isinstance(cube.data, np.ma.MaskedArray):
index = np.where((cube.data.mask.astype(int) == 1))
mask.data[index] = 0.0
cube.data = cube.data.data
mask.rename('mask_data')
cube = iris.util.squeeze(cube)
mask = iris.util.squeeze(mask)
nan_array = np.isnan(cube.data)
mask.data[nan_array] = 0.0
cube.data[nan_array] = 0.0
cube.data = (cube.data * mask.data).astype(cube.data.dtype)
return (cube, mask, nan_array)<|docstring|>Set up a cube ready for neighbourhooding the data.
Args:
cube (iris.cube.Cube):
Cube that will be checked for whether the data is masked
or nan. The cube should contain only x and y dimensions,
so will generally be a slice of a cube.
mask_cube (iris.cube.Cube):
Input Cube containing the array to be used as a mask.
Returns:
(tuple): tuple containing:
**cube** (iris.cube.Cube):
Cube with masked or NaN values set to 0.0
**mask** (iris.cube.Cube):
Cube with masked or NaN values set to 0.0
**nan_array** (numpy.ndarray):
numpy array to be used to set the values within
the data of the output cube to be NaN.<|endoftext|> |
def _pad_and_calculate_neighbourhood(self, cube, mask, grid_cells):
    """Pad the input with a halo and neighbourhood-process it.

    Steps:
        1. Pad the cube and mask with a halo so that edge points can be
           handled by the vectorised neighbourhooding.
        2. Cumulatively sum both padded arrays along the x and y axes.
        3. Compute the neighbourhood result from the cumulative sums.

    Args:
        cube (iris.cube.Cube):
            Data cube with masked/NaN values already set to 0.0.
        mask (iris.cube.Cube):
            Matching mask cube with invalid points set to 0.0.
        grid_cells (float or int):
            Number of grid cells along the x axis used to create a
            square neighbourhood.

    Returns:
        iris.cube.Cube:
            Neighbourhood-processed cube, still carrying the halo.
    """
    halo_width = grid_cells + 1
    padded_cube = pad_cube_with_halo(cube, halo_width, halo_width)
    padded_mask = pad_cube_with_halo(mask, halo_width, halo_width)
    is_complex = np.any(np.iscomplex(cube.data))
    summed_up_cube = self.cumulate_array(padded_cube, is_complex)
    summed_up_mask = self.cumulate_array(padded_mask)
    nbhood_cube = self.mean_over_neighbourhood(
        summed_up_cube, summed_up_mask, grid_cells, is_complex)
    # Keep the output at single precision.
    if nbhood_cube.dtype in (np.float64, np.longdouble):
        nbhood_cube.data = nbhood_cube.data.astype(np.float32)
    return nbhood_cube
1. Pad a halo around the input cube to allow vectorised
neighbourhooding at edgepoints.
2. Cumulate the array along the x and y axes.
3. Apply neighbourhood processing to the cumulated array.
Args:
cube (iris.cube.Cube):
Cube with masked or NaN values set to 0.0
mask (iris.cube.Cube):
Cube with masked or NaN values set to 0.0
grid_cells (float or int):
The number of grid cells along the x axis used to create a
square neighbourhood.
Returns:
iris.cube.Cube:
Cube containing the smoothed field after the square
neighbourhood method has been applied with halo added. | improver/nbhood/square_kernel.py | _pad_and_calculate_neighbourhood | ddlddl58/improver | 1 | python | def _pad_and_calculate_neighbourhood(self, cube, mask, grid_cells):
'\n Apply neighbourhood processing consisting of the following steps:\n\n 1. Pad a halo around the input cube to allow vectorised\n neighbourhooding at edgepoints.\n 2. Cumulate the array along the x and y axes.\n 3. Apply neighbourhood processing to the cumulated array.\n\n Args:\n cube (iris.cube.Cube):\n Cube with masked or NaN values set to 0.0\n mask (iris.cube.Cube):\n Cube with masked or NaN values set to 0.0\n grid_cells (float or int):\n The number of grid cells along the x axis used to create a\n square neighbourhood.\n\n Returns:\n iris.cube.Cube:\n Cube containing the smoothed field after the square\n neighbourhood method has been applied with halo added.\n '
padded_cube = pad_cube_with_halo(cube, (grid_cells + 1), (grid_cells + 1))
padded_mask = pad_cube_with_halo(mask, (grid_cells + 1), (grid_cells + 1))
is_complex = np.any(np.iscomplex(cube.data))
summed_up_cube = self.cumulate_array(padded_cube, is_complex)
summed_up_mask = self.cumulate_array(padded_mask)
neighbourhood_averaged_cube = self.mean_over_neighbourhood(summed_up_cube, summed_up_mask, grid_cells, is_complex)
if (neighbourhood_averaged_cube.dtype in [np.float64, np.longdouble]):
neighbourhood_averaged_cube.data = neighbourhood_averaged_cube.data.astype(np.float32)
return neighbourhood_averaged_cube | def _pad_and_calculate_neighbourhood(self, cube, mask, grid_cells):
'\n Apply neighbourhood processing consisting of the following steps:\n\n 1. Pad a halo around the input cube to allow vectorised\n neighbourhooding at edgepoints.\n 2. Cumulate the array along the x and y axes.\n 3. Apply neighbourhood processing to the cumulated array.\n\n Args:\n cube (iris.cube.Cube):\n Cube with masked or NaN values set to 0.0\n mask (iris.cube.Cube):\n Cube with masked or NaN values set to 0.0\n grid_cells (float or int):\n The number of grid cells along the x axis used to create a\n square neighbourhood.\n\n Returns:\n iris.cube.Cube:\n Cube containing the smoothed field after the square\n neighbourhood method has been applied with halo added.\n '
padded_cube = pad_cube_with_halo(cube, (grid_cells + 1), (grid_cells + 1))
padded_mask = pad_cube_with_halo(mask, (grid_cells + 1), (grid_cells + 1))
is_complex = np.any(np.iscomplex(cube.data))
summed_up_cube = self.cumulate_array(padded_cube, is_complex)
summed_up_mask = self.cumulate_array(padded_mask)
neighbourhood_averaged_cube = self.mean_over_neighbourhood(summed_up_cube, summed_up_mask, grid_cells, is_complex)
if (neighbourhood_averaged_cube.dtype in [np.float64, np.longdouble]):
neighbourhood_averaged_cube.data = neighbourhood_averaged_cube.data.astype(np.float32)
return neighbourhood_averaged_cube<|docstring|>Apply neighbourhood processing consisting of the following steps:
1. Pad a halo around the input cube to allow vectorised
neighbourhooding at edgepoints.
2. Cumulate the array along the x and y axes.
3. Apply neighbourhood processing to the cumulated array.
Args:
cube (iris.cube.Cube):
Cube with masked or NaN values set to 0.0
mask (iris.cube.Cube):
Cube with masked or NaN values set to 0.0
grid_cells (float or int):
The number of grid cells along the x axis used to create a
square neighbourhood.
Returns:
iris.cube.Cube:
Cube containing the smoothed field after the square
neighbourhood method has been applied with halo added.<|endoftext|> |
4038a59b11590041ea92f1fba645e0d0fa8f93d4198c2fd8d02768dc0c038c12 | def _remove_padding_and_mask(self, neighbourhood_averaged_cube, original_cube, mask, grid_cells):
'\n Remove the halo from the padded array and apply the mask, if required.\n If fraction option set, clip the data so values lie within\n the range of the original cube.\n\n Args:\n neighbourhood_averaged_cube (iris.cube.Cube):\n Cube containing the smoothed field after the square\n neighbourhood method has been applied.\n original_cube (iris.cube.Cube or None):\n The original cube slice.\n mask (iris.cube.Cube):\n The mask cube created by set_up_cubes_to_be_neighbourhooded.\n grid_cells (float or int):\n The number of grid cells used to create a square\n neighbourhood (assuming an equal area grid).\n\n Returns:\n iris.cube.Cube:\n Cube containing the smoothed field after the square\n neighbourhood method has been applied and halo removed.\n '
neighbourhood_averaged_cube = remove_halo_from_cube(neighbourhood_averaged_cube, (grid_cells + 1), (grid_cells + 1))
if (self.re_mask and (mask.data.min() < 1.0)):
neighbourhood_averaged_cube.data = np.ma.masked_array(neighbourhood_averaged_cube.data, mask=np.logical_not(mask.data.squeeze()))
if (self.sum_or_fraction == 'fraction'):
min_val = np.nanmin(original_cube.data)
max_val = np.nanmax(original_cube.data)
neighbourhood_averaged_cube = clip_cube_data(neighbourhood_averaged_cube, min_val, max_val)
return neighbourhood_averaged_cube | Remove the halo from the padded array and apply the mask, if required.
If fraction option set, clip the data so values lie within
the range of the original cube.
Args:
neighbourhood_averaged_cube (iris.cube.Cube):
Cube containing the smoothed field after the square
neighbourhood method has been applied.
original_cube (iris.cube.Cube or None):
The original cube slice.
mask (iris.cube.Cube):
The mask cube created by set_up_cubes_to_be_neighbourhooded.
grid_cells (float or int):
The number of grid cells used to create a square
neighbourhood (assuming an equal area grid).
Returns:
iris.cube.Cube:
Cube containing the smoothed field after the square
neighbourhood method has been applied and halo removed. | improver/nbhood/square_kernel.py | _remove_padding_and_mask | ddlddl58/improver | 1 | python | def _remove_padding_and_mask(self, neighbourhood_averaged_cube, original_cube, mask, grid_cells):
'\n Remove the halo from the padded array and apply the mask, if required.\n If fraction option set, clip the data so values lie within\n the range of the original cube.\n\n Args:\n neighbourhood_averaged_cube (iris.cube.Cube):\n Cube containing the smoothed field after the square\n neighbourhood method has been applied.\n original_cube (iris.cube.Cube or None):\n The original cube slice.\n mask (iris.cube.Cube):\n The mask cube created by set_up_cubes_to_be_neighbourhooded.\n grid_cells (float or int):\n The number of grid cells used to create a square\n neighbourhood (assuming an equal area grid).\n\n Returns:\n iris.cube.Cube:\n Cube containing the smoothed field after the square\n neighbourhood method has been applied and halo removed.\n '
neighbourhood_averaged_cube = remove_halo_from_cube(neighbourhood_averaged_cube, (grid_cells + 1), (grid_cells + 1))
if (self.re_mask and (mask.data.min() < 1.0)):
neighbourhood_averaged_cube.data = np.ma.masked_array(neighbourhood_averaged_cube.data, mask=np.logical_not(mask.data.squeeze()))
if (self.sum_or_fraction == 'fraction'):
min_val = np.nanmin(original_cube.data)
max_val = np.nanmax(original_cube.data)
neighbourhood_averaged_cube = clip_cube_data(neighbourhood_averaged_cube, min_val, max_val)
return neighbourhood_averaged_cube | def _remove_padding_and_mask(self, neighbourhood_averaged_cube, original_cube, mask, grid_cells):
'\n Remove the halo from the padded array and apply the mask, if required.\n If fraction option set, clip the data so values lie within\n the range of the original cube.\n\n Args:\n neighbourhood_averaged_cube (iris.cube.Cube):\n Cube containing the smoothed field after the square\n neighbourhood method has been applied.\n original_cube (iris.cube.Cube or None):\n The original cube slice.\n mask (iris.cube.Cube):\n The mask cube created by set_up_cubes_to_be_neighbourhooded.\n grid_cells (float or int):\n The number of grid cells used to create a square\n neighbourhood (assuming an equal area grid).\n\n Returns:\n iris.cube.Cube:\n Cube containing the smoothed field after the square\n neighbourhood method has been applied and halo removed.\n '
neighbourhood_averaged_cube = remove_halo_from_cube(neighbourhood_averaged_cube, (grid_cells + 1), (grid_cells + 1))
if (self.re_mask and (mask.data.min() < 1.0)):
neighbourhood_averaged_cube.data = np.ma.masked_array(neighbourhood_averaged_cube.data, mask=np.logical_not(mask.data.squeeze()))
if (self.sum_or_fraction == 'fraction'):
min_val = np.nanmin(original_cube.data)
max_val = np.nanmax(original_cube.data)
neighbourhood_averaged_cube = clip_cube_data(neighbourhood_averaged_cube, min_val, max_val)
return neighbourhood_averaged_cube<|docstring|>Remove the halo from the padded array and apply the mask, if required.
If fraction option set, clip the data so values lie within
the range of the original cube.
Args:
neighbourhood_averaged_cube (iris.cube.Cube):
Cube containing the smoothed field after the square
neighbourhood method has been applied.
original_cube (iris.cube.Cube or None):
The original cube slice.
mask (iris.cube.Cube):
The mask cube created by set_up_cubes_to_be_neighbourhooded.
grid_cells (float or int):
The number of grid cells used to create a square
neighbourhood (assuming an equal area grid).
Returns:
iris.cube.Cube:
Cube containing the smoothed field after the square
neighbourhood method has been applied and halo removed.<|endoftext|> |
f0909a321236ee6c95fe44f5e84fdfc46d3f4d399511edea5fe0a28413aedf89 | def run(self, cube, radius, mask_cube=None):
'\n Call the methods required to apply a square neighbourhood\n method to a cube.\n\n The steps undertaken are:\n\n 1. Set up cubes by determining, if the arrays are masked.\n 2. Pad the input array with a halo and then calculate the neighbourhood\n of the haloed array.\n 3. Remove the halo from the neighbourhooded array and deal with a mask,\n if required.\n\n Args:\n cube (iris.cube.Cube):\n Cube containing the array to which the square neighbourhood\n will be applied.\n radius (float):\n Radius in metres for use in specifying the number of\n grid cells used to create a square neighbourhood.\n mask_cube (iris.cube.Cube):\n Cube containing the array to be used as a mask.\n\n Returns:\n iris.cube.Cube:\n Cube containing the smoothed field after the square\n neighbourhood method has been applied.\n '
check_radius_against_distance(cube, radius)
original_attributes = cube.attributes
original_methods = cube.cell_methods
grid_cells = distance_to_number_of_grid_cells(cube, radius)
result_slices = iris.cube.CubeList()
for cube_slice in cube.slices([cube.coord(axis='y'), cube.coord(axis='x')]):
(cube_slice, mask, nan_array) = self.set_up_cubes_to_be_neighbourhooded(cube_slice, mask_cube)
neighbourhood_averaged_cube = self._pad_and_calculate_neighbourhood(cube_slice, mask, grid_cells)
neighbourhood_averaged_cube = self._remove_padding_and_mask(neighbourhood_averaged_cube, cube_slice, mask, grid_cells)
neighbourhood_averaged_cube.data[nan_array.astype(bool)] = np.nan
result_slices.append(neighbourhood_averaged_cube)
neighbourhood_averaged_cube = result_slices.merge_cube()
neighbourhood_averaged_cube.cell_methods = original_methods
neighbourhood_averaged_cube.attributes = original_attributes
neighbourhood_averaged_cube = check_cube_coordinates(cube, neighbourhood_averaged_cube)
return neighbourhood_averaged_cube | Call the methods required to apply a square neighbourhood
method to a cube.
The steps undertaken are:
1. Set up cubes by determining, if the arrays are masked.
2. Pad the input array with a halo and then calculate the neighbourhood
of the haloed array.
3. Remove the halo from the neighbourhooded array and deal with a mask,
if required.
Args:
cube (iris.cube.Cube):
Cube containing the array to which the square neighbourhood
will be applied.
radius (float):
Radius in metres for use in specifying the number of
grid cells used to create a square neighbourhood.
mask_cube (iris.cube.Cube):
Cube containing the array to be used as a mask.
Returns:
iris.cube.Cube:
Cube containing the smoothed field after the square
neighbourhood method has been applied. | improver/nbhood/square_kernel.py | run | ddlddl58/improver | 1 | python | def run(self, cube, radius, mask_cube=None):
'\n Call the methods required to apply a square neighbourhood\n method to a cube.\n\n The steps undertaken are:\n\n 1. Set up cubes by determining, if the arrays are masked.\n 2. Pad the input array with a halo and then calculate the neighbourhood\n of the haloed array.\n 3. Remove the halo from the neighbourhooded array and deal with a mask,\n if required.\n\n Args:\n cube (iris.cube.Cube):\n Cube containing the array to which the square neighbourhood\n will be applied.\n radius (float):\n Radius in metres for use in specifying the number of\n grid cells used to create a square neighbourhood.\n mask_cube (iris.cube.Cube):\n Cube containing the array to be used as a mask.\n\n Returns:\n iris.cube.Cube:\n Cube containing the smoothed field after the square\n neighbourhood method has been applied.\n '
check_radius_against_distance(cube, radius)
original_attributes = cube.attributes
original_methods = cube.cell_methods
grid_cells = distance_to_number_of_grid_cells(cube, radius)
result_slices = iris.cube.CubeList()
for cube_slice in cube.slices([cube.coord(axis='y'), cube.coord(axis='x')]):
(cube_slice, mask, nan_array) = self.set_up_cubes_to_be_neighbourhooded(cube_slice, mask_cube)
neighbourhood_averaged_cube = self._pad_and_calculate_neighbourhood(cube_slice, mask, grid_cells)
neighbourhood_averaged_cube = self._remove_padding_and_mask(neighbourhood_averaged_cube, cube_slice, mask, grid_cells)
neighbourhood_averaged_cube.data[nan_array.astype(bool)] = np.nan
result_slices.append(neighbourhood_averaged_cube)
neighbourhood_averaged_cube = result_slices.merge_cube()
neighbourhood_averaged_cube.cell_methods = original_methods
neighbourhood_averaged_cube.attributes = original_attributes
neighbourhood_averaged_cube = check_cube_coordinates(cube, neighbourhood_averaged_cube)
return neighbourhood_averaged_cube | def run(self, cube, radius, mask_cube=None):
'\n Call the methods required to apply a square neighbourhood\n method to a cube.\n\n The steps undertaken are:\n\n 1. Set up cubes by determining, if the arrays are masked.\n 2. Pad the input array with a halo and then calculate the neighbourhood\n of the haloed array.\n 3. Remove the halo from the neighbourhooded array and deal with a mask,\n if required.\n\n Args:\n cube (iris.cube.Cube):\n Cube containing the array to which the square neighbourhood\n will be applied.\n radius (float):\n Radius in metres for use in specifying the number of\n grid cells used to create a square neighbourhood.\n mask_cube (iris.cube.Cube):\n Cube containing the array to be used as a mask.\n\n Returns:\n iris.cube.Cube:\n Cube containing the smoothed field after the square\n neighbourhood method has been applied.\n '
check_radius_against_distance(cube, radius)
original_attributes = cube.attributes
original_methods = cube.cell_methods
grid_cells = distance_to_number_of_grid_cells(cube, radius)
result_slices = iris.cube.CubeList()
for cube_slice in cube.slices([cube.coord(axis='y'), cube.coord(axis='x')]):
(cube_slice, mask, nan_array) = self.set_up_cubes_to_be_neighbourhooded(cube_slice, mask_cube)
neighbourhood_averaged_cube = self._pad_and_calculate_neighbourhood(cube_slice, mask, grid_cells)
neighbourhood_averaged_cube = self._remove_padding_and_mask(neighbourhood_averaged_cube, cube_slice, mask, grid_cells)
neighbourhood_averaged_cube.data[nan_array.astype(bool)] = np.nan
result_slices.append(neighbourhood_averaged_cube)
neighbourhood_averaged_cube = result_slices.merge_cube()
neighbourhood_averaged_cube.cell_methods = original_methods
neighbourhood_averaged_cube.attributes = original_attributes
neighbourhood_averaged_cube = check_cube_coordinates(cube, neighbourhood_averaged_cube)
return neighbourhood_averaged_cube<|docstring|>Call the methods required to apply a square neighbourhood
method to a cube.
The steps undertaken are:
1. Set up cubes by determining, if the arrays are masked.
2. Pad the input array with a halo and then calculate the neighbourhood
of the haloed array.
3. Remove the halo from the neighbourhooded array and deal with a mask,
if required.
Args:
cube (iris.cube.Cube):
Cube containing the array to which the square neighbourhood
will be applied.
radius (float):
Radius in metres for use in specifying the number of
grid cells used to create a square neighbourhood.
mask_cube (iris.cube.Cube):
Cube containing the array to be used as a mask.
Returns:
iris.cube.Cube:
Cube containing the smoothed field after the square
neighbourhood method has been applied.<|endoftext|> |
d0d2c364f06ef2c996af42ed65b3cb798a931233f0ec2fe4aa178369d2ea551d | def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
'Runs the network request through the client\'s chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest("GET", "https://www.example.org/")\n <HttpRequest [GET], url: \'https://www.example.org/\'>\n >>> response = await client._send_request(request)\n <AsyncHttpResponse: 200 OK>\n\n For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.rest.AsyncHttpResponse\n '
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_10_01_preview/aio/_compute_management_client.py | _send_request | AikoBB/azure-sdk-for-python | 1 | python | def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
'Runs the network request through the client\'s chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest("GET", "https://www.example.org/")\n <HttpRequest [GET], url: \'https://www.example.org/\'>\n >>> response = await client._send_request(request)\n <AsyncHttpResponse: 200 OK>\n\n For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.rest.AsyncHttpResponse\n '
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
'Runs the network request through the client\'s chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest("GET", "https://www.example.org/")\n <HttpRequest [GET], url: \'https://www.example.org/\'>\n >>> response = await client._send_request(request)\n <AsyncHttpResponse: 200 OK>\n\n For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.rest.AsyncHttpResponse\n '
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)<|docstring|>Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse<|endoftext|> |
91959f2cd03c0b45f4cd98dce157e355a38a0d05591e875a5a9138157611d6fc | @abc.abstractmethod
def validate(self):
'Ensure the command is valid.' | Ensure the command is valid. | src/shrub/operations.py | validate | zamj/shrub.py | 1 | python | @abc.abstractmethod
def validate(self):
| @abc.abstractmethod
def validate(self):
<|docstring|>Ensure the command is valid.<|endoftext|> |
b50b2675d535d43f6af11bad35a73368a4d6e2969599072ce9646153e48017e7 | @abc.abstractmethod
def _command_type(self):
'Return the type of this command.' | Return the type of this command. | src/shrub/operations.py | _command_type | zamj/shrub.py | 1 | python | @abc.abstractmethod
def _command_type(self):
| @abc.abstractmethod
def _command_type(self):
<|docstring|>Return the type of this command.<|endoftext|> |
7c7c61b4bcbed0358d0a613e707ba7c0c36111b8b142b01f57222fa08f99980e | @abc.abstractmethod
def _param_list(self):
'Return a map of the params supported by this command.' | Return a map of the params supported by this command. | src/shrub/operations.py | _param_list | zamj/shrub.py | 1 | python | @abc.abstractmethod
def _param_list(self):
| @abc.abstractmethod
def _param_list(self):
<|docstring|>Return a map of the params supported by this command.<|endoftext|> |
18a5fbde2e568b719e2699a9ddd65f4c4973d1a1c8073c90f1ffcdc30ce86546 | def _export_params(self):
'Return a map of the parameters for this command.'
obj = {}
self._add_defined_attribs(obj, self._param_list().keys())
return obj | Return a map of the parameters for this command. | src/shrub/operations.py | _export_params | zamj/shrub.py | 1 | python | def _export_params(self):
obj = {}
self._add_defined_attribs(obj, self._param_list().keys())
return obj | def _export_params(self):
obj = {}
self._add_defined_attribs(obj, self._param_list().keys())
return obj<|docstring|>Return a map of the parameters for this command.<|endoftext|> |
2c2c931f3777bc56952751c373edb95eade89db8dc7de42d9b6e5ab6e04ae9d2 | def _add_if_defined(self, obj, prop):
'Add the specified property to the given object if it exists.'
value = getattr(self, prop)
if value:
obj[self._param_list()[prop]] = value | Add the specified property to the given object if it exists. | src/shrub/operations.py | _add_if_defined | zamj/shrub.py | 1 | python | def _add_if_defined(self, obj, prop):
value = getattr(self, prop)
if value:
obj[self._param_list()[prop]] = value | def _add_if_defined(self, obj, prop):
value = getattr(self, prop)
if value:
obj[self._param_list()[prop]] = value<|docstring|>Add the specified property to the given object if it exists.<|endoftext|> |
fbfad5fedc0659510a23bc1ac5a01f8d895e793b4891f32721869085860d6e1e | def _add_defined_attribs(self, obj, attrib_list):
'Add any defined attributes in the given list to the given map.'
for attrib in attrib_list:
self._add_if_defined(obj, attrib) | Add any defined attributes in the given list to the given map. | src/shrub/operations.py | _add_defined_attribs | zamj/shrub.py | 1 | python | def _add_defined_attribs(self, obj, attrib_list):
for attrib in attrib_list:
self._add_if_defined(obj, attrib) | def _add_defined_attribs(self, obj, attrib_list):
for attrib in attrib_list:
self._add_if_defined(obj, attrib)<|docstring|>Add any defined attributes in the given list to the given map.<|endoftext|> |
c30378ef2443292eec45f5a1e43a5ebbcc1461bc6587ddf63b1752db02e8a43b | def resolve(self):
'Create a CommandDefinition from this object.'
cmd = CommandDefinition().command(self._command_type())
return cmd.params(self._export_params()) | Create a CommandDefinition from this object. | src/shrub/operations.py | resolve | zamj/shrub.py | 1 | python | def resolve(self):
cmd = CommandDefinition().command(self._command_type())
return cmd.params(self._export_params()) | def resolve(self):
cmd = CommandDefinition().command(self._command_type())
return cmd.params(self._export_params())<|docstring|>Create a CommandDefinition from this object.<|endoftext|> |
f233123cecd114e6cf6319b1ddf00683eb9043a73ba47286cf53565881f2cd1c | def read_history(file_loc):
'Uses the translate file to produce a catalog with the appropriate filters\n\n Parameters:\n file_loc (string): location of the folder containing the history.data file\n\n\n Returns:\n df (pd.DataFrame): Pandas dataframe with columsn as the labels provided by mesa\n '
df = ascii.read((file_loc + 'history.data'), header_start=4, data_start=5).to_pandas()
return df | Uses the translate file to produce a catalog with the appropriate filters
Parameters:
file_loc (string): location of the folder containing the history.data file
Returns:
df (pd.DataFrame): Pandas dataframe with columsn as the labels provided by mesa | courses/stars/ps1/read_mesa.py | read_history | brianlorenz/code | 0 | python | def read_history(file_loc):
'Uses the translate file to produce a catalog with the appropriate filters\n\n Parameters:\n file_loc (string): location of the folder containing the history.data file\n\n\n Returns:\n df (pd.DataFrame): Pandas dataframe with columsn as the labels provided by mesa\n '
df = ascii.read((file_loc + 'history.data'), header_start=4, data_start=5).to_pandas()
return df | def read_history(file_loc):
'Uses the translate file to produce a catalog with the appropriate filters\n\n Parameters:\n file_loc (string): location of the folder containing the history.data file\n\n\n Returns:\n df (pd.DataFrame): Pandas dataframe with columsn as the labels provided by mesa\n '
df = ascii.read((file_loc + 'history.data'), header_start=4, data_start=5).to_pandas()
return df<|docstring|>Uses the translate file to produce a catalog with the appropriate filters
Parameters:
file_loc (string): location of the folder containing the history.data file
Returns:
df (pd.DataFrame): Pandas dataframe with columsn as the labels provided by mesa<|endoftext|> |
d82403e70b9c423477a6c9d177566bdacdbdbee7539054b81179e5372bae0bc0 | def collect(self):
'\n Take a list of metrics, filter all metrics based on hostname, and metric_type\n For each metric, merge the corresponding csv files into one,update corresponding properties such as csv_column_map.\n Users can specify functions: raw, count (qps), sum (aggregated value), avg (averaged value)\n The timestamp granularity of aggregated submetrics is in seconds (sub-second is not supported)\n '
for aggr_metric in self.aggr_metrics:
functions_aggr = []
fields = aggr_metric.split(':')
cur_metric_type = fields[0].split('.')[0]
if (len(fields) > 1):
func_user = ''.join(fields[1].split())
functions_aggr.extend(func_user.split(','))
else:
return True
cur_column = '.'.join(fields[0].split('.')[1:])
aggr_data = {}
aggr_data['raw'] = []
aggr_data['sum'] = defaultdict(float)
aggr_data['count'] = defaultdict(int)
for metric in self.metrics:
if ((metric.hostname in self.aggr_hosts) and (cur_column in metric.csv_column_map.values())):
file_csv = metric.get_csv(cur_column)
timestamp_format = None
with open(file_csv) as fh:
for line in fh:
aggr_data['raw'].append(line.rstrip())
words = line.split(',')
ts = words[0].split('.')[0]
if ((not timestamp_format) or (timestamp_format == 'unknown')):
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if (timestamp_format == 'unknown'):
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
aggr_data['sum'][ts] += float(words[1])
aggr_data['count'][ts] += 1
if ('raw' in functions_aggr):
out_csv = self.get_csv(cur_column, 'raw')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
fh.write('\n'.join(sorted(aggr_data['raw'])))
if ('sum' in functions_aggr):
out_csv = self.get_csv(cur_column, 'sum')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['sum'].items()):
fh.write((((k + ',') + str(v)) + '\n'))
if ('avg' in functions_aggr):
out_csv = self.get_csv(cur_column, 'avg')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['sum'].items()):
fh.write((((k + ',') + str((v / aggr_data['count'][k]))) + '\n'))
if ('count' in functions_aggr):
out_csv = self.get_csv(cur_column, 'count')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['count'].items()):
fh.write((((k + ',') + str(v)) + '\n'))
gc.collect()
return True | Take a list of metrics, filter all metrics based on hostname, and metric_type
For each metric, merge the corresponding csv files into one,update corresponding properties such as csv_column_map.
Users can specify functions: raw, count (qps), sum (aggregated value), avg (averaged value)
The timestamp granularity of aggregated submetrics is in seconds (sub-second is not supported) | src/naarad/metrics/cluster_metric.py | collect | richardhsu/naarad | 180 | python | def collect(self):
'\n Take a list of metrics, filter all metrics based on hostname, and metric_type\n For each metric, merge the corresponding csv files into one,update corresponding properties such as csv_column_map.\n Users can specify functions: raw, count (qps), sum (aggregated value), avg (averaged value)\n The timestamp granularity of aggregated submetrics is in seconds (sub-second is not supported)\n '
for aggr_metric in self.aggr_metrics:
functions_aggr = []
fields = aggr_metric.split(':')
cur_metric_type = fields[0].split('.')[0]
if (len(fields) > 1):
func_user = .join(fields[1].split())
functions_aggr.extend(func_user.split(','))
else:
return True
cur_column = '.'.join(fields[0].split('.')[1:])
aggr_data = {}
aggr_data['raw'] = []
aggr_data['sum'] = defaultdict(float)
aggr_data['count'] = defaultdict(int)
for metric in self.metrics:
if ((metric.hostname in self.aggr_hosts) and (cur_column in metric.csv_column_map.values())):
file_csv = metric.get_csv(cur_column)
timestamp_format = None
with open(file_csv) as fh:
for line in fh:
aggr_data['raw'].append(line.rstrip())
words = line.split(',')
ts = words[0].split('.')[0]
if ((not timestamp_format) or (timestamp_format == 'unknown')):
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if (timestamp_format == 'unknown'):
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
aggr_data['sum'][ts] += float(words[1])
aggr_data['count'][ts] += 1
if ('raw' in functions_aggr):
out_csv = self.get_csv(cur_column, 'raw')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
fh.write('\n'.join(sorted(aggr_data['raw'])))
if ('sum' in functions_aggr):
out_csv = self.get_csv(cur_column, 'sum')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['sum'].items()):
fh.write((((k + ',') + str(v)) + '\n'))
if ('avg' in functions_aggr):
out_csv = self.get_csv(cur_column, 'avg')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['sum'].items()):
fh.write((((k + ',') + str((v / aggr_data['count'][k]))) + '\n'))
if ('count' in functions_aggr):
out_csv = self.get_csv(cur_column, 'count')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['count'].items()):
fh.write((((k + ',') + str(v)) + '\n'))
gc.collect()
return True | def collect(self):
'\n Take a list of metrics, filter all metrics based on hostname, and metric_type\n For each metric, merge the corresponding csv files into one,update corresponding properties such as csv_column_map.\n Users can specify functions: raw, count (qps), sum (aggregated value), avg (averaged value)\n The timestamp granularity of aggregated submetrics is in seconds (sub-second is not supported)\n '
for aggr_metric in self.aggr_metrics:
functions_aggr = []
fields = aggr_metric.split(':')
cur_metric_type = fields[0].split('.')[0]
if (len(fields) > 1):
func_user = .join(fields[1].split())
functions_aggr.extend(func_user.split(','))
else:
return True
cur_column = '.'.join(fields[0].split('.')[1:])
aggr_data = {}
aggr_data['raw'] = []
aggr_data['sum'] = defaultdict(float)
aggr_data['count'] = defaultdict(int)
for metric in self.metrics:
if ((metric.hostname in self.aggr_hosts) and (cur_column in metric.csv_column_map.values())):
file_csv = metric.get_csv(cur_column)
timestamp_format = None
with open(file_csv) as fh:
for line in fh:
aggr_data['raw'].append(line.rstrip())
words = line.split(',')
ts = words[0].split('.')[0]
if ((not timestamp_format) or (timestamp_format == 'unknown')):
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if (timestamp_format == 'unknown'):
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
aggr_data['sum'][ts] += float(words[1])
aggr_data['count'][ts] += 1
if ('raw' in functions_aggr):
out_csv = self.get_csv(cur_column, 'raw')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
fh.write('\n'.join(sorted(aggr_data['raw'])))
if ('sum' in functions_aggr):
out_csv = self.get_csv(cur_column, 'sum')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['sum'].items()):
fh.write((((k + ',') + str(v)) + '\n'))
if ('avg' in functions_aggr):
out_csv = self.get_csv(cur_column, 'avg')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['sum'].items()):
fh.write((((k + ',') + str((v / aggr_data['count'][k]))) + '\n'))
if ('count' in functions_aggr):
out_csv = self.get_csv(cur_column, 'count')
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
for (k, v) in sorted(aggr_data['count'].items()):
fh.write((((k + ',') + str(v)) + '\n'))
gc.collect()
return True<|docstring|>Take a list of metrics, filter all metrics based on hostname, and metric_type
For each metric, merge the corresponding csv files into one,update corresponding properties such as csv_column_map.
Users can specify functions: raw, count (qps), sum (aggregated value), avg (averaged value)
The timestamp granularity of aggregated submetrics is in seconds (sub-second is not supported)<|endoftext|> |
94ffdc136cf84d0689e58ed1a547dedcdad9efcd886ccf2f6b3d1fae40f86bd1 | def parse(self):
"\n Merge multiple hosts' csv into one csv file. This approach has the benefit of reusing calculate_stats(), but with the penalty of reading the single csv\n later for calculate_stats(). However, since file cache will cache the newly written csv files, reading the csv file will not likely be a IO bottleneck.\n "
return True | Merge multiple hosts' csv into one csv file. This approach has the benefit of reusing calculate_stats(), but with the penalty of reading the single csv
later for calculate_stats(). However, since file cache will cache the newly written csv files, reading the csv file will not likely be a IO bottleneck. | src/naarad/metrics/cluster_metric.py | parse | richardhsu/naarad | 180 | python | def parse(self):
"\n Merge multiple hosts' csv into one csv file. This approach has the benefit of reusing calculate_stats(), but with the penalty of reading the single csv\n later for calculate_stats(). However, since file cache will cache the newly written csv files, reading the csv file will not likely be a IO bottleneck.\n "
return True | def parse(self):
"\n Merge multiple hosts' csv into one csv file. This approach has the benefit of reusing calculate_stats(), but with the penalty of reading the single csv\n later for calculate_stats(). However, since file cache will cache the newly written csv files, reading the csv file will not likely be a IO bottleneck.\n "
return True<|docstring|>Merge multiple hosts' csv into one csv file. This approach has the benefit of reusing calculate_stats(), but with the penalty of reading the single csv
later for calculate_stats(). However, since file cache will cache the newly written csv files, reading the csv file will not likely be a IO bottleneck.<|endoftext|> |
56e39be3c5d0563e5f6553f6dcaa6ea8cc5407dd94a3497b9cfde68a4514653e | def plugin_info_map(self):
'Returns a map with information about the plugin.'
result = collections.OrderedDict()
result['name'] = self.name
result['doc'] = inspect.getdoc(self.module())
result['path'] = self.module_abs_path()
result['from manifest'] = self._json_path
return result | Returns a map with information about the plugin. | src/py-opentimelineio/opentimelineio/plugins/python_plugin.py | plugin_info_map | rdaniels29/OpenTimelineIO | 1,021 | python | def plugin_info_map(self):
result = collections.OrderedDict()
result['name'] = self.name
result['doc'] = inspect.getdoc(self.module())
result['path'] = self.module_abs_path()
result['from manifest'] = self._json_path
return result | def plugin_info_map(self):
result = collections.OrderedDict()
result['name'] = self.name
result['doc'] = inspect.getdoc(self.module())
result['path'] = self.module_abs_path()
result['from manifest'] = self._json_path
return result<|docstring|>Returns a map with information about the plugin.<|endoftext|> |
4e50b5d56bc4754a5750b31b6eb5ef4f73e5e8017cd83f55fc24fbff82f39df4 | def module_abs_path(self):
'Return an absolute path to the module implementing this adapter.'
filepath = self.filepath
if (not os.path.isabs(filepath)):
if (not self._json_path):
raise exceptions.MisconfiguredPluginError('{} plugin is misconfigured, missing json path. plugin: {}'.format(self.name, repr(self)))
filepath = os.path.join(os.path.dirname(self._json_path), filepath)
return filepath | Return an absolute path to the module implementing this adapter. | src/py-opentimelineio/opentimelineio/plugins/python_plugin.py | module_abs_path | rdaniels29/OpenTimelineIO | 1,021 | python | def module_abs_path(self):
filepath = self.filepath
if (not os.path.isabs(filepath)):
if (not self._json_path):
raise exceptions.MisconfiguredPluginError('{} plugin is misconfigured, missing json path. plugin: {}'.format(self.name, repr(self)))
filepath = os.path.join(os.path.dirname(self._json_path), filepath)
return filepath | def module_abs_path(self):
filepath = self.filepath
if (not os.path.isabs(filepath)):
if (not self._json_path):
raise exceptions.MisconfiguredPluginError('{} plugin is misconfigured, missing json path. plugin: {}'.format(self.name, repr(self)))
filepath = os.path.join(os.path.dirname(self._json_path), filepath)
return filepath<|docstring|>Return an absolute path to the module implementing this adapter.<|endoftext|> |
f05289d23c67da1a027f2865a7b6409abde71157fb1bcf458f7076fab589fe24 | def _imported_module(self, namespace):
'Load the module this plugin points at.'
pyname = os.path.splitext(os.path.basename(self.module_abs_path()))[0]
pydir = os.path.dirname(self.module_abs_path())
(file_obj, pathname, description) = imp.find_module(pyname, [pydir])
with file_obj:
mod = imp.load_module('opentimelineio.{}.{}'.format(namespace, self.name), file_obj, pathname, description)
return mod | Load the module this plugin points at. | src/py-opentimelineio/opentimelineio/plugins/python_plugin.py | _imported_module | rdaniels29/OpenTimelineIO | 1,021 | python | def _imported_module(self, namespace):
pyname = os.path.splitext(os.path.basename(self.module_abs_path()))[0]
pydir = os.path.dirname(self.module_abs_path())
(file_obj, pathname, description) = imp.find_module(pyname, [pydir])
with file_obj:
mod = imp.load_module('opentimelineio.{}.{}'.format(namespace, self.name), file_obj, pathname, description)
return mod | def _imported_module(self, namespace):
pyname = os.path.splitext(os.path.basename(self.module_abs_path()))[0]
pydir = os.path.dirname(self.module_abs_path())
(file_obj, pathname, description) = imp.find_module(pyname, [pydir])
with file_obj:
mod = imp.load_module('opentimelineio.{}.{}'.format(namespace, self.name), file_obj, pathname, description)
return mod<|docstring|>Load the module this plugin points at.<|endoftext|> |
e5cf20fab8a047a1541144ab37b55ce8ccbd89c7789487add823217ab977dac5 | def module(self):
'Return the module object for this adapter. '
if (not self._module):
self._module = self._imported_module('adapters')
return self._module | Return the module object for this adapter. | src/py-opentimelineio/opentimelineio/plugins/python_plugin.py | module | rdaniels29/OpenTimelineIO | 1,021 | python | def module(self):
' '
if (not self._module):
self._module = self._imported_module('adapters')
return self._module | def module(self):
' '
if (not self._module):
self._module = self._imported_module('adapters')
return self._module<|docstring|>Return the module object for this adapter.<|endoftext|> |
43fc99ab989cea45a8c9ea27a6fad23bcd6978ed4f8d39742f4335fec6c3d769 | def _execute_function(self, func_name, **kwargs):
'Execute func_name on this adapter with error checking.'
if (not hasattr(self.module(), func_name)):
raise exceptions.AdapterDoesntSupportFunctionError("Sorry, {} doesn't support {}.".format(self.name, func_name))
return getattr(self.module(), func_name)(**kwargs) | Execute func_name on this adapter with error checking. | src/py-opentimelineio/opentimelineio/plugins/python_plugin.py | _execute_function | rdaniels29/OpenTimelineIO | 1,021 | python | def _execute_function(self, func_name, **kwargs):
if (not hasattr(self.module(), func_name)):
raise exceptions.AdapterDoesntSupportFunctionError("Sorry, {} doesn't support {}.".format(self.name, func_name))
return getattr(self.module(), func_name)(**kwargs) | def _execute_function(self, func_name, **kwargs):
if (not hasattr(self.module(), func_name)):
raise exceptions.AdapterDoesntSupportFunctionError("Sorry, {} doesn't support {}.".format(self.name, func_name))
return getattr(self.module(), func_name)(**kwargs)<|docstring|>Execute func_name on this adapter with error checking.<|endoftext|> |
5e514813531c573763a4745ef587a13f9244b69e0342f8f29989b2fd44d55c9a | def process_isbn(self):
' Controller function; manages preparation and saving of data to db.\n Called by cron-job or, eventually perhaps, rq worker. '
tracker_record = self.find_record_to_process()
processor.prepare_alternates(tracker_record)
processor.prepare_filtered_alternates(tracker_record)
processor.prepare_brown_filtered_alternates(tracker_record)
return | Controller function; manages preparation and saving of data to db.
Called by cron-job or, eventually perhaps, rq worker. | xisbn_app/lib/investigator_controller.py | process_isbn | birkin/xisbn_project | 0 | python | def process_isbn(self):
' Controller function; manages preparation and saving of data to db.\n Called by cron-job or, eventually perhaps, rq worker. '
tracker_record = self.find_record_to_process()
processor.prepare_alternates(tracker_record)
processor.prepare_filtered_alternates(tracker_record)
processor.prepare_brown_filtered_alternates(tracker_record)
return | def process_isbn(self):
' Controller function; manages preparation and saving of data to db.\n Called by cron-job or, eventually perhaps, rq worker. '
tracker_record = self.find_record_to_process()
processor.prepare_alternates(tracker_record)
processor.prepare_filtered_alternates(tracker_record)
processor.prepare_brown_filtered_alternates(tracker_record)
return<|docstring|>Controller function; manages preparation and saving of data to db.
Called by cron-job or, eventually perhaps, rq worker.<|endoftext|> |
fa9b68e37ed2815f40de38786fda9b4ade3740d502f82bee75ed899691c9897f | def find_record_to_process(self):
' Grabs tracker record.\n Called by process_isbn() '
trckr = XisbnTracker.objects.order_by('bfa_last_changed_date')[0]
log.debug(('trckr.canonical_isbn, `%s`' % trckr.canonical_isbn))
return trckr | Grabs tracker record.
Called by process_isbn() | xisbn_app/lib/investigator_controller.py | find_record_to_process | birkin/xisbn_project | 0 | python | def find_record_to_process(self):
' Grabs tracker record.\n Called by process_isbn() '
trckr = XisbnTracker.objects.order_by('bfa_last_changed_date')[0]
log.debug(('trckr.canonical_isbn, `%s`' % trckr.canonical_isbn))
return trckr | def find_record_to_process(self):
' Grabs tracker record.\n Called by process_isbn() '
trckr = XisbnTracker.objects.order_by('bfa_last_changed_date')[0]
log.debug(('trckr.canonical_isbn, `%s`' % trckr.canonical_isbn))
return trckr<|docstring|>Grabs tracker record.
Called by process_isbn()<|endoftext|> |
210ff4aca5a3f1013aedeb3ee92268f1d148235dac9bdb882f55207cb8b32111 | def parse_pdb(path, chain):
'\n Method parses atomic coordinate data from PDB.\n\n Params:\n path - str; PDB file path\n chain - str; chain identifier\n\n Returns:\n data - np.array; PDB data\n\n '
data = []
with open(path, 'r') as f:
lines = f.readlines()
residue = None
residue_data = []
flag = False
for row in lines:
if ((row[:4] == 'ATOM') and (row[21] == chain)):
flag = True
if (residue != row[17:20]):
data.append(residue_data)
residue_data = []
residue = row[17:20]
atom_data = [row[17:20], row[12:16].strip(), row[30:38], row[38:46], row[47:54]]
residue_data.append(atom_data)
if ((row[:3] == 'TER') and flag):
break
data = np.array(data[1:])
return data | Method parses atomic coordinate data from PDB.
Params:
path - str; PDB file path
chain - str; chain identifier
Returns:
data - np.array; PDB data | src/data_processing/torsion_generation/generate_data.py | parse_pdb | rz4/DeepProteinScoring | 2 | python | def parse_pdb(path, chain):
'\n Method parses atomic coordinate data from PDB.\n\n Params:\n path - str; PDB file path\n chain - str; chain identifier\n\n Returns:\n data - np.array; PDB data\n\n '
data = []
with open(path, 'r') as f:
lines = f.readlines()
residue = None
residue_data = []
flag = False
for row in lines:
if ((row[:4] == 'ATOM') and (row[21] == chain)):
flag = True
if (residue != row[17:20]):
data.append(residue_data)
residue_data = []
residue = row[17:20]
atom_data = [row[17:20], row[12:16].strip(), row[30:38], row[38:46], row[47:54]]
residue_data.append(atom_data)
if ((row[:3] == 'TER') and flag):
break
data = np.array(data[1:])
return data | def parse_pdb(path, chain):
'\n Method parses atomic coordinate data from PDB.\n\n Params:\n path - str; PDB file path\n chain - str; chain identifier\n\n Returns:\n data - np.array; PDB data\n\n '
data = []
with open(path, 'r') as f:
lines = f.readlines()
residue = None
residue_data = []
flag = False
for row in lines:
if ((row[:4] == 'ATOM') and (row[21] == chain)):
flag = True
if (residue != row[17:20]):
data.append(residue_data)
residue_data = []
residue = row[17:20]
atom_data = [row[17:20], row[12:16].strip(), row[30:38], row[38:46], row[47:54]]
residue_data.append(atom_data)
if ((row[:3] == 'TER') and flag):
break
data = np.array(data[1:])
return data<|docstring|>Method parses atomic coordinate data from PDB.
Params:
path - str; PDB file path
chain - str; chain identifier
Returns:
data - np.array; PDB data<|endoftext|> |
0d221085b2723bfcdb7de051dfd3911bd7d23e0e230ce689f800016cb3f9ecac | def dihedral_angle(points):
'\n Method calculates dihedral angle for list of four points.\n\n Params:\n points - array; four atom x,y,z coordinates\n\n Returns:\n degree - float; dihedral angle in degrees\n\n '
p0 = points[0]
p1 = points[1]
p2 = points[2]
p3 = points[3]
b0 = ((- 1.0) * (p1 - p0))
b1 = (p2 - p1)
b2 = (p3 - p2)
b1 /= np.linalg.norm(b1)
v = (b0 - (np.dot(b0, b1) * b1))
w = (b2 - (np.dot(b2, b1) * b1))
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
degree = np.degrees(np.arctan2(y, x))
return degree | Method calculates dihedral angle for list of four points.
Params:
points - array; four atom x,y,z coordinates
Returns:
degree - float; dihedral angle in degrees | src/data_processing/torsion_generation/generate_data.py | dihedral_angle | rz4/DeepProteinScoring | 2 | python | def dihedral_angle(points):
'\n Method calculates dihedral angle for list of four points.\n\n Params:\n points - array; four atom x,y,z coordinates\n\n Returns:\n degree - float; dihedral angle in degrees\n\n '
p0 = points[0]
p1 = points[1]
p2 = points[2]
p3 = points[3]
b0 = ((- 1.0) * (p1 - p0))
b1 = (p2 - p1)
b2 = (p3 - p2)
b1 /= np.linalg.norm(b1)
v = (b0 - (np.dot(b0, b1) * b1))
w = (b2 - (np.dot(b2, b1) * b1))
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
degree = np.degrees(np.arctan2(y, x))
return degree | def dihedral_angle(points):
'\n Method calculates dihedral angle for list of four points.\n\n Params:\n points - array; four atom x,y,z coordinates\n\n Returns:\n degree - float; dihedral angle in degrees\n\n '
p0 = points[0]
p1 = points[1]
p2 = points[2]
p3 = points[3]
b0 = ((- 1.0) * (p1 - p0))
b1 = (p2 - p1)
b2 = (p3 - p2)
b1 /= np.linalg.norm(b1)
v = (b0 - (np.dot(b0, b1) * b1))
w = (b2 - (np.dot(b2, b1) * b1))
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
degree = np.degrees(np.arctan2(y, x))
return degree<|docstring|>Method calculates dihedral angle for list of four points.
Params:
points - array; four atom x,y,z coordinates
Returns:
degree - float; dihedral angle in degrees<|endoftext|> |
6fe6f098e3aad6fb124e06db459af5e69b243263bfa52256cc940a436995a330 | def calculate_dihedral_angles(protein_data):
'\n Method calculates dihedral angles for all amino acids in a given\n protein chain.\n\n Params:\n protein_data - np.array;\n\n Returns:\n dihedral_angles - np.array; Phi and Psi angles per residue\n\n '
dihedral_angles = []
for i in range(1, (len(protein_data) - 1)):
amino_0 = np.array(protein_data[(i - 1)])
c_0 = amino_0[np.where((amino_0[(:, 1)] == 'C'))][(:, 2:)]
amino_1 = np.array(protein_data[i])
n_1 = amino_1[np.where((amino_1[(:, 1)] == 'N'))][(:, 2:)]
ca_1 = amino_1[np.where((amino_1[(:, 1)] == 'CA'))][(:, 2:)]
c_1 = amino_1[np.where((amino_1[(:, 1)] == 'C'))][(:, 2:)]
amino_2 = np.array(protein_data[(i + 1)])
n_2 = amino_2[np.where((amino_2[(:, 1)] == 'N'))][(:, 2:)]
phi_atoms = np.concatenate([c_0, n_1, ca_1, c_1], axis=0)
psi_atoms = np.concatenate([n_1, ca_1, c_1, n_2], axis=0)
phi = dihedral_angle(phi_atoms.astype('float'))
psi = dihedral_angle(psi_atoms.astype('float'))
dihedral_angles.append([amino_1[(0, 0)], phi, psi])
dihedral_angles = np.array(dihedral_angles)
return dihedral_angles | Method calculates dihedral angles for all amino acids in a given
protein chain.
Params:
protein_data - np.array;
Returns:
dihedral_angles - np.array; Phi and Psi angles per residue | src/data_processing/torsion_generation/generate_data.py | calculate_dihedral_angles | rz4/DeepProteinScoring | 2 | python | def calculate_dihedral_angles(protein_data):
'\n Method calculates dihedral angles for all amino acids in a given\n protein chain.\n\n Params:\n protein_data - np.array;\n\n Returns:\n dihedral_angles - np.array; Phi and Psi angles per residue\n\n '
dihedral_angles = []
for i in range(1, (len(protein_data) - 1)):
amino_0 = np.array(protein_data[(i - 1)])
c_0 = amino_0[np.where((amino_0[(:, 1)] == 'C'))][(:, 2:)]
amino_1 = np.array(protein_data[i])
n_1 = amino_1[np.where((amino_1[(:, 1)] == 'N'))][(:, 2:)]
ca_1 = amino_1[np.where((amino_1[(:, 1)] == 'CA'))][(:, 2:)]
c_1 = amino_1[np.where((amino_1[(:, 1)] == 'C'))][(:, 2:)]
amino_2 = np.array(protein_data[(i + 1)])
n_2 = amino_2[np.where((amino_2[(:, 1)] == 'N'))][(:, 2:)]
phi_atoms = np.concatenate([c_0, n_1, ca_1, c_1], axis=0)
psi_atoms = np.concatenate([n_1, ca_1, c_1, n_2], axis=0)
phi = dihedral_angle(phi_atoms.astype('float'))
psi = dihedral_angle(psi_atoms.astype('float'))
dihedral_angles.append([amino_1[(0, 0)], phi, psi])
dihedral_angles = np.array(dihedral_angles)
return dihedral_angles | def calculate_dihedral_angles(protein_data):
'\n Method calculates dihedral angles for all amino acids in a given\n protein chain.\n\n Params:\n protein_data - np.array;\n\n Returns:\n dihedral_angles - np.array; Phi and Psi angles per residue\n\n '
dihedral_angles = []
for i in range(1, (len(protein_data) - 1)):
amino_0 = np.array(protein_data[(i - 1)])
c_0 = amino_0[np.where((amino_0[(:, 1)] == 'C'))][(:, 2:)]
amino_1 = np.array(protein_data[i])
n_1 = amino_1[np.where((amino_1[(:, 1)] == 'N'))][(:, 2:)]
ca_1 = amino_1[np.where((amino_1[(:, 1)] == 'CA'))][(:, 2:)]
c_1 = amino_1[np.where((amino_1[(:, 1)] == 'C'))][(:, 2:)]
amino_2 = np.array(protein_data[(i + 1)])
n_2 = amino_2[np.where((amino_2[(:, 1)] == 'N'))][(:, 2:)]
phi_atoms = np.concatenate([c_0, n_1, ca_1, c_1], axis=0)
psi_atoms = np.concatenate([n_1, ca_1, c_1, n_2], axis=0)
phi = dihedral_angle(phi_atoms.astype('float'))
psi = dihedral_angle(psi_atoms.astype('float'))
dihedral_angles.append([amino_1[(0, 0)], phi, psi])
dihedral_angles = np.array(dihedral_angles)
return dihedral_angles<|docstring|>Method calculates dihedral angles for all amino acids in a given
protein chain.
Params:
protein_data - np.array;
Returns:
dihedral_angles - np.array; Phi and Psi angles per residue<|endoftext|> |
eca737c2529a4305d996219f74987ae9084b719767faef46271f5bcbc8ba92f2 | def bin_dihedral_angles(protein_data, diheral_bin_count):
'\n Method bins dihedral angles into 2D data grids for each type of\n amino acid type.\n\n Params:\n protein_data - np.array;\n diheral_bin_count - int; number of bins to bin dihedral angles\n\n Returns:\n binned_dihedral_angles - np.array; final data grid of binned dihedral\n angles per residue type.\n Shape - (bin_count, bin_count, 23)\n\n '
dihedral_angles = calculate_dihedral_angles(protein_data)
binned_dihedral_angles = []
for res in residues:
i = np.where((dihedral_angles[(:, 0)] == res))
phi_angles = dihedral_angles[(i, 1)].astype('float')[0]
psi_angles = dihedral_angles[(i, 2)].astype('float')[0]
x_bins = np.linspace((- 180), 180, num=(diheral_bin_count + 1))
y_bins = np.linspace((- 180), 180, num=(diheral_bin_count + 1))
(H, x_bins, y_bins) = np.histogram2d(psi_angles, phi_angles, bins=(x_bins, y_bins))
H = gaussian_filter(H, 0.5)
binned_dihedral_angles.append(H)
binned_dihedral_angles = np.array(binned_dihedral_angles)
binned_dihedral_angles = np.transpose(binned_dihedral_angles, (1, 2, 0))
return binned_dihedral_angles | Method bins dihedral angles into 2D data grids for each type of
amino acid type.
Params:
protein_data - np.array;
diheral_bin_count - int; number of bins to bin dihedral angles
Returns:
binned_dihedral_angles - np.array; final data grid of binned dihedral
angles per residue type.
Shape - (bin_count, bin_count, 23) | src/data_processing/torsion_generation/generate_data.py | bin_dihedral_angles | rz4/DeepProteinScoring | 2 | python | def bin_dihedral_angles(protein_data, diheral_bin_count):
'\n Method bins dihedral angles into 2D data grids for each type of\n amino acid type.\n\n Params:\n protein_data - np.array;\n diheral_bin_count - int; number of bins to bin dihedral angles\n\n Returns:\n binned_dihedral_angles - np.array; final data grid of binned dihedral\n angles per residue type.\n Shape - (bin_count, bin_count, 23)\n\n '
dihedral_angles = calculate_dihedral_angles(protein_data)
binned_dihedral_angles = []
for res in residues:
i = np.where((dihedral_angles[(:, 0)] == res))
phi_angles = dihedral_angles[(i, 1)].astype('float')[0]
psi_angles = dihedral_angles[(i, 2)].astype('float')[0]
x_bins = np.linspace((- 180), 180, num=(diheral_bin_count + 1))
y_bins = np.linspace((- 180), 180, num=(diheral_bin_count + 1))
(H, x_bins, y_bins) = np.histogram2d(psi_angles, phi_angles, bins=(x_bins, y_bins))
H = gaussian_filter(H, 0.5)
binned_dihedral_angles.append(H)
binned_dihedral_angles = np.array(binned_dihedral_angles)
binned_dihedral_angles = np.transpose(binned_dihedral_angles, (1, 2, 0))
return binned_dihedral_angles | def bin_dihedral_angles(protein_data, diheral_bin_count):
'\n Method bins dihedral angles into 2D data grids for each type of\n amino acid type.\n\n Params:\n protein_data - np.array;\n diheral_bin_count - int; number of bins to bin dihedral angles\n\n Returns:\n binned_dihedral_angles - np.array; final data grid of binned dihedral\n angles per residue type.\n Shape - (bin_count, bin_count, 23)\n\n '
dihedral_angles = calculate_dihedral_angles(protein_data)
binned_dihedral_angles = []
for res in residues:
i = np.where((dihedral_angles[(:, 0)] == res))
phi_angles = dihedral_angles[(i, 1)].astype('float')[0]
psi_angles = dihedral_angles[(i, 2)].astype('float')[0]
x_bins = np.linspace((- 180), 180, num=(diheral_bin_count + 1))
y_bins = np.linspace((- 180), 180, num=(diheral_bin_count + 1))
(H, x_bins, y_bins) = np.histogram2d(psi_angles, phi_angles, bins=(x_bins, y_bins))
H = gaussian_filter(H, 0.5)
binned_dihedral_angles.append(H)
binned_dihedral_angles = np.array(binned_dihedral_angles)
binned_dihedral_angles = np.transpose(binned_dihedral_angles, (1, 2, 0))
return binned_dihedral_angles<|docstring|>Method bins dihedral angles into 2D data grids for each type of
amino acid type.
Params:
protein_data - np.array;
diheral_bin_count - int; number of bins to bin dihedral angles
Returns:
binned_dihedral_angles - np.array; final data grid of binned dihedral
angles per residue type.
Shape - (bin_count, bin_count, 23)<|endoftext|> |
dba515b951565bd5a463af1292654a191914bca68b2a104a790aeb3f28bc3a5a | def test_calc_initial_density(mol_h2o):
'\n Tests that the initial density returns a zero matrix\n and tests dimensions\n '
Duv = SCF.calc_initial_density(mol_h2o)
assert (Duv.sum() == 0.0)
assert (Duv.shape == (mol_h2o.nao, mol_h2o.nao)) | Tests that the initial density returns a zero matrix
and tests dimensions | tests/SCF/test_SCF.py | test_calc_initial_density | manishsainani/HF_SCF_Assignment | 0 | python | def test_calc_initial_density(mol_h2o):
'\n Tests that the initial density returns a zero matrix\n and tests dimensions\n '
Duv = SCF.calc_initial_density(mol_h2o)
assert (Duv.sum() == 0.0)
assert (Duv.shape == (mol_h2o.nao, mol_h2o.nao)) | def test_calc_initial_density(mol_h2o):
'\n Tests that the initial density returns a zero matrix\n and tests dimensions\n '
Duv = SCF.calc_initial_density(mol_h2o)
assert (Duv.sum() == 0.0)
assert (Duv.shape == (mol_h2o.nao, mol_h2o.nao))<|docstring|>Tests that the initial density returns a zero matrix
and tests dimensions<|endoftext|> |
9eb76019d1c3887df2f056aba79fedeb66b37b07e5401b5e352f654fb0b30513 | def uniquePathsWithObstacles(self, obstacleGrid):
'\n :type obstacleGrid: List[List[int]]\n :rtype: int\n '
if (obstacleGrid[0][0] == 1):
return 0
m = len(obstacleGrid)
n = len(obstacleGrid[0])
dp = [[0 for __ in range(n)] for __ in range(m)]
dp[0][0] = 1
for i in range(1, m):
dp[i][0] = (dp[(i - 1)][0] if (obstacleGrid[i][0] == 0) else 0)
for j in range(1, n):
dp[0][j] = (dp[0][(j - 1)] if (obstacleGrid[0][j] == 0) else 0)
for i in range(1, m):
for j in range(1, n):
if (obstacleGrid[i][j] == 1):
dp[i][j] = 0
else:
dp[i][j] = (dp[(i - 1)][j] + dp[i][(j - 1)])
return dp[(m - 1)][(n - 1)] | :type obstacleGrid: List[List[int]]
:rtype: int | python/063 Unique Paths II.py | uniquePathsWithObstacles | allandproust/leetcode-share | 156 | python | def uniquePathsWithObstacles(self, obstacleGrid):
'\n :type obstacleGrid: List[List[int]]\n :rtype: int\n '
if (obstacleGrid[0][0] == 1):
return 0
m = len(obstacleGrid)
n = len(obstacleGrid[0])
dp = [[0 for __ in range(n)] for __ in range(m)]
dp[0][0] = 1
for i in range(1, m):
dp[i][0] = (dp[(i - 1)][0] if (obstacleGrid[i][0] == 0) else 0)
for j in range(1, n):
dp[0][j] = (dp[0][(j - 1)] if (obstacleGrid[0][j] == 0) else 0)
for i in range(1, m):
for j in range(1, n):
if (obstacleGrid[i][j] == 1):
dp[i][j] = 0
else:
dp[i][j] = (dp[(i - 1)][j] + dp[i][(j - 1)])
return dp[(m - 1)][(n - 1)] | def uniquePathsWithObstacles(self, obstacleGrid):
'\n :type obstacleGrid: List[List[int]]\n :rtype: int\n '
if (obstacleGrid[0][0] == 1):
return 0
m = len(obstacleGrid)
n = len(obstacleGrid[0])
dp = [[0 for __ in range(n)] for __ in range(m)]
dp[0][0] = 1
for i in range(1, m):
dp[i][0] = (dp[(i - 1)][0] if (obstacleGrid[i][0] == 0) else 0)
for j in range(1, n):
dp[0][j] = (dp[0][(j - 1)] if (obstacleGrid[0][j] == 0) else 0)
for i in range(1, m):
for j in range(1, n):
if (obstacleGrid[i][j] == 1):
dp[i][j] = 0
else:
dp[i][j] = (dp[(i - 1)][j] + dp[i][(j - 1)])
return dp[(m - 1)][(n - 1)]<|docstring|>:type obstacleGrid: List[List[int]]
:rtype: int<|endoftext|> |
0498b195ee90b0ff0c079129fa2780bcdaa8e7ecd3bf7b0f2979a91201bbfb1c | @staticmethod
def Args(parser):
'Register flags for this command.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order\n to capture some information, but behaves like an ArgumentParser.\n '
flags.AddNodePoolClusterFlag(parser, 'The name of the cluster.')
parser.display_info.AddFormat(util.NODEPOOLS_FORMAT) | Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser. | lib/surface/container/node_pools/list.py | Args | kustodian/google-cloud-sdk | 2 | python | @staticmethod
def Args(parser):
'Register flags for this command.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order\n to capture some information, but behaves like an ArgumentParser.\n '
flags.AddNodePoolClusterFlag(parser, 'The name of the cluster.')
parser.display_info.AddFormat(util.NODEPOOLS_FORMAT) | @staticmethod
def Args(parser):
'Register flags for this command.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order\n to capture some information, but behaves like an ArgumentParser.\n '
flags.AddNodePoolClusterFlag(parser, 'The name of the cluster.')
parser.display_info.AddFormat(util.NODEPOOLS_FORMAT)<|docstring|>Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.<|endoftext|> |
7f038c0d3eff4fd42e97c0b46beed2265c34e664569be3ef5e98fe18204ad131 | def Run(self, args):
'This is what gets called when the user runs this command.\n\n Args:\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Returns:\n Some value that we want to have printed later.\n '
adapter = self.context['api_adapter']
location_get = self.context['location_get']
location = location_get(args)
cluster = properties.VALUES.container.cluster.Get(required=True)
cluster_ref = adapter.ParseCluster(cluster, location)
try:
res = adapter.ListNodePools(cluster_ref)
return res.nodePools
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT) | This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later. | lib/surface/container/node_pools/list.py | Run | kustodian/google-cloud-sdk | 2 | python | def Run(self, args):
'This is what gets called when the user runs this command.\n\n Args:\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Returns:\n Some value that we want to have printed later.\n '
adapter = self.context['api_adapter']
location_get = self.context['location_get']
location = location_get(args)
cluster = properties.VALUES.container.cluster.Get(required=True)
cluster_ref = adapter.ParseCluster(cluster, location)
try:
res = adapter.ListNodePools(cluster_ref)
return res.nodePools
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT) | def Run(self, args):
'This is what gets called when the user runs this command.\n\n Args:\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Returns:\n Some value that we want to have printed later.\n '
adapter = self.context['api_adapter']
location_get = self.context['location_get']
location = location_get(args)
cluster = properties.VALUES.container.cluster.Get(required=True)
cluster_ref = adapter.ParseCluster(cluster, location)
try:
res = adapter.ListNodePools(cluster_ref)
return res.nodePools
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)<|docstring|>This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.<|endoftext|> |
708b14df719140734fb86c7646be5317450d59ccdee35ac36dfc911ee7549645 | def Action(self, scan_action, scan_direction='up'):
'\n Sarts, stops, pauses, or resumes a scan\n\n Parameters\n scan_action : one of "start", "stop", "pause", "resume"\n scan_direction : "up" or "down"\n \n '
action_dict = {'start': 0, 'stop': 1, 'pause': 2, 'resume': 3, 'down': 0, 'up': 1}
hex_rep = self.nanonisTCP.make_header('Scan.Action', body_size=6)
hex_rep += self.nanonisTCP.to_hex(action_dict[scan_action], 2)
hex_rep += self.nanonisTCP.to_hex(action_dict[scan_direction], 4)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | Sarts, stops, pauses, or resumes a scan
Parameters
scan_action : one of "start", "stop", "pause", "resume"
scan_direction : "up" or "down" | nanonisTCP/Scan.py | Action | New-Horizons-SPM/nanonisTCP | 0 | python | def Action(self, scan_action, scan_direction='up'):
'\n Sarts, stops, pauses, or resumes a scan\n\n Parameters\n scan_action : one of "start", "stop", "pause", "resume"\n scan_direction : "up" or "down"\n \n '
action_dict = {'start': 0, 'stop': 1, 'pause': 2, 'resume': 3, 'down': 0, 'up': 1}
hex_rep = self.nanonisTCP.make_header('Scan.Action', body_size=6)
hex_rep += self.nanonisTCP.to_hex(action_dict[scan_action], 2)
hex_rep += self.nanonisTCP.to_hex(action_dict[scan_direction], 4)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | def Action(self, scan_action, scan_direction='up'):
'\n Sarts, stops, pauses, or resumes a scan\n\n Parameters\n scan_action : one of "start", "stop", "pause", "resume"\n scan_direction : "up" or "down"\n \n '
action_dict = {'start': 0, 'stop': 1, 'pause': 2, 'resume': 3, 'down': 0, 'up': 1}
hex_rep = self.nanonisTCP.make_header('Scan.Action', body_size=6)
hex_rep += self.nanonisTCP.to_hex(action_dict[scan_action], 2)
hex_rep += self.nanonisTCP.to_hex(action_dict[scan_direction], 4)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0)<|docstring|>Sarts, stops, pauses, or resumes a scan
Parameters
scan_action : one of "start", "stop", "pause", "resume"
scan_direction : "up" or "down"<|endoftext|> |
d6cabbc81f93afbdd3cc1c7d5707f664a5b27f3a0104ef9c2131fd1a523671e9 | def WaitEndOfScan(self, timeout=(- 1)):
"\n Waits for the end-of-scan\n This function returns only when an end-of-scan or timeout occurs (which\n ever occurs first)\n\n Parameters\n timeout : timeout in ms. if -1, it waits indefinitely\n\n Returns\n timeout_status : 1: function timed out. 0: didn't time out\n \n\n "
hex_rep = self.nanonisTCP.make_header('Scan.WaitEndOfScan', body_size=4)
hex_rep += self.nanonisTCP.to_hex(timeout, 4)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
timeout_status = (self.nanonisTCP.hex_to_uint32(response[0:4]) > 0)
file_path_size = self.nanonisTCP.hex_to_uint32(response[4:8])
file_path = response[8:(8 + file_path_size)].decode()
self.nanonisTCP.check_error(response, (8 + file_path_size))
return [timeout_status, file_path_size, file_path] | Waits for the end-of-scan
This function returns only when an end-of-scan or timeout occurs (which
ever occurs first)
Parameters
timeout : timeout in ms. if -1, it waits indefinitely
Returns
timeout_status : 1: function timed out. 0: didn't time out | nanonisTCP/Scan.py | WaitEndOfScan | New-Horizons-SPM/nanonisTCP | 0 | python | def WaitEndOfScan(self, timeout=(- 1)):
"\n Waits for the end-of-scan\n This function returns only when an end-of-scan or timeout occurs (which\n ever occurs first)\n\n Parameters\n timeout : timeout in ms. if -1, it waits indefinitely\n\n Returns\n timeout_status : 1: function timed out. 0: didn't time out\n \n\n "
hex_rep = self.nanonisTCP.make_header('Scan.WaitEndOfScan', body_size=4)
hex_rep += self.nanonisTCP.to_hex(timeout, 4)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
timeout_status = (self.nanonisTCP.hex_to_uint32(response[0:4]) > 0)
file_path_size = self.nanonisTCP.hex_to_uint32(response[4:8])
file_path = response[8:(8 + file_path_size)].decode()
self.nanonisTCP.check_error(response, (8 + file_path_size))
return [timeout_status, file_path_size, file_path] | def WaitEndOfScan(self, timeout=(- 1)):
"\n Waits for the end-of-scan\n This function returns only when an end-of-scan or timeout occurs (which\n ever occurs first)\n\n Parameters\n timeout : timeout in ms. if -1, it waits indefinitely\n\n Returns\n timeout_status : 1: function timed out. 0: didn't time out\n \n\n "
hex_rep = self.nanonisTCP.make_header('Scan.WaitEndOfScan', body_size=4)
hex_rep += self.nanonisTCP.to_hex(timeout, 4)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
timeout_status = (self.nanonisTCP.hex_to_uint32(response[0:4]) > 0)
file_path_size = self.nanonisTCP.hex_to_uint32(response[4:8])
file_path = response[8:(8 + file_path_size)].decode()
self.nanonisTCP.check_error(response, (8 + file_path_size))
return [timeout_status, file_path_size, file_path]<|docstring|>Waits for the end-of-scan
This function returns only when an end-of-scan or timeout occurs (which
ever occurs first)
Parameters
timeout : timeout in ms. if -1, it waits indefinitely
Returns
timeout_status : 1: function timed out. 0: didn't time out<|endoftext|> |
c956f3c185868cb95da22ae3506959fb201a2949a126c69bc9c5b5340b1e88fd | def FrameSet(self, x, y, w, h, angle=0):
'\n Configures the scan frame position and dimensions\n \n Parameters\n x : centre x coordinate\n y : centre y coordinate\n w : scan frame width\n h : scan frame height\n angle : scan frame angle (degrees). angle > 0: clockwise\n \n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameSet', body_size=20)
hex_rep += self.nanonisTCP.float32_to_hex(x)
hex_rep += self.nanonisTCP.float32_to_hex(y)
hex_rep += self.nanonisTCP.float32_to_hex(w)
hex_rep += self.nanonisTCP.float32_to_hex(h)
hex_rep += self.nanonisTCP.float32_to_hex(angle)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | Configures the scan frame position and dimensions
Parameters
x : centre x coordinate
y : centre y coordinate
w : scan frame width
h : scan frame height
angle : scan frame angle (degrees). angle > 0: clockwise | nanonisTCP/Scan.py | FrameSet | New-Horizons-SPM/nanonisTCP | 0 | python | def FrameSet(self, x, y, w, h, angle=0):
'\n Configures the scan frame position and dimensions\n \n Parameters\n x : centre x coordinate\n y : centre y coordinate\n w : scan frame width\n h : scan frame height\n angle : scan frame angle (degrees). angle > 0: clockwise\n \n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameSet', body_size=20)
hex_rep += self.nanonisTCP.float32_to_hex(x)
hex_rep += self.nanonisTCP.float32_to_hex(y)
hex_rep += self.nanonisTCP.float32_to_hex(w)
hex_rep += self.nanonisTCP.float32_to_hex(h)
hex_rep += self.nanonisTCP.float32_to_hex(angle)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | def FrameSet(self, x, y, w, h, angle=0):
'\n Configures the scan frame position and dimensions\n \n Parameters\n x : centre x coordinate\n y : centre y coordinate\n w : scan frame width\n h : scan frame height\n angle : scan frame angle (degrees). angle > 0: clockwise\n \n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameSet', body_size=20)
hex_rep += self.nanonisTCP.float32_to_hex(x)
hex_rep += self.nanonisTCP.float32_to_hex(y)
hex_rep += self.nanonisTCP.float32_to_hex(w)
hex_rep += self.nanonisTCP.float32_to_hex(h)
hex_rep += self.nanonisTCP.float32_to_hex(angle)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0)<|docstring|>Configures the scan frame position and dimensions
Parameters
x : centre x coordinate
y : centre y coordinate
w : scan frame width
h : scan frame height
angle : scan frame angle (degrees). angle > 0: clockwise<|endoftext|> |
0c6a7dfa2c58581aea134b13bb58adf30e4d26cfab143b3ec580a081d014f294 | def FrameGet(self):
'\n Returns the scan frame position and dimensions\n\n Returns\n x : centre x coordinate\n y : centre y coordinate\n w : scan frame width\n h : scan frame height\n angle : scan frame angle (degrees). angle > 0: clockwise \n\n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response(20)
x = self.nanonisTCP.hex_to_float32(response[0:4])
y = self.nanonisTCP.hex_to_float32(response[4:8])
w = self.nanonisTCP.hex_to_float32(response[8:12])
h = self.nanonisTCP.hex_to_float32(response[12:16])
angle = self.nanonisTCP.hex_to_float32(response[16:20])
return [x, y, w, h, angle] | Returns the scan frame position and dimensions
Returns
x : centre x coordinate
y : centre y coordinate
w : scan frame width
h : scan frame height
angle : scan frame angle (degrees). angle > 0: clockwise | nanonisTCP/Scan.py | FrameGet | New-Horizons-SPM/nanonisTCP | 0 | python | def FrameGet(self):
'\n Returns the scan frame position and dimensions\n\n Returns\n x : centre x coordinate\n y : centre y coordinate\n w : scan frame width\n h : scan frame height\n angle : scan frame angle (degrees). angle > 0: clockwise \n\n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response(20)
x = self.nanonisTCP.hex_to_float32(response[0:4])
y = self.nanonisTCP.hex_to_float32(response[4:8])
w = self.nanonisTCP.hex_to_float32(response[8:12])
h = self.nanonisTCP.hex_to_float32(response[12:16])
angle = self.nanonisTCP.hex_to_float32(response[16:20])
return [x, y, w, h, angle] | def FrameGet(self):
'\n Returns the scan frame position and dimensions\n\n Returns\n x : centre x coordinate\n y : centre y coordinate\n w : scan frame width\n h : scan frame height\n angle : scan frame angle (degrees). angle > 0: clockwise \n\n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response(20)
x = self.nanonisTCP.hex_to_float32(response[0:4])
y = self.nanonisTCP.hex_to_float32(response[4:8])
w = self.nanonisTCP.hex_to_float32(response[8:12])
h = self.nanonisTCP.hex_to_float32(response[12:16])
angle = self.nanonisTCP.hex_to_float32(response[16:20])
return [x, y, w, h, angle]<|docstring|>Returns the scan frame position and dimensions
Returns
x : centre x coordinate
y : centre y coordinate
w : scan frame width
h : scan frame height
angle : scan frame angle (degrees). angle > 0: clockwise<|endoftext|> |
44e901898d0e3a97318e26d977949e2ef8708d1183bd45a17af0ebd2c4781cbb | def BufferSet(self, channel_indexes=None, pixels=None, lines=None):
'\n Configures the scan buffer parameters\n\n Parameters\n num_channels : number of recorded channels.\n channel_indexes : indexes of recorded channels (see signals manager or\n use Signals.InSlotsGet function)\n pixels : number of pixels per line. forced to a multiple of 16\n lines : number of scan lines\n\n '
(_, buf_channel_indexes, buf_pixels, buf_lines) = self.BufferGet()
if (not channel_indexes):
channel_indexes = buf_channel_indexes
if (not pixels):
pixels = buf_pixels
if (not lines):
lines = buf_lines
num_channels = len(channel_indexes)
body_size = (12 + (4 * num_channels))
hex_rep = self.nanonisTCP.make_header('Scan.BufferSet', body_size=body_size)
hex_rep += self.nanonisTCP.to_hex(num_channels, 4)
for c in channel_indexes:
hex_rep += self.nanonisTCP.to_hex(c, 4)
hex_rep += self.nanonisTCP.to_hex(pixels, 4)
hex_rep += self.nanonisTCP.to_hex(lines, 4)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | Configures the scan buffer parameters
Parameters
num_channels : number of recorded channels.
channel_indexes : indexes of recorded channels (see signals manager or
use Signals.InSlotsGet function)
pixels : number of pixels per line. forced to a multiple of 16
lines : number of scan lines | nanonisTCP/Scan.py | BufferSet | New-Horizons-SPM/nanonisTCP | 0 | python | def BufferSet(self, channel_indexes=None, pixels=None, lines=None):
'\n Configures the scan buffer parameters\n\n Parameters\n num_channels : number of recorded channels.\n channel_indexes : indexes of recorded channels (see signals manager or\n use Signals.InSlotsGet function)\n pixels : number of pixels per line. forced to a multiple of 16\n lines : number of scan lines\n\n '
(_, buf_channel_indexes, buf_pixels, buf_lines) = self.BufferGet()
if (not channel_indexes):
channel_indexes = buf_channel_indexes
if (not pixels):
pixels = buf_pixels
if (not lines):
lines = buf_lines
num_channels = len(channel_indexes)
body_size = (12 + (4 * num_channels))
hex_rep = self.nanonisTCP.make_header('Scan.BufferSet', body_size=body_size)
hex_rep += self.nanonisTCP.to_hex(num_channels, 4)
for c in channel_indexes:
hex_rep += self.nanonisTCP.to_hex(c, 4)
hex_rep += self.nanonisTCP.to_hex(pixels, 4)
hex_rep += self.nanonisTCP.to_hex(lines, 4)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | def BufferSet(self, channel_indexes=None, pixels=None, lines=None):
'\n Configures the scan buffer parameters\n\n Parameters\n num_channels : number of recorded channels.\n channel_indexes : indexes of recorded channels (see signals manager or\n use Signals.InSlotsGet function)\n pixels : number of pixels per line. forced to a multiple of 16\n lines : number of scan lines\n\n '
(_, buf_channel_indexes, buf_pixels, buf_lines) = self.BufferGet()
if (not channel_indexes):
channel_indexes = buf_channel_indexes
if (not pixels):
pixels = buf_pixels
if (not lines):
lines = buf_lines
num_channels = len(channel_indexes)
body_size = (12 + (4 * num_channels))
hex_rep = self.nanonisTCP.make_header('Scan.BufferSet', body_size=body_size)
hex_rep += self.nanonisTCP.to_hex(num_channels, 4)
for c in channel_indexes:
hex_rep += self.nanonisTCP.to_hex(c, 4)
hex_rep += self.nanonisTCP.to_hex(pixels, 4)
hex_rep += self.nanonisTCP.to_hex(lines, 4)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0)<|docstring|>Configures the scan buffer parameters
Parameters
num_channels : number of recorded channels.
channel_indexes : indexes of recorded channels (see signals manager or
use Signals.InSlotsGet function)
pixels : number of pixels per line. forced to a multiple of 16
lines : number of scan lines<|endoftext|> |
0c5551da438ecea7872cdbbe93800c4237c88ea4487c01867d8d9b4a81c74141 | def BufferGet(self):
'\n Returns the scan buffer parameters\n\n Returns\n num_channels : number of recorded channels.\n channel_indexes : indexes of recorded channels (see signals manager or\n use Signals.InSlotsGet function)\n pixels : number of pixels per line. forced to a multiple of 16\n lines : number of scan lines\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.BufferGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
idx = 0
channel_indexes = []
num_channels = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
for c in range(num_channels):
idx += 4
channel_indexes.append(self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)]))
idx += 4
pixels = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
lines = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
self.nanonisTCP.check_error(response, idx)
return [num_channels, channel_indexes, pixels, lines] | Returns the scan buffer parameters
Returns
num_channels : number of recorded channels.
channel_indexes : indexes of recorded channels (see signals manager or
use Signals.InSlotsGet function)
pixels : number of pixels per line. forced to a multiple of 16
lines : number of scan lines | nanonisTCP/Scan.py | BufferGet | New-Horizons-SPM/nanonisTCP | 0 | python | def BufferGet(self):
'\n Returns the scan buffer parameters\n\n Returns\n num_channels : number of recorded channels.\n channel_indexes : indexes of recorded channels (see signals manager or\n use Signals.InSlotsGet function)\n pixels : number of pixels per line. forced to a multiple of 16\n lines : number of scan lines\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.BufferGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
idx = 0
channel_indexes = []
num_channels = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
for c in range(num_channels):
idx += 4
channel_indexes.append(self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)]))
idx += 4
pixels = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
lines = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
self.nanonisTCP.check_error(response, idx)
return [num_channels, channel_indexes, pixels, lines] | def BufferGet(self):
'\n Returns the scan buffer parameters\n\n Returns\n num_channels : number of recorded channels.\n channel_indexes : indexes of recorded channels (see signals manager or\n use Signals.InSlotsGet function)\n pixels : number of pixels per line. forced to a multiple of 16\n lines : number of scan lines\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.BufferGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
idx = 0
channel_indexes = []
num_channels = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
for c in range(num_channels):
idx += 4
channel_indexes.append(self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)]))
idx += 4
pixels = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
lines = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
self.nanonisTCP.check_error(response, idx)
return [num_channels, channel_indexes, pixels, lines]<|docstring|>Returns the scan buffer parameters
Returns
num_channels : number of recorded channels.
channel_indexes : indexes of recorded channels (see signals manager or
use Signals.InSlotsGet function)
pixels : number of pixels per line. forced to a multiple of 16
lines : number of scan lines<|endoftext|> |
63c7ff28b9b7670b9e7bb404c0daee6c4f27b4db3863a43e98801682fe4d991e | def PropsSet(self, continuous_scan=0, bouncy_scan=0, autosave=0, series_name='%y%m%d_%H-%M-%S_SPM', comment=''):
"\n Configures some of the scan parameters\n\n Parameters\n continuous_scan : sets whether the scan continues or stops when a frame\n has been completed.\n 0: no change (leave previous setting)\n 1: turn on\n 2: turn off\n \n bouncy_scan : sets whether the scan direction changes when a frame\n has been completed.\n 0: no change (leave previous setting)\n 1: turn on (scan direction changes each EOS)\n 2: turn off (scan direction doesn't change each EOS)\n \n autosave : defines the save behaviour when a frame has been\n completed.\n 0: no change (leave previous setting)\n 1: save all\n 2: save next only\n 3: turn off (save none)\n series_name : is the base name used for the saved images\n \n comment : is the comment saved in the file\n\n "
series_name_size = int((len(self.nanonisTCP.string_to_hex(series_name)) / 2))
comment_size = int((len(self.nanonisTCP.string_to_hex(comment)) / 2))
body_size = ((20 + series_name_size) + comment_size)
hex_rep = self.nanonisTCP.make_header('Scan.PropsSet', body_size=body_size)
hex_rep += self.nanonisTCP.to_hex(continuous_scan, 4)
hex_rep += self.nanonisTCP.to_hex(bouncy_scan, 4)
hex_rep += self.nanonisTCP.to_hex(autosave, 4)
hex_rep += self.nanonisTCP.to_hex(series_name_size, 4)
if (series_name_size > 0):
hex_rep += self.nanonisTCP.string_to_hex(series_name)
hex_rep += self.nanonisTCP.to_hex(comment_size, 4)
if (comment_size > 0):
hex_rep += self.nanonisTCP.string_to_hex(comment)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | Configures some of the scan parameters
Parameters
continuous_scan : sets whether the scan continues or stops when a frame
has been completed.
0: no change (leave previous setting)
1: turn on
2: turn off
bouncy_scan : sets whether the scan direction changes when a frame
has been completed.
0: no change (leave previous setting)
1: turn on (scan direction changes each EOS)
2: turn off (scan direction doesn't change each EOS)
autosave : defines the save behaviour when a frame has been
completed.
0: no change (leave previous setting)
1: save all
2: save next only
3: turn off (save none)
series_name : is the base name used for the saved images
comment : is the comment saved in the file | nanonisTCP/Scan.py | PropsSet | New-Horizons-SPM/nanonisTCP | 0 | python | def PropsSet(self, continuous_scan=0, bouncy_scan=0, autosave=0, series_name='%y%m%d_%H-%M-%S_SPM', comment=):
"\n Configures some of the scan parameters\n\n Parameters\n continuous_scan : sets whether the scan continues or stops when a frame\n has been completed.\n 0: no change (leave previous setting)\n 1: turn on\n 2: turn off\n \n bouncy_scan : sets whether the scan direction changes when a frame\n has been completed.\n 0: no change (leave previous setting)\n 1: turn on (scan direction changes each EOS)\n 2: turn off (scan direction doesn't change each EOS)\n \n autosave : defines the save behaviour when a frame has been\n completed.\n 0: no change (leave previous setting)\n 1: save all\n 2: save next only\n 3: turn off (save none)\n series_name : is the base name used for the saved images\n \n comment : is the comment saved in the file\n\n "
series_name_size = int((len(self.nanonisTCP.string_to_hex(series_name)) / 2))
comment_size = int((len(self.nanonisTCP.string_to_hex(comment)) / 2))
body_size = ((20 + series_name_size) + comment_size)
hex_rep = self.nanonisTCP.make_header('Scan.PropsSet', body_size=body_size)
hex_rep += self.nanonisTCP.to_hex(continuous_scan, 4)
hex_rep += self.nanonisTCP.to_hex(bouncy_scan, 4)
hex_rep += self.nanonisTCP.to_hex(autosave, 4)
hex_rep += self.nanonisTCP.to_hex(series_name_size, 4)
if (series_name_size > 0):
hex_rep += self.nanonisTCP.string_to_hex(series_name)
hex_rep += self.nanonisTCP.to_hex(comment_size, 4)
if (comment_size > 0):
hex_rep += self.nanonisTCP.string_to_hex(comment)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | def PropsSet(self, continuous_scan=0, bouncy_scan=0, autosave=0, series_name='%y%m%d_%H-%M-%S_SPM', comment=):
"\n Configures some of the scan parameters\n\n Parameters\n continuous_scan : sets whether the scan continues or stops when a frame\n has been completed.\n 0: no change (leave previous setting)\n 1: turn on\n 2: turn off\n \n bouncy_scan : sets whether the scan direction changes when a frame\n has been completed.\n 0: no change (leave previous setting)\n 1: turn on (scan direction changes each EOS)\n 2: turn off (scan direction doesn't change each EOS)\n \n autosave : defines the save behaviour when a frame has been\n completed.\n 0: no change (leave previous setting)\n 1: save all\n 2: save next only\n 3: turn off (save none)\n series_name : is the base name used for the saved images\n \n comment : is the comment saved in the file\n\n "
series_name_size = int((len(self.nanonisTCP.string_to_hex(series_name)) / 2))
comment_size = int((len(self.nanonisTCP.string_to_hex(comment)) / 2))
body_size = ((20 + series_name_size) + comment_size)
hex_rep = self.nanonisTCP.make_header('Scan.PropsSet', body_size=body_size)
hex_rep += self.nanonisTCP.to_hex(continuous_scan, 4)
hex_rep += self.nanonisTCP.to_hex(bouncy_scan, 4)
hex_rep += self.nanonisTCP.to_hex(autosave, 4)
hex_rep += self.nanonisTCP.to_hex(series_name_size, 4)
if (series_name_size > 0):
hex_rep += self.nanonisTCP.string_to_hex(series_name)
hex_rep += self.nanonisTCP.to_hex(comment_size, 4)
if (comment_size > 0):
hex_rep += self.nanonisTCP.string_to_hex(comment)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0)<|docstring|>Configures some of the scan parameters
Parameters
continuous_scan : sets whether the scan continues or stops when a frame
has been completed.
0: no change (leave previous setting)
1: turn on
2: turn off
bouncy_scan : sets whether the scan direction changes when a frame
has been completed.
0: no change (leave previous setting)
1: turn on (scan direction changes each EOS)
2: turn off (scan direction doesn't change each EOS)
autosave : defines the save behaviour when a frame has been
completed.
0: no change (leave previous setting)
1: save all
2: save next only
3: turn off (save none)
series_name : is the base name used for the saved images
comment : is the comment saved in the file<|endoftext|> |
3f31afd4b3a210c79d52c70ad2b668dc16f8f94e47f23d15f82f0f0f50a6669d | def PropsGet(self):
"\n Returns some of the scan parameters\n \n Returns\n continuous_scan : sets whether the scan continues or stops when a frame\n has been completed.\n 0: off\n 1: on\n \n bouncy_scan : sets whether the scan direction changes when a frame\n has been completed.\n 0: off (scan direction doesn't change each EOS)\n 1: on (scan direction changes each EOS)\n \n autosave : defines the save behaviour when a frame has been\n completed.\n 0: save all\n 1: save next only\n 2: off (save none)\n series_name : is the base name used for the saved images\n \n comment : is the comment saved in the file\n\n "
hex_rep = self.nanonisTCP.make_header('Scan.PropsGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
continuous_scan = self.nanonisTCP.hex_to_uint32(response[0:4])
bouncy_scan = self.nanonisTCP.hex_to_uint32(response[4:8])
autosave = self.nanonisTCP.hex_to_uint32(response[8:12])
series_name = ''
series_name_size = self.nanonisTCP.hex_to_int32(response[12:16])
if (series_name_size > 0):
series_name = response[16:(16 + series_name_size)].decode()
comment = ''
idx = (16 + series_name_size)
comment_size = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
if (comment_size > 0):
idx += 4
comment = response[idx:(idx + comment_size)].decode()
return [continuous_scan, bouncy_scan, autosave, series_name, comment] | Returns some of the scan parameters
Returns
continuous_scan : sets whether the scan continues or stops when a frame
has been completed.
0: off
1: on
bouncy_scan : sets whether the scan direction changes when a frame
has been completed.
0: off (scan direction doesn't change each EOS)
1: on (scan direction changes each EOS)
autosave : defines the save behaviour when a frame has been
completed.
0: save all
1: save next only
2: off (save none)
series_name : is the base name used for the saved images
comment : is the comment saved in the file | nanonisTCP/Scan.py | PropsGet | New-Horizons-SPM/nanonisTCP | 0 | python | def PropsGet(self):
"\n Returns some of the scan parameters\n \n Returns\n continuous_scan : sets whether the scan continues or stops when a frame\n has been completed.\n 0: off\n 1: on\n \n bouncy_scan : sets whether the scan direction changes when a frame\n has been completed.\n 0: off (scan direction doesn't change each EOS)\n 1: on (scan direction changes each EOS)\n \n autosave : defines the save behaviour when a frame has been\n completed.\n 0: save all\n 1: save next only\n 2: off (save none)\n series_name : is the base name used for the saved images\n \n comment : is the comment saved in the file\n\n "
hex_rep = self.nanonisTCP.make_header('Scan.PropsGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
continuous_scan = self.nanonisTCP.hex_to_uint32(response[0:4])
bouncy_scan = self.nanonisTCP.hex_to_uint32(response[4:8])
autosave = self.nanonisTCP.hex_to_uint32(response[8:12])
series_name =
series_name_size = self.nanonisTCP.hex_to_int32(response[12:16])
if (series_name_size > 0):
series_name = response[16:(16 + series_name_size)].decode()
comment =
idx = (16 + series_name_size)
comment_size = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
if (comment_size > 0):
idx += 4
comment = response[idx:(idx + comment_size)].decode()
return [continuous_scan, bouncy_scan, autosave, series_name, comment] | def PropsGet(self):
"\n Returns some of the scan parameters\n \n Returns\n continuous_scan : sets whether the scan continues or stops when a frame\n has been completed.\n 0: off\n 1: on\n \n bouncy_scan : sets whether the scan direction changes when a frame\n has been completed.\n 0: off (scan direction doesn't change each EOS)\n 1: on (scan direction changes each EOS)\n \n autosave : defines the save behaviour when a frame has been\n completed.\n 0: save all\n 1: save next only\n 2: off (save none)\n series_name : is the base name used for the saved images\n \n comment : is the comment saved in the file\n\n "
hex_rep = self.nanonisTCP.make_header('Scan.PropsGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
continuous_scan = self.nanonisTCP.hex_to_uint32(response[0:4])
bouncy_scan = self.nanonisTCP.hex_to_uint32(response[4:8])
autosave = self.nanonisTCP.hex_to_uint32(response[8:12])
series_name =
series_name_size = self.nanonisTCP.hex_to_int32(response[12:16])
if (series_name_size > 0):
series_name = response[16:(16 + series_name_size)].decode()
comment =
idx = (16 + series_name_size)
comment_size = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
if (comment_size > 0):
idx += 4
comment = response[idx:(idx + comment_size)].decode()
return [continuous_scan, bouncy_scan, autosave, series_name, comment]<|docstring|>Returns some of the scan parameters
Returns
continuous_scan : sets whether the scan continues or stops when a frame
has been completed.
0: off
1: on
bouncy_scan : sets whether the scan direction changes when a frame
has been completed.
0: off (scan direction doesn't change each EOS)
1: on (scan direction changes each EOS)
autosave : defines the save behaviour when a frame has been
completed.
0: save all
1: save next only
2: off (save none)
series_name : is the base name used for the saved images
comment : is the comment saved in the file<|endoftext|> |
52003bd6b05d1877689672a61d1a79a998955a663e4df56623e73c7814c151f9 | def SpeedSet(self, fwd_speed=0, bwd_speed=0, fwd_line_time=0, bwd_line_time=0, const_param=(- 1), speed_ratio=0):
'\n Configures the scan speed parameters. Best practice here is to define\n at least (fwd_speed or fwd_line_time)\n + (corresponding bwd param or speed_ratio)\n \n e.g like one of these:\n \n SpeedSet(fwd_speed=150e-9,bwd_speed=400e-9)\n SpeedSet(fwd_line_time=1,bwd_line_time=0.25)\n SpeedSet(fwd_speed=150e-9,speed_ratio=2)\n SpeedSet(fwd_line_time=1,speed_ratio=2)\n \n Parameters\n fwd_speed : forward linear speed (m/s)\n bwd_speed : backward linear speed (m/s)\n fwd_line_time : forward time per line\n bwd_line_time : backward time per line\n const_param : defines which speed parameter to keep constant\n 0: No change (leave setting as is)\n 1: keeps linear speed constant\n 2: keeps time per line constant\n speed_ratio : defines the backward tip speed relative to the\n forward tip speed\n\n '
(buf_fwd_speed, buf_bwd_speed, buf_fwd_line_time, buf_bwd_line_time, buf_const_param, buf_speed_ratio) = self.SpeedGet()
if (const_param < 0):
if (fwd_speed and (not fwd_line_time)):
const_param = 1
if ((not fwd_speed) and fwd_line_time):
const_param = 2
if (const_param < 0):
if (bwd_speed and (not bwd_line_time)):
const_param = 1
if ((not bwd_speed) and bwd_line_time):
const_param = 2
if (bwd_speed and fwd_speed and (not speed_ratio)):
speed_ratio = (bwd_speed / fwd_speed)
if (bwd_line_time and fwd_line_time and (not speed_ratio)):
speed_ratio = (fwd_line_time / bwd_line_time)
if (not fwd_speed):
fwd_speed = buf_fwd_speed
if (not bwd_speed):
bwd_speed = buf_bwd_speed
if (not fwd_line_time):
fwd_line_time = buf_fwd_line_time
if (not bwd_line_time):
bwd_line_time = buf_bwd_line_time
if (not speed_ratio):
speed_ratio = buf_speed_ratio
hex_rep = self.nanonisTCP.make_header('Scan.SpeedSet', body_size=22)
hex_rep += self.nanonisTCP.float32_to_hex(fwd_speed)
hex_rep += self.nanonisTCP.float32_to_hex(bwd_speed)
hex_rep += self.nanonisTCP.float32_to_hex(fwd_line_time)
hex_rep += self.nanonisTCP.float32_to_hex(bwd_line_time)
hex_rep += self.nanonisTCP.to_hex(const_param, 2)
hex_rep += self.nanonisTCP.float32_to_hex(speed_ratio)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | Configures the scan speed parameters. Best practice here is to define
at least (fwd_speed or fwd_line_time)
+ (corresponding bwd param or speed_ratio)
e.g like one of these:
SpeedSet(fwd_speed=150e-9,bwd_speed=400e-9)
SpeedSet(fwd_line_time=1,bwd_line_time=0.25)
SpeedSet(fwd_speed=150e-9,speed_ratio=2)
SpeedSet(fwd_line_time=1,speed_ratio=2)
Parameters
fwd_speed : forward linear speed (m/s)
bwd_speed : backward linear speed (m/s)
fwd_line_time : forward time per line
bwd_line_time : backward time per line
const_param : defines which speed parameter to keep constant
0: No change (leave setting as is)
1: keeps linear speed constant
2: keeps time per line constant
speed_ratio : defines the backward tip speed relative to the
forward tip speed | nanonisTCP/Scan.py | SpeedSet | New-Horizons-SPM/nanonisTCP | 0 | python | def SpeedSet(self, fwd_speed=0, bwd_speed=0, fwd_line_time=0, bwd_line_time=0, const_param=(- 1), speed_ratio=0):
'\n Configures the scan speed parameters. Best practice here is to define\n at least (fwd_speed or fwd_line_time)\n + (corresponding bwd param or speed_ratio)\n \n e.g like one of these:\n \n SpeedSet(fwd_speed=150e-9,bwd_speed=400e-9)\n SpeedSet(fwd_line_time=1,bwd_line_time=0.25)\n SpeedSet(fwd_speed=150e-9,speed_ratio=2)\n SpeedSet(fwd_line_time=1,speed_ratio=2)\n \n Parameters\n fwd_speed : forward linear speed (m/s)\n bwd_speed : backward linear speed (m/s)\n fwd_line_time : forward time per line\n bwd_line_time : backward time per line\n const_param : defines which speed parameter to keep constant\n 0: No change (leave setting as is)\n 1: keeps linear speed constant\n 2: keeps time per line constant\n speed_ratio : defines the backward tip speed relative to the\n forward tip speed\n\n '
(buf_fwd_speed, buf_bwd_speed, buf_fwd_line_time, buf_bwd_line_time, buf_const_param, buf_speed_ratio) = self.SpeedGet()
if (const_param < 0):
if (fwd_speed and (not fwd_line_time)):
const_param = 1
if ((not fwd_speed) and fwd_line_time):
const_param = 2
if (const_param < 0):
if (bwd_speed and (not bwd_line_time)):
const_param = 1
if ((not bwd_speed) and bwd_line_time):
const_param = 2
if (bwd_speed and fwd_speed and (not speed_ratio)):
speed_ratio = (bwd_speed / fwd_speed)
if (bwd_line_time and fwd_line_time and (not speed_ratio)):
speed_ratio = (fwd_line_time / bwd_line_time)
if (not fwd_speed):
fwd_speed = buf_fwd_speed
if (not bwd_speed):
bwd_speed = buf_bwd_speed
if (not fwd_line_time):
fwd_line_time = buf_fwd_line_time
if (not bwd_line_time):
bwd_line_time = buf_bwd_line_time
if (not speed_ratio):
speed_ratio = buf_speed_ratio
hex_rep = self.nanonisTCP.make_header('Scan.SpeedSet', body_size=22)
hex_rep += self.nanonisTCP.float32_to_hex(fwd_speed)
hex_rep += self.nanonisTCP.float32_to_hex(bwd_speed)
hex_rep += self.nanonisTCP.float32_to_hex(fwd_line_time)
hex_rep += self.nanonisTCP.float32_to_hex(bwd_line_time)
hex_rep += self.nanonisTCP.to_hex(const_param, 2)
hex_rep += self.nanonisTCP.float32_to_hex(speed_ratio)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0) | def SpeedSet(self, fwd_speed=0, bwd_speed=0, fwd_line_time=0, bwd_line_time=0, const_param=(- 1), speed_ratio=0):
'\n Configures the scan speed parameters. Best practice here is to define\n at least (fwd_speed or fwd_line_time)\n + (corresponding bwd param or speed_ratio)\n \n e.g like one of these:\n \n SpeedSet(fwd_speed=150e-9,bwd_speed=400e-9)\n SpeedSet(fwd_line_time=1,bwd_line_time=0.25)\n SpeedSet(fwd_speed=150e-9,speed_ratio=2)\n SpeedSet(fwd_line_time=1,speed_ratio=2)\n \n Parameters\n fwd_speed : forward linear speed (m/s)\n bwd_speed : backward linear speed (m/s)\n fwd_line_time : forward time per line\n bwd_line_time : backward time per line\n const_param : defines which speed parameter to keep constant\n 0: No change (leave setting as is)\n 1: keeps linear speed constant\n 2: keeps time per line constant\n speed_ratio : defines the backward tip speed relative to the\n forward tip speed\n\n '
(buf_fwd_speed, buf_bwd_speed, buf_fwd_line_time, buf_bwd_line_time, buf_const_param, buf_speed_ratio) = self.SpeedGet()
if (const_param < 0):
if (fwd_speed and (not fwd_line_time)):
const_param = 1
if ((not fwd_speed) and fwd_line_time):
const_param = 2
if (const_param < 0):
if (bwd_speed and (not bwd_line_time)):
const_param = 1
if ((not bwd_speed) and bwd_line_time):
const_param = 2
if (bwd_speed and fwd_speed and (not speed_ratio)):
speed_ratio = (bwd_speed / fwd_speed)
if (bwd_line_time and fwd_line_time and (not speed_ratio)):
speed_ratio = (fwd_line_time / bwd_line_time)
if (not fwd_speed):
fwd_speed = buf_fwd_speed
if (not bwd_speed):
bwd_speed = buf_bwd_speed
if (not fwd_line_time):
fwd_line_time = buf_fwd_line_time
if (not bwd_line_time):
bwd_line_time = buf_bwd_line_time
if (not speed_ratio):
speed_ratio = buf_speed_ratio
hex_rep = self.nanonisTCP.make_header('Scan.SpeedSet', body_size=22)
hex_rep += self.nanonisTCP.float32_to_hex(fwd_speed)
hex_rep += self.nanonisTCP.float32_to_hex(bwd_speed)
hex_rep += self.nanonisTCP.float32_to_hex(fwd_line_time)
hex_rep += self.nanonisTCP.float32_to_hex(bwd_line_time)
hex_rep += self.nanonisTCP.to_hex(const_param, 2)
hex_rep += self.nanonisTCP.float32_to_hex(speed_ratio)
self.nanonisTCP.send_command(hex_rep)
self.nanonisTCP.receive_response(0)<|docstring|>Configures the scan speed parameters. Best practice here is to define
at least (fwd_speed or fwd_line_time)
+ (corresponding bwd param or speed_ratio)
e.g like one of these:
SpeedSet(fwd_speed=150e-9,bwd_speed=400e-9)
SpeedSet(fwd_line_time=1,bwd_line_time=0.25)
SpeedSet(fwd_speed=150e-9,speed_ratio=2)
SpeedSet(fwd_line_time=1,speed_ratio=2)
Parameters
fwd_speed : forward linear speed (m/s)
bwd_speed : backward linear speed (m/s)
fwd_line_time : forward time per line
bwd_line_time : backward time per line
const_param : defines which speed parameter to keep constant
0: No change (leave setting as is)
1: keeps linear speed constant
2: keeps time per line constant
speed_ratio : defines the backward tip speed relative to the
forward tip speed<|endoftext|> |
8fe301eb18a3b2da760ae885e1ef05cf57165563dc02eda2a5754e1e93a18088 | def SpeedGet(self):
'\n Returns the scan speed parameters\n\n Returns\n fwd_speed : forward linear speed (m/s)\n bwd_speed : backward linear speed (m/s)\n fwd_line_time : forward time per line\n bwd_line_time : backward time per line\n const_param : defines which speed parameter to keep constant\n 0: No change (leave setting as is)\n 1: keeps linear speed constant\n 2: keeps time per line constant\n speed_ratio : defines the backward tip speed relative to the\n forward tip speed\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.SpeedGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
fwd_speed = self.nanonisTCP.hex_to_float32(response[0:4])
bwd_speed = self.nanonisTCP.hex_to_float32(response[4:8])
fwd_line_time = self.nanonisTCP.hex_to_float32(response[8:12])
bwd_line_time = self.nanonisTCP.hex_to_float32(response[12:16])
const_param = self.nanonisTCP.hex_to_uint16(response[16:18])
speed_ratio = self.nanonisTCP.hex_to_float32(response[18:22])
return [fwd_speed, bwd_speed, fwd_line_time, bwd_line_time, const_param, speed_ratio] | Returns the scan speed parameters
Returns
fwd_speed : forward linear speed (m/s)
bwd_speed : backward linear speed (m/s)
fwd_line_time : forward time per line
bwd_line_time : backward time per line
const_param : defines which speed parameter to keep constant
0: No change (leave setting as is)
1: keeps linear speed constant
2: keeps time per line constant
speed_ratio : defines the backward tip speed relative to the
forward tip speed | nanonisTCP/Scan.py | SpeedGet | New-Horizons-SPM/nanonisTCP | 0 | python | def SpeedGet(self):
'\n Returns the scan speed parameters\n\n Returns\n fwd_speed : forward linear speed (m/s)\n bwd_speed : backward linear speed (m/s)\n fwd_line_time : forward time per line\n bwd_line_time : backward time per line\n const_param : defines which speed parameter to keep constant\n 0: No change (leave setting as is)\n 1: keeps linear speed constant\n 2: keeps time per line constant\n speed_ratio : defines the backward tip speed relative to the\n forward tip speed\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.SpeedGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
fwd_speed = self.nanonisTCP.hex_to_float32(response[0:4])
bwd_speed = self.nanonisTCP.hex_to_float32(response[4:8])
fwd_line_time = self.nanonisTCP.hex_to_float32(response[8:12])
bwd_line_time = self.nanonisTCP.hex_to_float32(response[12:16])
const_param = self.nanonisTCP.hex_to_uint16(response[16:18])
speed_ratio = self.nanonisTCP.hex_to_float32(response[18:22])
return [fwd_speed, bwd_speed, fwd_line_time, bwd_line_time, const_param, speed_ratio] | def SpeedGet(self):
'\n Returns the scan speed parameters\n\n Returns\n fwd_speed : forward linear speed (m/s)\n bwd_speed : backward linear speed (m/s)\n fwd_line_time : forward time per line\n bwd_line_time : backward time per line\n const_param : defines which speed parameter to keep constant\n 0: No change (leave setting as is)\n 1: keeps linear speed constant\n 2: keeps time per line constant\n speed_ratio : defines the backward tip speed relative to the\n forward tip speed\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.SpeedGet', body_size=0)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
fwd_speed = self.nanonisTCP.hex_to_float32(response[0:4])
bwd_speed = self.nanonisTCP.hex_to_float32(response[4:8])
fwd_line_time = self.nanonisTCP.hex_to_float32(response[8:12])
bwd_line_time = self.nanonisTCP.hex_to_float32(response[12:16])
const_param = self.nanonisTCP.hex_to_uint16(response[16:18])
speed_ratio = self.nanonisTCP.hex_to_float32(response[18:22])
return [fwd_speed, bwd_speed, fwd_line_time, bwd_line_time, const_param, speed_ratio]<|docstring|>Returns the scan speed parameters
Returns
fwd_speed : forward linear speed (m/s)
bwd_speed : backward linear speed (m/s)
fwd_line_time : forward time per line
bwd_line_time : backward time per line
const_param : defines which speed parameter to keep constant
0: No change (leave setting as is)
1: keeps linear speed constant
2: keeps time per line constant
speed_ratio : defines the backward tip speed relative to the
forward tip speed<|endoftext|> |
a677153a3e5cb2e8f75fbc522ac4e71a84fc6de447d174042860ee1ff48ba0ce | def FrameDataGrab(self, channel_index, data_direction):
'\n Returns the scan data of the selected frame\n\n Parameters\n channel_index : selects which channel to get the data from. The \n channel must be one of the acquired channels. The \n list of acquired channels while scanning can be \n configured by the function Scan.BufferSet or read by\n the function Scan.BufferGet\n data_direction : selects the data direction to be read.\n 0: backward\n 1: forward\n\n Returns\n -------\n None.\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameDataGrab', body_size=8)
hex_rep += self.nanonisTCP.to_hex(channel_index, 4)
hex_rep += self.nanonisTCP.to_hex(data_direction, 4)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
idx = 0
channel_name_size = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
channel_name = response[idx:(idx + channel_name_size)].decode()
idx += channel_name_size
scan_data_rows = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
scan_data_columns = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
scan_data = np.empty((scan_data_rows, scan_data_columns))
for i in range(scan_data_rows):
for j in range(scan_data_columns):
idx += 4
scan_data[(i, j)] = self.nanonisTCP.hex_to_float32(response[idx:(idx + 4)])
idx += 4
scan_direction = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
scan_direction = ['down', 'up'][scan_direction]
return [channel_name, scan_data, scan_direction] | Returns the scan data of the selected frame
Parameters
channel_index : selects which channel to get the data from. The
channel must be one of the acquired channels. The
list of acquired channels while scanning can be
configured by the function Scan.BufferSet or read by
the function Scan.BufferGet
data_direction : selects the data direction to be read.
0: backward
1: forward
Returns
-------
None. | nanonisTCP/Scan.py | FrameDataGrab | New-Horizons-SPM/nanonisTCP | 0 | python | def FrameDataGrab(self, channel_index, data_direction):
'\n Returns the scan data of the selected frame\n\n Parameters\n channel_index : selects which channel to get the data from. The \n channel must be one of the acquired channels. The \n list of acquired channels while scanning can be \n configured by the function Scan.BufferSet or read by\n the function Scan.BufferGet\n data_direction : selects the data direction to be read.\n 0: backward\n 1: forward\n\n Returns\n -------\n None.\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameDataGrab', body_size=8)
hex_rep += self.nanonisTCP.to_hex(channel_index, 4)
hex_rep += self.nanonisTCP.to_hex(data_direction, 4)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
idx = 0
channel_name_size = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
channel_name = response[idx:(idx + channel_name_size)].decode()
idx += channel_name_size
scan_data_rows = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
scan_data_columns = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
scan_data = np.empty((scan_data_rows, scan_data_columns))
for i in range(scan_data_rows):
for j in range(scan_data_columns):
idx += 4
scan_data[(i, j)] = self.nanonisTCP.hex_to_float32(response[idx:(idx + 4)])
idx += 4
scan_direction = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
scan_direction = ['down', 'up'][scan_direction]
return [channel_name, scan_data, scan_direction] | def FrameDataGrab(self, channel_index, data_direction):
'\n Returns the scan data of the selected frame\n\n Parameters\n channel_index : selects which channel to get the data from. The \n channel must be one of the acquired channels. The \n list of acquired channels while scanning can be \n configured by the function Scan.BufferSet or read by\n the function Scan.BufferGet\n data_direction : selects the data direction to be read.\n 0: backward\n 1: forward\n\n Returns\n -------\n None.\n\n '
hex_rep = self.nanonisTCP.make_header('Scan.FrameDataGrab', body_size=8)
hex_rep += self.nanonisTCP.to_hex(channel_index, 4)
hex_rep += self.nanonisTCP.to_hex(data_direction, 4)
self.nanonisTCP.send_command(hex_rep)
response = self.nanonisTCP.receive_response()
idx = 0
channel_name_size = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
channel_name = response[idx:(idx + channel_name_size)].decode()
idx += channel_name_size
scan_data_rows = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
idx += 4
scan_data_columns = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
scan_data = np.empty((scan_data_rows, scan_data_columns))
for i in range(scan_data_rows):
for j in range(scan_data_columns):
idx += 4
scan_data[(i, j)] = self.nanonisTCP.hex_to_float32(response[idx:(idx + 4)])
idx += 4
scan_direction = self.nanonisTCP.hex_to_int32(response[idx:(idx + 4)])
scan_direction = ['down', 'up'][scan_direction]
return [channel_name, scan_data, scan_direction]<|docstring|>Returns the scan data of the selected frame
Parameters
channel_index : selects which channel to get the data from. The
channel must be one of the acquired channels. The
list of acquired channels while scanning can be
configured by the function Scan.BufferSet or read by
the function Scan.BufferGet
data_direction : selects the data direction to be read.
0: backward
1: forward
Returns
-------
None.<|endoftext|> |
ef09b4a13a0c5005a448368d668b9915c1f5748deb976a10a8325c8c2dccca5c | def update_market_prices(self, update_interval=900):
"Update market prices for all shares at a specified interval\n If the stock model hasn't been updated in the specified interval, pull data from API\n\n default is 15 minutes\n "
interval_start = (datetime.now() - timedelta(seconds=update_interval))
stocks_to_update = super().get_queryset().filter(updated_at__lte=interval_start)
tickers = list(stocks_to_update.values_list('ticker', flat=True))
if (not tickers):
return
market_prices = get_stock_prices(tickers)
for stock in stocks_to_update:
stock.market_price = market_prices[stock.ticker]
stock.save() | Update market prices for all shares at a specified interval
If the stock model hasn't been updated in the specified interval, pull data from API
default is 15 minutes | budgetbuddy/stocks/managers.py | update_market_prices | michaelqknguyen/Budget-Buddy | 0 | python | def update_market_prices(self, update_interval=900):
"Update market prices for all shares at a specified interval\n If the stock model hasn't been updated in the specified interval, pull data from API\n\n default is 15 minutes\n "
interval_start = (datetime.now() - timedelta(seconds=update_interval))
stocks_to_update = super().get_queryset().filter(updated_at__lte=interval_start)
tickers = list(stocks_to_update.values_list('ticker', flat=True))
if (not tickers):
return
market_prices = get_stock_prices(tickers)
for stock in stocks_to_update:
stock.market_price = market_prices[stock.ticker]
stock.save() | def update_market_prices(self, update_interval=900):
"Update market prices for all shares at a specified interval\n If the stock model hasn't been updated in the specified interval, pull data from API\n\n default is 15 minutes\n "
interval_start = (datetime.now() - timedelta(seconds=update_interval))
stocks_to_update = super().get_queryset().filter(updated_at__lte=interval_start)
tickers = list(stocks_to_update.values_list('ticker', flat=True))
if (not tickers):
return
market_prices = get_stock_prices(tickers)
for stock in stocks_to_update:
stock.market_price = market_prices[stock.ticker]
stock.save()<|docstring|>Update market prices for all shares at a specified interval
If the stock model hasn't been updated in the specified interval, pull data from API
default is 15 minutes<|endoftext|> |
bada24a71fe28e03dcf6a49007e2f1758b37727c408fab9bcf9ba3a8d3e98af1 | def find_all_shares(self, user, stock=None, brokerage_account=None, budget_account=None):
'find all shares in all accounts for a ticker'
query_args = {'user': user, 'stock': stock, 'brokerage_account': brokerage_account, 'budget_account': budget_account}
final_query_args = {k: v for (k, v) in query_args.items() if (v is not None)}
return super().get_queryset().filter(**final_query_args) | find all shares in all accounts for a ticker | budgetbuddy/stocks/managers.py | find_all_shares | michaelqknguyen/Budget-Buddy | 0 | python | def find_all_shares(self, user, stock=None, brokerage_account=None, budget_account=None):
query_args = {'user': user, 'stock': stock, 'brokerage_account': brokerage_account, 'budget_account': budget_account}
final_query_args = {k: v for (k, v) in query_args.items() if (v is not None)}
return super().get_queryset().filter(**final_query_args) | def find_all_shares(self, user, stock=None, brokerage_account=None, budget_account=None):
query_args = {'user': user, 'stock': stock, 'brokerage_account': brokerage_account, 'budget_account': budget_account}
final_query_args = {k: v for (k, v) in query_args.items() if (v is not None)}
return super().get_queryset().filter(**final_query_args)<|docstring|>find all shares in all accounts for a ticker<|endoftext|> |
fc97b8af014003459669f68193f4a4fc099c59bfce521f8846837806a0d8f8fb | def investment_sum(self, user=None, stock=None, brokerage_account=None, budget_account=None):
'find sum of shares in accounts'
query_args = {'user': user, 'stock': stock, 'brokerage_account': brokerage_account, 'budget_account': budget_account}
final_query_args = {k: v for (k, v) in query_args.items() if (v is not None)}
queryset = super().get_queryset().filter(**final_query_args)
total = queryset.aggregate(total=Sum((F('num_shares') * F('stock__market_price'))))['total']
if (total is None):
return 0
return round(total, 2) | find sum of shares in accounts | budgetbuddy/stocks/managers.py | investment_sum | michaelqknguyen/Budget-Buddy | 0 | python | def investment_sum(self, user=None, stock=None, brokerage_account=None, budget_account=None):
query_args = {'user': user, 'stock': stock, 'brokerage_account': brokerage_account, 'budget_account': budget_account}
final_query_args = {k: v for (k, v) in query_args.items() if (v is not None)}
queryset = super().get_queryset().filter(**final_query_args)
total = queryset.aggregate(total=Sum((F('num_shares') * F('stock__market_price'))))['total']
if (total is None):
return 0
return round(total, 2) | def investment_sum(self, user=None, stock=None, brokerage_account=None, budget_account=None):
query_args = {'user': user, 'stock': stock, 'brokerage_account': brokerage_account, 'budget_account': budget_account}
final_query_args = {k: v for (k, v) in query_args.items() if (v is not None)}
queryset = super().get_queryset().filter(**final_query_args)
total = queryset.aggregate(total=Sum((F('num_shares') * F('stock__market_price'))))['total']
if (total is None):
return 0
return round(total, 2)<|docstring|>find sum of shares in accounts<|endoftext|> |
83854c8fdfd1146a69951535156016d5635e67b5f49cf3b564aed2c8a6f25996 | def binary_arg_fixer(app_metadata, args):
" Returns a copy of the args in which:\n - keys are replaced by any destination key names if they exist\n - and store_const is used rather than value for bools\n\n The reason for this function is that unlike cli args, envvars\n and config files don't have flags whose mere existance indicates\n True or False: instead config files have bools which can be set\n either way and envvars just have strings - which might have a True\n or False, 1 or 0. This code helps treat the envvars & config files\n like cli args.\n "
def get_bool_actual_value(key, orig_config_val):
assert (app_metadata[key]['type'] is bool)
assert (app_metadata[key]['action'] == 'store_const')
assert (app_metadata[key]['const'] in (True, False))
if (type(orig_config_val) is bool):
transformed_config_val = orig_config_val
elif ((orig_config_val is None) or (orig_config_val.strip().lower() in ('none', 'null'))):
comm.abort(f'Config item {key} has a non-true value of {orig_config_val}', 'This is a flag type whose value is established by pgm metadata and when provided via envvar or config file must always be set to true')
else:
transformed_config_val = (orig_config_val.strip().lower() in ('true', 't', '1', ''))
if (not transformed_config_val):
comm.abort(f'Config item {key} has a non-true value of {orig_config_val}', 'This is a flag type whose value is established by pgm metadataand when provided via envvar or config file must always be set to true')
return app_metadata[key]['const']
cleaned_args = {}
for (orig_key, val) in args.items():
try:
if (app_metadata[orig_key]['type'] != bool):
cleaned_args[orig_key] = val
else:
actual_key = app_metadata[orig_key].get('dest', orig_key)
actual_val = get_bool_actual_value(orig_key, val)
cleaned_args[actual_key] = actual_val
except KeyError:
comm.abort(f'Error: option {orig_key} is unknown')
return cleaned_args | Returns a copy of the args in which:
- keys are replaced by any destination key names if they exist
- and store_const is used rather than value for bools
The reason for this function is that unlike cli args, envvars
and config files don't have flags whose mere existance indicates
True or False: instead config files have bools which can be set
either way and envvars just have strings - which might have a True
or False, 1 or 0. This code helps treat the envvars & config files
like cli args. | datagristle/configulator.py | binary_arg_fixer | kenfar/DataGristle | 77 | python | def binary_arg_fixer(app_metadata, args):
" Returns a copy of the args in which:\n - keys are replaced by any destination key names if they exist\n - and store_const is used rather than value for bools\n\n The reason for this function is that unlike cli args, envvars\n and config files don't have flags whose mere existance indicates\n True or False: instead config files have bools which can be set\n either way and envvars just have strings - which might have a True\n or False, 1 or 0. This code helps treat the envvars & config files\n like cli args.\n "
def get_bool_actual_value(key, orig_config_val):
assert (app_metadata[key]['type'] is bool)
assert (app_metadata[key]['action'] == 'store_const')
assert (app_metadata[key]['const'] in (True, False))
if (type(orig_config_val) is bool):
transformed_config_val = orig_config_val
elif ((orig_config_val is None) or (orig_config_val.strip().lower() in ('none', 'null'))):
comm.abort(f'Config item {key} has a non-true value of {orig_config_val}', 'This is a flag type whose value is established by pgm metadata and when provided via envvar or config file must always be set to true')
else:
transformed_config_val = (orig_config_val.strip().lower() in ('true', 't', '1', ))
if (not transformed_config_val):
comm.abort(f'Config item {key} has a non-true value of {orig_config_val}', 'This is a flag type whose value is established by pgm metadataand when provided via envvar or config file must always be set to true')
return app_metadata[key]['const']
cleaned_args = {}
for (orig_key, val) in args.items():
try:
if (app_metadata[orig_key]['type'] != bool):
cleaned_args[orig_key] = val
else:
actual_key = app_metadata[orig_key].get('dest', orig_key)
actual_val = get_bool_actual_value(orig_key, val)
cleaned_args[actual_key] = actual_val
except KeyError:
comm.abort(f'Error: option {orig_key} is unknown')
return cleaned_args | def binary_arg_fixer(app_metadata, args):
" Returns a copy of the args in which:\n - keys are replaced by any destination key names if they exist\n - and store_const is used rather than value for bools\n\n The reason for this function is that unlike cli args, envvars\n and config files don't have flags whose mere existance indicates\n True or False: instead config files have bools which can be set\n either way and envvars just have strings - which might have a True\n or False, 1 or 0. This code helps treat the envvars & config files\n like cli args.\n "
def get_bool_actual_value(key, orig_config_val):
assert (app_metadata[key]['type'] is bool)
assert (app_metadata[key]['action'] == 'store_const')
assert (app_metadata[key]['const'] in (True, False))
if (type(orig_config_val) is bool):
transformed_config_val = orig_config_val
elif ((orig_config_val is None) or (orig_config_val.strip().lower() in ('none', 'null'))):
comm.abort(f'Config item {key} has a non-true value of {orig_config_val}', 'This is a flag type whose value is established by pgm metadata and when provided via envvar or config file must always be set to true')
else:
transformed_config_val = (orig_config_val.strip().lower() in ('true', 't', '1', ))
if (not transformed_config_val):
comm.abort(f'Config item {key} has a non-true value of {orig_config_val}', 'This is a flag type whose value is established by pgm metadataand when provided via envvar or config file must always be set to true')
return app_metadata[key]['const']
cleaned_args = {}
for (orig_key, val) in args.items():
try:
if (app_metadata[orig_key]['type'] != bool):
cleaned_args[orig_key] = val
else:
actual_key = app_metadata[orig_key].get('dest', orig_key)
actual_val = get_bool_actual_value(orig_key, val)
cleaned_args[actual_key] = actual_val
except KeyError:
comm.abort(f'Error: option {orig_key} is unknown')
return cleaned_args<|docstring|>Returns a copy of the args in which:
- keys are replaced by any destination key names if they exist
- and store_const is used rather than value for bools
The reason for this function is that unlike cli args, envvars
and config files don't have flags whose mere existance indicates
True or False: instead config files have bools which can be set
either way and envvars just have strings - which might have a True
or False, 1 or 0. This code helps treat the envvars & config files
like cli args.<|endoftext|> |
63b0fe4ae11ef1a84d9115a028217f9f6bc27c029b93d06cfe565e3881f6c1b5 | def extend_config(self):
' Provide calling programs function placeholder for add-ons\n\n This function exists so that additional attributes beyond what were picked up from\n cli, config file, or envvar can be added to the config. These might be derived from\n the config, or wholly new.\n '
pass | Provide calling programs function placeholder for add-ons
This function exists so that additional attributes beyond what were picked up from
cli, config file, or envvar can be added to the config. These might be derived from
the config, or wholly new. | datagristle/configulator.py | extend_config | kenfar/DataGristle | 77 | python | def extend_config(self):
' Provide calling programs function placeholder for add-ons\n\n This function exists so that additional attributes beyond what were picked up from\n cli, config file, or envvar can be added to the config. These might be derived from\n the config, or wholly new.\n '
pass | def extend_config(self):
' Provide calling programs function placeholder for add-ons\n\n This function exists so that additional attributes beyond what were picked up from\n cli, config file, or envvar can be added to the config. These might be derived from\n the config, or wholly new.\n '
pass<|docstring|>Provide calling programs function placeholder for add-ons
This function exists so that additional attributes beyond what were picked up from
cli, config file, or envvar can be added to the config. These might be derived from
the config, or wholly new.<|endoftext|> |
9f7f08d5de00ae105ed8c4e924b0e0ae360e44ae023f1d00573e2b0d31af83eb | def define_user_config(self):
' Provide calling programs function placeholder for defining config\n\n This is where the calling program defines all config items.\n '
pass | Provide calling programs function placeholder for defining config
This is where the calling program defines all config items. | datagristle/configulator.py | define_user_config | kenfar/DataGristle | 77 | python | def define_user_config(self):
' Provide calling programs function placeholder for defining config\n\n This is where the calling program defines all config items.\n '
pass | def define_user_config(self):
' Provide calling programs function placeholder for defining config\n\n This is where the calling program defines all config items.\n '
pass<|docstring|>Provide calling programs function placeholder for defining config
This is where the calling program defines all config items.<|endoftext|> |
5f0458d5bb8be966882508bf0b18ce8d571571f1f7ef7c3f3aff14463da57393 | def define_obsolete_config(self):
' Provide calling programs function placeholder for defining obsolete config\n\n This is where the calling program defines all obsolete config items.\n '
pass | Provide calling programs function placeholder for defining obsolete config
This is where the calling program defines all obsolete config items. | datagristle/configulator.py | define_obsolete_config | kenfar/DataGristle | 77 | python | def define_obsolete_config(self):
' Provide calling programs function placeholder for defining obsolete config\n\n This is where the calling program defines all obsolete config items.\n '
pass | def define_obsolete_config(self):
' Provide calling programs function placeholder for defining obsolete config\n\n This is where the calling program defines all obsolete config items.\n '
pass<|docstring|>Provide calling programs function placeholder for defining obsolete config
This is where the calling program defines all obsolete config items.<|endoftext|> |
9634f5915a0afd3cbe476218ff9f79afa10cf306c5561bae5bb1fc0472a074bb | def add_all_csv_configs(self):
' Adds the whole standard set of csv config items.\n '
self.add_standard_metadata('delimiter')
self.add_standard_metadata('quoting')
self.add_standard_metadata('quotechar')
self.add_standard_metadata('escapechar')
self.add_standard_metadata('doublequote')
self.add_standard_metadata('no_doublequote')
self.add_standard_metadata('has_header')
self.add_standard_metadata('has_no_header')
self.add_standard_metadata('skipinitialspace')
self.add_standard_metadata('no-skipinitialspace') | Adds the whole standard set of csv config items. | datagristle/configulator.py | add_all_csv_configs | kenfar/DataGristle | 77 | python | def add_all_csv_configs(self):
' \n '
self.add_standard_metadata('delimiter')
self.add_standard_metadata('quoting')
self.add_standard_metadata('quotechar')
self.add_standard_metadata('escapechar')
self.add_standard_metadata('doublequote')
self.add_standard_metadata('no_doublequote')
self.add_standard_metadata('has_header')
self.add_standard_metadata('has_no_header')
self.add_standard_metadata('skipinitialspace')
self.add_standard_metadata('no-skipinitialspace') | def add_all_csv_configs(self):
' \n '
self.add_standard_metadata('delimiter')
self.add_standard_metadata('quoting')
self.add_standard_metadata('quotechar')
self.add_standard_metadata('escapechar')
self.add_standard_metadata('doublequote')
self.add_standard_metadata('no_doublequote')
self.add_standard_metadata('has_header')
self.add_standard_metadata('has_no_header')
self.add_standard_metadata('skipinitialspace')
self.add_standard_metadata('no-skipinitialspace')<|docstring|>Adds the whole standard set of csv config items.<|endoftext|> |
701a92da93d09153eb88cc1cc753376fa706ab0d9e65cdd0471345b273c67a11 | def add_all_config_configs(self):
' Adds the standard set of config items.\n '
self.add_standard_metadata('config_fn')
self.add_standard_metadata('gen_config_fn') | Adds the standard set of config items. | datagristle/configulator.py | add_all_config_configs | kenfar/DataGristle | 77 | python | def add_all_config_configs(self):
' \n '
self.add_standard_metadata('config_fn')
self.add_standard_metadata('gen_config_fn') | def add_all_config_configs(self):
' \n '
self.add_standard_metadata('config_fn')
self.add_standard_metadata('gen_config_fn')<|docstring|>Adds the standard set of config items.<|endoftext|> |
d13565b8a4fa01863c88c6eea4220d4346f80190faa20c90c19ddcef735401da | def add_all_help_configs(self):
' Adds the standard set of help items.\n '
self.add_standard_metadata('help') | Adds the standard set of help items. | datagristle/configulator.py | add_all_help_configs | kenfar/DataGristle | 77 | python | def add_all_help_configs(self):
' \n '
self.add_standard_metadata('help') | def add_all_help_configs(self):
' \n '
self.add_standard_metadata('help')<|docstring|>Adds the standard set of help items.<|endoftext|> |
15fe64fea67e2a913db06dbed9ffb2d8b5aa5c6cee28fee0b937950deafa2ba0 | def replace_configs(self, new_config):
' Replaces the dictionary and named-tuple versions of the config\n '
self.config = new_config
self.nconfig = collections.namedtuple('Config', self.config.keys())(**self.config) | Replaces the dictionary and named-tuple versions of the config | datagristle/configulator.py | replace_configs | kenfar/DataGristle | 77 | python | def replace_configs(self, new_config):
' \n '
self.config = new_config
self.nconfig = collections.namedtuple('Config', self.config.keys())(**self.config) | def replace_configs(self, new_config):
' \n '
self.config = new_config
self.nconfig = collections.namedtuple('Config', self.config.keys())(**self.config)<|docstring|>Replaces the dictionary and named-tuple versions of the config<|endoftext|> |
ae0ceb648f1ca40dd99c17e9233aa2a80dbd43f6d6aa277d1252b75379cb1a88 | def update_config(self, key, value):
' Writes a key-value to the config.\n '
new_config = {**self.config, **{key: value}}
self.replace_configs(new_config) | Writes a key-value to the config. | datagristle/configulator.py | update_config | kenfar/DataGristle | 77 | python | def update_config(self, key, value):
' \n '
new_config = {**self.config, **{key: value}}
self.replace_configs(new_config) | def update_config(self, key, value):
' \n '
new_config = {**self.config, **{key: value}}
self.replace_configs(new_config)<|docstring|>Writes a key-value to the config.<|endoftext|> |
853e49e803d264030bfaf309eeffa1dc3ac66aaacd0f5b6a651ac9624b219cb1 | def generate_csv_dialect_config(self):
' Adds the csv dialect to the config.\n\n Added by calling programs within the extend_config method.\n '
md = self._app_metadata
try:
autodetected = csvhelper.get_dialect(infiles=self.config['infiles'], verbosity=self.config['verbosity'])
except FileNotFoundError:
comm.abort('Error: File not found', f"One of these files was not found: {','.join(self.config['infiles'])}")
overridden = csvhelper.override_dialect(autodetected, delimiter=self.config['delimiter'], quoting=self.config['quoting'], quotechar=self.config['quotechar'], has_header=self.config['has_header'], doublequote=self.config['doublequote'], escapechar=self.config['escapechar'], skipinitialspace=self.config['skipinitialspace'])
defaulted = csvhelper.default_dialect(overridden, delimiter=md['delimiter']['extended_default'], quoting=md['quoting']['extended_default'], has_header=md['has_header']['extended_default'], quotechar=md['quotechar']['extended_default'], escapechar=md['escapechar']['extended_default'], doublequote=md['doublequote']['extended_default'], skipinitialspace=md['skipinitialspace']['extended_default'])
assert csvhelper.is_valid_dialect(defaulted)
self.update_config('dialect', defaulted) | Adds the csv dialect to the config.
Added by calling programs within the extend_config method. | datagristle/configulator.py | generate_csv_dialect_config | kenfar/DataGristle | 77 | python | def generate_csv_dialect_config(self):
' Adds the csv dialect to the config.\n\n Added by calling programs within the extend_config method.\n '
md = self._app_metadata
try:
autodetected = csvhelper.get_dialect(infiles=self.config['infiles'], verbosity=self.config['verbosity'])
except FileNotFoundError:
comm.abort('Error: File not found', f"One of these files was not found: {','.join(self.config['infiles'])}")
overridden = csvhelper.override_dialect(autodetected, delimiter=self.config['delimiter'], quoting=self.config['quoting'], quotechar=self.config['quotechar'], has_header=self.config['has_header'], doublequote=self.config['doublequote'], escapechar=self.config['escapechar'], skipinitialspace=self.config['skipinitialspace'])
defaulted = csvhelper.default_dialect(overridden, delimiter=md['delimiter']['extended_default'], quoting=md['quoting']['extended_default'], has_header=md['has_header']['extended_default'], quotechar=md['quotechar']['extended_default'], escapechar=md['escapechar']['extended_default'], doublequote=md['doublequote']['extended_default'], skipinitialspace=md['skipinitialspace']['extended_default'])
assert csvhelper.is_valid_dialect(defaulted)
self.update_config('dialect', defaulted) | def generate_csv_dialect_config(self):
' Adds the csv dialect to the config.\n\n Added by calling programs within the extend_config method.\n '
md = self._app_metadata
try:
autodetected = csvhelper.get_dialect(infiles=self.config['infiles'], verbosity=self.config['verbosity'])
except FileNotFoundError:
comm.abort('Error: File not found', f"One of these files was not found: {','.join(self.config['infiles'])}")
overridden = csvhelper.override_dialect(autodetected, delimiter=self.config['delimiter'], quoting=self.config['quoting'], quotechar=self.config['quotechar'], has_header=self.config['has_header'], doublequote=self.config['doublequote'], escapechar=self.config['escapechar'], skipinitialspace=self.config['skipinitialspace'])
defaulted = csvhelper.default_dialect(overridden, delimiter=md['delimiter']['extended_default'], quoting=md['quoting']['extended_default'], has_header=md['has_header']['extended_default'], quotechar=md['quotechar']['extended_default'], escapechar=md['escapechar']['extended_default'], doublequote=md['doublequote']['extended_default'], skipinitialspace=md['skipinitialspace']['extended_default'])
assert csvhelper.is_valid_dialect(defaulted)
self.update_config('dialect', defaulted)<|docstring|>Adds the csv dialect to the config.
Added by calling programs within the extend_config method.<|endoftext|> |
5f05cfbf08ac452f6086c2cf4e7664276fb984907897d6d6b8257ba826af29a1 | def generate_csv_header_config(self):
' Adds the csv header to the config.\n\n Added by calling programs within the extend_config method.\n '
header = csvhelper.Header()
if (self.config['infiles'][0] != '-'):
header.load_from_files(self.config['infiles'], self.config['dialect'])
self.update_config('header', header) | Adds the csv header to the config.
Added by calling programs within the extend_config method. | datagristle/configulator.py | generate_csv_header_config | kenfar/DataGristle | 77 | python | def generate_csv_header_config(self):
' Adds the csv header to the config.\n\n Added by calling programs within the extend_config method.\n '
header = csvhelper.Header()
if (self.config['infiles'][0] != '-'):
header.load_from_files(self.config['infiles'], self.config['dialect'])
self.update_config('header', header) | def generate_csv_header_config(self):
' Adds the csv header to the config.\n\n Added by calling programs within the extend_config method.\n '
header = csvhelper.Header()
if (self.config['infiles'][0] != '-'):
header.load_from_files(self.config['infiles'], self.config['dialect'])
self.update_config('header', header)<|docstring|>Adds the csv header to the config.
Added by calling programs within the extend_config method.<|endoftext|> |
a170ddc845d9b9ade97600ec252cb91be2dac957f426e71c7caa8c6247d13ffc | def _validate_config_metadata(self):
" Validates the program's configuration metadata (not the user's input).\n "
for (arg, arg_parameters) in self._app_metadata.items():
for (property_name, property_value) in arg_parameters.items():
if (property_name in VALID_CONFIG_PROP_TYPES):
if (type(property_value) is not VALID_CONFIG_PROP_TYPES[property_name]):
raise ValueError(f'{arg}.{property_name} type is invalid: {property_value}')
if (property_name in VALID_CONFIG_PROP_VALUES):
if (property_value not in VALID_CONFIG_PROP_VALUES[property_name]):
raise ValueError(f'{arg}.{property_name} value not one of valid choices')
if (property_name == 'short_name'):
if (len(property_value) != 1):
raise ValueError(f'{arg}.short_name length is invalid')
if (property_name == 'dest'):
if (property_value not in self._app_metadata):
raise ValueError(f'{arg}.dest refers to non-existing option') | Validates the program's configuration metadata (not the user's input). | datagristle/configulator.py | _validate_config_metadata | kenfar/DataGristle | 77 | python | def _validate_config_metadata(self):
" \n "
for (arg, arg_parameters) in self._app_metadata.items():
for (property_name, property_value) in arg_parameters.items():
if (property_name in VALID_CONFIG_PROP_TYPES):
if (type(property_value) is not VALID_CONFIG_PROP_TYPES[property_name]):
raise ValueError(f'{arg}.{property_name} type is invalid: {property_value}')
if (property_name in VALID_CONFIG_PROP_VALUES):
if (property_value not in VALID_CONFIG_PROP_VALUES[property_name]):
raise ValueError(f'{arg}.{property_name} value not one of valid choices')
if (property_name == 'short_name'):
if (len(property_value) != 1):
raise ValueError(f'{arg}.short_name length is invalid')
if (property_name == 'dest'):
if (property_value not in self._app_metadata):
raise ValueError(f'{arg}.dest refers to non-existing option') | def _validate_config_metadata(self):
" \n "
for (arg, arg_parameters) in self._app_metadata.items():
for (property_name, property_value) in arg_parameters.items():
if (property_name in VALID_CONFIG_PROP_TYPES):
if (type(property_value) is not VALID_CONFIG_PROP_TYPES[property_name]):
raise ValueError(f'{arg}.{property_name} type is invalid: {property_value}')
if (property_name in VALID_CONFIG_PROP_VALUES):
if (property_value not in VALID_CONFIG_PROP_VALUES[property_name]):
raise ValueError(f'{arg}.{property_name} value not one of valid choices')
if (property_name == 'short_name'):
if (len(property_value) != 1):
raise ValueError(f'{arg}.short_name length is invalid')
if (property_name == 'dest'):
if (property_value not in self._app_metadata):
raise ValueError(f'{arg}.dest refers to non-existing option')<|docstring|>Validates the program's configuration metadata (not the user's input).<|endoftext|> |
3e673bbc1fa73b9e6b2efd2d1647b9ddaca414fad1f14d20489fe708a0263044 | def _consolidate_configs(self, file_args: CONFIG_TYPE, env_args: CONFIG_TYPE, cli_args: CONFIG_TYPE) -> CONFIG_TYPE:
' Consolidates environmental and cli arguments.\n\n First all _app_metadata keys are added,\n Then values from matching file, then env, then cli keys are overlaid\n '
consolidated_args: Dict[(str, Any)] = {}
def _get_actual_value(key, config_val):
bool_val = None
if (key != self._app_metadata[key].get('dest', key)):
if ((self._app_metadata[key]['type'] is bool) and (self._app_metadata[key]['action'] == 'store_const')):
bool_val = self._app_metadata[key]['const']
return bool_val
return config_val
def _init_dictionary():
for key in self._app_metadata:
actual_key = self._app_metadata[key].get('dest', key)
consolidated_args[actual_key] = None
return consolidated_args
def _add_file_args():
for (key, val) in file_args.items():
try:
actual_key = self._app_metadata[key].get('dest', key)
actual_val = _get_actual_value(key, val)
consolidated_args[actual_key] = actual_val
except KeyError:
if (key in self.obsolete_options.keys()):
comm.abort('Error: obsolete option', self.obsolete_options[key])
else:
comm.abort(f'ERROR: Unknown option: {key}')
return consolidated_args
def _add_env_args():
for (key, val) in env_args.items():
actual_key = self._app_metadata[key].get('dest', key)
consolidated_args[actual_key] = _get_actual_value(key, val)
return consolidated_args
def _add_cli_args():
for (key, val) in cli_args.items():
if ((val is not None) and (val != [])):
consolidated_args[key] = val
return consolidated_args
consolidated_args = _init_dictionary()
consolidated_args = _add_file_args()
consolidated_args = _add_env_args()
consolidated_args = _add_cli_args()
return consolidated_args | Consolidates environmental and cli arguments.
First all _app_metadata keys are added,
Then values from matching file, then env, then cli keys are overlaid | datagristle/configulator.py | _consolidate_configs | kenfar/DataGristle | 77 | python | def _consolidate_configs(self, file_args: CONFIG_TYPE, env_args: CONFIG_TYPE, cli_args: CONFIG_TYPE) -> CONFIG_TYPE:
' Consolidates environmental and cli arguments.\n\n First all _app_metadata keys are added,\n Then values from matching file, then env, then cli keys are overlaid\n '
consolidated_args: Dict[(str, Any)] = {}
def _get_actual_value(key, config_val):
bool_val = None
if (key != self._app_metadata[key].get('dest', key)):
if ((self._app_metadata[key]['type'] is bool) and (self._app_metadata[key]['action'] == 'store_const')):
bool_val = self._app_metadata[key]['const']
return bool_val
return config_val
def _init_dictionary():
for key in self._app_metadata:
actual_key = self._app_metadata[key].get('dest', key)
consolidated_args[actual_key] = None
return consolidated_args
def _add_file_args():
for (key, val) in file_args.items():
try:
actual_key = self._app_metadata[key].get('dest', key)
actual_val = _get_actual_value(key, val)
consolidated_args[actual_key] = actual_val
except KeyError:
if (key in self.obsolete_options.keys()):
comm.abort('Error: obsolete option', self.obsolete_options[key])
else:
comm.abort(f'ERROR: Unknown option: {key}')
return consolidated_args
def _add_env_args():
for (key, val) in env_args.items():
actual_key = self._app_metadata[key].get('dest', key)
consolidated_args[actual_key] = _get_actual_value(key, val)
return consolidated_args
def _add_cli_args():
for (key, val) in cli_args.items():
if ((val is not None) and (val != [])):
consolidated_args[key] = val
return consolidated_args
consolidated_args = _init_dictionary()
consolidated_args = _add_file_args()
consolidated_args = _add_env_args()
consolidated_args = _add_cli_args()
return consolidated_args | def _consolidate_configs(self, file_args: CONFIG_TYPE, env_args: CONFIG_TYPE, cli_args: CONFIG_TYPE) -> CONFIG_TYPE:
' Consolidates environmental and cli arguments.\n\n First all _app_metadata keys are added,\n Then values from matching file, then env, then cli keys are overlaid\n '
consolidated_args: Dict[(str, Any)] = {}
def _get_actual_value(key, config_val):
bool_val = None
if (key != self._app_metadata[key].get('dest', key)):
if ((self._app_metadata[key]['type'] is bool) and (self._app_metadata[key]['action'] == 'store_const')):
bool_val = self._app_metadata[key]['const']
return bool_val
return config_val
def _init_dictionary():
for key in self._app_metadata:
actual_key = self._app_metadata[key].get('dest', key)
consolidated_args[actual_key] = None
return consolidated_args
def _add_file_args():
for (key, val) in file_args.items():
try:
actual_key = self._app_metadata[key].get('dest', key)
actual_val = _get_actual_value(key, val)
consolidated_args[actual_key] = actual_val
except KeyError:
if (key in self.obsolete_options.keys()):
comm.abort('Error: obsolete option', self.obsolete_options[key])
else:
comm.abort(f'ERROR: Unknown option: {key}')
return consolidated_args
def _add_env_args():
for (key, val) in env_args.items():
actual_key = self._app_metadata[key].get('dest', key)
consolidated_args[actual_key] = _get_actual_value(key, val)
return consolidated_args
def _add_cli_args():
for (key, val) in cli_args.items():
if ((val is not None) and (val != [])):
consolidated_args[key] = val
return consolidated_args
consolidated_args = _init_dictionary()
consolidated_args = _add_file_args()
consolidated_args = _add_env_args()
consolidated_args = _add_cli_args()
return consolidated_args<|docstring|>Consolidates environmental and cli arguments.
First all _app_metadata keys are added,
Then values from matching file, then env, then cli keys are overlaid<|endoftext|> |
a0005e477fa0bbaeabf07baab8ebf953bab4ce1be1251582cec90cefd08019f6 | def _apply_std_defaults(self, config: CONFIG_TYPE) -> CONFIG_TYPE:
' Applies defaults to standard config items.\n '
temp_config = copy.copy(config)
for (key, val) in temp_config.items():
if (val is None):
temp_config[key] = self._app_metadata[key].get('default')
return temp_config | Applies defaults to standard config items. | datagristle/configulator.py | _apply_std_defaults | kenfar/DataGristle | 77 | python | def _apply_std_defaults(self, config: CONFIG_TYPE) -> CONFIG_TYPE:
' \n '
temp_config = copy.copy(config)
for (key, val) in temp_config.items():
if (val is None):
temp_config[key] = self._app_metadata[key].get('default')
return temp_config | def _apply_std_defaults(self, config: CONFIG_TYPE) -> CONFIG_TYPE:
' \n '
temp_config = copy.copy(config)
for (key, val) in temp_config.items():
if (val is None):
temp_config[key] = self._app_metadata[key].get('default')
return temp_config<|docstring|>Applies defaults to standard config items.<|endoftext|> |
efccbc3445d6f1cfaff90f3722eb98fb961f3321eb7d10174fa37c882fcf31db | def apply_custom_defaults(self, config: CONFIG_TYPE) -> CONFIG_TYPE:
' Applies defaults to custom config items.\n\n This is intended to be overriden by the user.\n '
return config | Applies defaults to custom config items.
This is intended to be overriden by the user. | datagristle/configulator.py | apply_custom_defaults | kenfar/DataGristle | 77 | python | def apply_custom_defaults(self, config: CONFIG_TYPE) -> CONFIG_TYPE:
' Applies defaults to custom config items.\n\n This is intended to be overriden by the user.\n '
return config | def apply_custom_defaults(self, config: CONFIG_TYPE) -> CONFIG_TYPE:
' Applies defaults to custom config items.\n\n This is intended to be overriden by the user.\n '
return config<|docstring|>Applies defaults to custom config items.
This is intended to be overriden by the user.<|endoftext|> |
3d8ce1caf259801db276758adc9e5ef09055723301740a16b16de34512a9e93b | def _validate_std_config(self, config: CONFIG_TYPE) -> None:
' Validates standard config items.\n '
for (key, val) in config.items():
if (key in ARG_ONLY_CONFIGS):
continue
if (key == 'col_names'):
continue
if ((val is None) or (val == [])):
if self._app_metadata[key].get('required'):
comm.abort(f"Error: option '{key}' was not provided but is required")
elif ('nargs' in self._app_metadata[key]):
continue
else:
checks = self._app_metadata[key]
if (not isinstance(val, checks['type'])):
comm.abort('Error: config value has the wrong type', f"'{key}' with value: '{val}' is not {checks['type']}")
if ('min_length' in checks):
if (len(val) < checks['min_length']):
comm.abort('Error: config value is under min_length', f"'{key}' with len of value '{val}' is < {checks['min_length']}")
if ('max_length' in checks):
if (len(val) > checks['max_length']):
comm.abort('Error: config value is over max_length', f"'{key}' with len of value '{val}' is > {checks['max_length']}")
if ('minimum' in checks):
if (val < checks['minimum']):
comm.abort(f'Error: config value less than minimum', f"'{key}' with value of '{val}' is < {checks['minimum']}")
if ('maximum' in checks):
if (val > checks['maximum']):
comm.abort(f'Error: config value greater than maximum', f"'{key}' with value of '{val}' is > {checks['maximum']}")
if ('choices' in checks):
if (val not in checks['choices']):
comm.abort(f'Error: config value not in valid list of choices', f"Valid values include: {checks['choices']} ")
self._validate_dialect_with_stdin(config) | Validates standard config items. | datagristle/configulator.py | _validate_std_config | kenfar/DataGristle | 77 | python | def _validate_std_config(self, config: CONFIG_TYPE) -> None:
' \n '
for (key, val) in config.items():
if (key in ARG_ONLY_CONFIGS):
continue
if (key == 'col_names'):
continue
if ((val is None) or (val == [])):
if self._app_metadata[key].get('required'):
comm.abort(f"Error: option '{key}' was not provided but is required")
elif ('nargs' in self._app_metadata[key]):
continue
else:
checks = self._app_metadata[key]
if (not isinstance(val, checks['type'])):
comm.abort('Error: config value has the wrong type', f"'{key}' with value: '{val}' is not {checks['type']}")
if ('min_length' in checks):
if (len(val) < checks['min_length']):
comm.abort('Error: config value is under min_length', f"'{key}' with len of value '{val}' is < {checks['min_length']}")
if ('max_length' in checks):
if (len(val) > checks['max_length']):
comm.abort('Error: config value is over max_length', f"'{key}' with len of value '{val}' is > {checks['max_length']}")
if ('minimum' in checks):
if (val < checks['minimum']):
comm.abort(f'Error: config value less than minimum', f"'{key}' with value of '{val}' is < {checks['minimum']}")
if ('maximum' in checks):
if (val > checks['maximum']):
comm.abort(f'Error: config value greater than maximum', f"'{key}' with value of '{val}' is > {checks['maximum']}")
if ('choices' in checks):
if (val not in checks['choices']):
comm.abort(f'Error: config value not in valid list of choices', f"Valid values include: {checks['choices']} ")
self._validate_dialect_with_stdin(config) | def _validate_std_config(self, config: CONFIG_TYPE) -> None:
' \n '
for (key, val) in config.items():
if (key in ARG_ONLY_CONFIGS):
continue
if (key == 'col_names'):
continue
if ((val is None) or (val == [])):
if self._app_metadata[key].get('required'):
comm.abort(f"Error: option '{key}' was not provided but is required")
elif ('nargs' in self._app_metadata[key]):
continue
else:
checks = self._app_metadata[key]
if (not isinstance(val, checks['type'])):
comm.abort('Error: config value has the wrong type', f"'{key}' with value: '{val}' is not {checks['type']}")
if ('min_length' in checks):
if (len(val) < checks['min_length']):
comm.abort('Error: config value is under min_length', f"'{key}' with len of value '{val}' is < {checks['min_length']}")
if ('max_length' in checks):
if (len(val) > checks['max_length']):
comm.abort('Error: config value is over max_length', f"'{key}' with len of value '{val}' is > {checks['max_length']}")
if ('minimum' in checks):
if (val < checks['minimum']):
comm.abort(f'Error: config value less than minimum', f"'{key}' with value of '{val}' is < {checks['minimum']}")
if ('maximum' in checks):
if (val > checks['maximum']):
comm.abort(f'Error: config value greater than maximum', f"'{key}' with value of '{val}' is > {checks['maximum']}")
if ('choices' in checks):
if (val not in checks['choices']):
comm.abort(f'Error: config value not in valid list of choices', f"Valid values include: {checks['choices']} ")
self._validate_dialect_with_stdin(config)<|docstring|>Validates standard config items.<|endoftext|> |
2d9fe585835575397230065c66768a1a4d57dadad8abbb0a09975b6c02786b8d | def validate_custom_config(self, config: CONFIG_TYPE) -> None:
' Validates custom config items.\n\n This is intended to be overriden by the user.\n '
pass | Validates custom config items.
This is intended to be overriden by the user. | datagristle/configulator.py | validate_custom_config | kenfar/DataGristle | 77 | python | def validate_custom_config(self, config: CONFIG_TYPE) -> None:
' Validates custom config items.\n\n This is intended to be overriden by the user.\n '
pass | def validate_custom_config(self, config: CONFIG_TYPE) -> None:
' Validates custom config items.\n\n This is intended to be overriden by the user.\n '
pass<|docstring|>Validates custom config items.
This is intended to be overriden by the user.<|endoftext|> |
c19d0dd8e55c7dcc58b2582b53d9144db05d961db9d5cbdb63d65cc9811e445c | def _get_gristle_app_args(self) -> CONFIG_TYPE:
' Returns a dictionary of environment keys & vals associated with the calling program.\n\n Note that the vals will be converted to the appropriate type.\n '
env_config = {}
for (envkey, envval) in sorted(self.env_args):
if (envkey in self._transform_config_keys_to_env_keys()):
option_name = self._transform_env_key_to_option_name(envkey)
env_config[option_name] = self.app_metadata[option_name]['type'](envval)
env_config_binaries_cleaned = binary_arg_fixer(self.app_metadata, env_config)
return env_config_binaries_cleaned | Returns a dictionary of environment keys & vals associated with the calling program.
Note that the vals will be converted to the appropriate type. | datagristle/configulator.py | _get_gristle_app_args | kenfar/DataGristle | 77 | python | def _get_gristle_app_args(self) -> CONFIG_TYPE:
' Returns a dictionary of environment keys & vals associated with the calling program.\n\n Note that the vals will be converted to the appropriate type.\n '
env_config = {}
for (envkey, envval) in sorted(self.env_args):
if (envkey in self._transform_config_keys_to_env_keys()):
option_name = self._transform_env_key_to_option_name(envkey)
env_config[option_name] = self.app_metadata[option_name]['type'](envval)
env_config_binaries_cleaned = binary_arg_fixer(self.app_metadata, env_config)
return env_config_binaries_cleaned | def _get_gristle_app_args(self) -> CONFIG_TYPE:
' Returns a dictionary of environment keys & vals associated with the calling program.\n\n Note that the vals will be converted to the appropriate type.\n '
env_config = {}
for (envkey, envval) in sorted(self.env_args):
if (envkey in self._transform_config_keys_to_env_keys()):
option_name = self._transform_env_key_to_option_name(envkey)
env_config[option_name] = self.app_metadata[option_name]['type'](envval)
env_config_binaries_cleaned = binary_arg_fixer(self.app_metadata, env_config)
return env_config_binaries_cleaned<|docstring|>Returns a dictionary of environment keys & vals associated with the calling program.
Note that the vals will be converted to the appropriate type.<|endoftext|> |
c7141a66008c126670fdb7f28f3d17e0f3d8bcd8d1463925b6ec641b7fdaa2ee | def _transform_config_keys_to_env_keys(self):
" Translates the configuration keys to the env key formats by adding\n 'gristle_[app_name] as a prefix to each key.\n "
env_keys = [f'{self.gristle_app_name}_{key}' for key in self.app_metadata.keys()]
return env_keys | Translates the configuration keys to the env key formats by adding
'gristle_[app_name] as a prefix to each key. | datagristle/configulator.py | _transform_config_keys_to_env_keys | kenfar/DataGristle | 77 | python | def _transform_config_keys_to_env_keys(self):
" Translates the configuration keys to the env key formats by adding\n 'gristle_[app_name] as a prefix to each key.\n "
env_keys = [f'{self.gristle_app_name}_{key}' for key in self.app_metadata.keys()]
return env_keys | def _transform_config_keys_to_env_keys(self):
" Translates the configuration keys to the env key formats by adding\n 'gristle_[app_name] as a prefix to each key.\n "
env_keys = [f'{self.gristle_app_name}_{key}' for key in self.app_metadata.keys()]
return env_keys<|docstring|>Translates the configuration keys to the env key formats by adding
'gristle_[app_name] as a prefix to each key.<|endoftext|> |
f3137cc4372c963618d39e2f400da83aef3623514beea40860394127d9d489b6 | def _transform_env_key_to_option_name(self, envkey: str) -> str:
" Translates environment key to our option name format by stripping\n 'gristle_[app_name]_' from the front of the env key\n "
option_name = envkey[len(f'{self.gristle_app_name}_'):]
return option_name | Translates environment key to our option name format by stripping
'gristle_[app_name]_' from the front of the env key | datagristle/configulator.py | _transform_env_key_to_option_name | kenfar/DataGristle | 77 | python | def _transform_env_key_to_option_name(self, envkey: str) -> str:
" Translates environment key to our option name format by stripping\n 'gristle_[app_name]_' from the front of the env key\n "
option_name = envkey[len(f'{self.gristle_app_name}_'):]
return option_name | def _transform_env_key_to_option_name(self, envkey: str) -> str:
" Translates environment key to our option name format by stripping\n 'gristle_[app_name]_' from the front of the env key\n "
option_name = envkey[len(f'{self.gristle_app_name}_'):]
return option_name<|docstring|>Translates environment key to our option name format by stripping
'gristle_[app_name]_' from the front of the env key<|endoftext|> |
8622d86b30eebf5ba6b518797a089d2979f63dbe6f867fd4245adb11f30faa5c | def _get_args(self) -> CONFIG_TYPE:
' Returns a dictionary of config keys & vals associated with the calling program.\n '
file_args: Dict[(str, Any)] = {}
if (not self.config_fn):
return file_args
(_, file_ext) = os.path.splitext(self.config_fn)
if (file_ext in ('.yaml', '.yml')):
with open(self.config_fn) as buf:
file_args = yaml.YAML(typ='safe', pure=True).load(buf)
elif (file_ext in '.json'):
with open(self.config_fn) as buf:
file_args = json.load(buf)
file_args_binaries_cleaned = binary_arg_fixer(self.app_metadata, file_args)
file_args_final = self.file_args_path_cleaner(file_args_binaries_cleaned)
return file_args_final | Returns a dictionary of config keys & vals associated with the calling program. | datagristle/configulator.py | _get_args | kenfar/DataGristle | 77 | python | def _get_args(self) -> CONFIG_TYPE:
' \n '
file_args: Dict[(str, Any)] = {}
if (not self.config_fn):
return file_args
(_, file_ext) = os.path.splitext(self.config_fn)
if (file_ext in ('.yaml', '.yml')):
with open(self.config_fn) as buf:
file_args = yaml.YAML(typ='safe', pure=True).load(buf)
elif (file_ext in '.json'):
with open(self.config_fn) as buf:
file_args = json.load(buf)
file_args_binaries_cleaned = binary_arg_fixer(self.app_metadata, file_args)
file_args_final = self.file_args_path_cleaner(file_args_binaries_cleaned)
return file_args_final | def _get_args(self) -> CONFIG_TYPE:
' \n '
file_args: Dict[(str, Any)] = {}
if (not self.config_fn):
return file_args
(_, file_ext) = os.path.splitext(self.config_fn)
if (file_ext in ('.yaml', '.yml')):
with open(self.config_fn) as buf:
file_args = yaml.YAML(typ='safe', pure=True).load(buf)
elif (file_ext in '.json'):
with open(self.config_fn) as buf:
file_args = json.load(buf)
file_args_binaries_cleaned = binary_arg_fixer(self.app_metadata, file_args)
file_args_final = self.file_args_path_cleaner(file_args_binaries_cleaned)
return file_args_final<|docstring|>Returns a dictionary of config keys & vals associated with the calling program.<|endoftext|> |
f44a8e0b45a51fd60bc2636ac26db410f65cf848130c619f40e3dbd5bd2f90ad | def _convert_file_path(self, path_key, args):
' Turn relative paths in config files into absolute paths\n\n The purpose of this is to support relative file paths - in particular\n for datagristle script cmdline testing. In this case we need to have\n configs hold file names, but their absolute location would depend on\n where one clones the repo.\n\n The way this works is that any non-absolute file name in the config\n will be joined to the absolute directory of the config file itself.\n\n For users that want to take advantage of this they just need to be\n aware that the filename in the config is relative in comparison\n to the config directory itself.\n '
if (path_key not in args):
return
config_dir = dirname(abspath(self.config_fn))
if (type(args[path_key]) == type(['foo'])):
if (args[path_key] == ['-']):
return
old_files = args[path_key]
new_files = []
for file in old_files:
if isabs(file):
new_files.append(file)
else:
new_file = pjoin(config_dir, file)
new_files.append(new_file)
args[path_key] = new_files
else:
if (args[path_key] == '-'):
return
old_file = args[path_key]
new_file = ''
if isabs(old_file):
new_file = old_file
else:
new_file = pjoin(config_dir, old_file)
args[path_key] = new_file | Turn relative paths in config files into absolute paths
The purpose of this is to support relative file paths - in particular
for datagristle script cmdline testing. In this case we need to have
configs hold file names, but their absolute location would depend on
where one clones the repo.
The way this works is that any non-absolute file name in the config
will be joined to the absolute directory of the config file itself.
For users that want to take advantage of this they just need to be
aware that the filename in the config is relative in comparison
to the config directory itself. | datagristle/configulator.py | _convert_file_path | kenfar/DataGristle | 77 | python | def _convert_file_path(self, path_key, args):
' Turn relative paths in config files into absolute paths\n\n The purpose of this is to support relative file paths - in particular\n for datagristle script cmdline testing. In this case we need to have\n configs hold file names, but their absolute location would depend on\n where one clones the repo.\n\n The way this works is that any non-absolute file name in the config\n will be joined to the absolute directory of the config file itself.\n\n For users that want to take advantage of this they just need to be\n aware that the filename in the config is relative in comparison\n to the config directory itself.\n '
if (path_key not in args):
return
config_dir = dirname(abspath(self.config_fn))
if (type(args[path_key]) == type(['foo'])):
if (args[path_key] == ['-']):
return
old_files = args[path_key]
new_files = []
for file in old_files:
if isabs(file):
new_files.append(file)
else:
new_file = pjoin(config_dir, file)
new_files.append(new_file)
args[path_key] = new_files
else:
if (args[path_key] == '-'):
return
old_file = args[path_key]
new_file =
if isabs(old_file):
new_file = old_file
else:
new_file = pjoin(config_dir, old_file)
args[path_key] = new_file | def _convert_file_path(self, path_key, args):
' Turn relative paths in config files into absolute paths\n\n The purpose of this is to support relative file paths - in particular\n for datagristle script cmdline testing. In this case we need to have\n configs hold file names, but their absolute location would depend on\n where one clones the repo.\n\n The way this works is that any non-absolute file name in the config\n will be joined to the absolute directory of the config file itself.\n\n For users that want to take advantage of this they just need to be\n aware that the filename in the config is relative in comparison\n to the config directory itself.\n '
if (path_key not in args):
return
config_dir = dirname(abspath(self.config_fn))
if (type(args[path_key]) == type(['foo'])):
if (args[path_key] == ['-']):
return
old_files = args[path_key]
new_files = []
for file in old_files:
if isabs(file):
new_files.append(file)
else:
new_file = pjoin(config_dir, file)
new_files.append(new_file)
args[path_key] = new_files
else:
if (args[path_key] == '-'):
return
old_file = args[path_key]
new_file =
if isabs(old_file):
new_file = old_file
else:
new_file = pjoin(config_dir, old_file)
args[path_key] = new_file<|docstring|>Turn relative paths in config files into absolute paths
The purpose of this is to support relative file paths - in particular
for datagristle script cmdline testing. In this case we need to have
configs hold file names, but their absolute location would depend on
where one clones the repo.
The way this works is that any non-absolute file name in the config
will be joined to the absolute directory of the config file itself.
For users that want to take advantage of this they just need to be
aware that the filename in the config is relative in comparison
to the config directory itself.<|endoftext|> |
dab31bb9284576794086e11ff393ce7d7020c86af1aa277b68356e8c3b5806b0 | def _convert_arg_name_delimiter(self, args):
'Replaces dashes in keys with underscores\n\n This is performed in order to change from the external standard of\n snake case (ex: foo-bar) to the internal standard of flattened snake case (ex: foo_bar).\n '
for old_key in list(args.keys()):
if ('-' in old_key):
new_key = old_key.replace('-', '_')
args[new_key] = args[old_key]
args.pop(old_key) | Replaces dashes in keys with underscores
This is performed in order to change from the external standard of
snake case (ex: foo-bar) to the internal standard of flattened snake case (ex: foo_bar). | datagristle/configulator.py | _convert_arg_name_delimiter | kenfar/DataGristle | 77 | python | def _convert_arg_name_delimiter(self, args):
'Replaces dashes in keys with underscores\n\n This is performed in order to change from the external standard of\n snake case (ex: foo-bar) to the internal standard of flattened snake case (ex: foo_bar).\n '
for old_key in list(args.keys()):
if ('-' in old_key):
new_key = old_key.replace('-', '_')
args[new_key] = args[old_key]
args.pop(old_key) | def _convert_arg_name_delimiter(self, args):
'Replaces dashes in keys with underscores\n\n This is performed in order to change from the external standard of\n snake case (ex: foo-bar) to the internal standard of flattened snake case (ex: foo_bar).\n '
for old_key in list(args.keys()):
if ('-' in old_key):
new_key = old_key.replace('-', '_')
args[new_key] = args[old_key]
args.pop(old_key)<|docstring|>Replaces dashes in keys with underscores
This is performed in order to change from the external standard of
snake case (ex: foo-bar) to the internal standard of flattened snake case (ex: foo_bar).<|endoftext|> |
6f29decfd94fefa9153ed4107d33e0777caa3463ade90dd1251e1e72694b1b8f | def _get_args(self) -> CONFIG_TYPE:
' Gets config items from cli arguments.\n '
self._build_parser()
(known_args, unknown_args) = self.parser.parse_known_args()
self._process_unknown_args(unknown_args)
self._process_help_args(known_args)
return vars(known_args) | Gets config items from cli arguments. | datagristle/configulator.py | _get_args | kenfar/DataGristle | 77 | python | def _get_args(self) -> CONFIG_TYPE:
' \n '
self._build_parser()
(known_args, unknown_args) = self.parser.parse_known_args()
self._process_unknown_args(unknown_args)
self._process_help_args(known_args)
return vars(known_args) | def _get_args(self) -> CONFIG_TYPE:
' \n '
self._build_parser()
(known_args, unknown_args) = self.parser.parse_known_args()
self._process_unknown_args(unknown_args)
self._process_help_args(known_args)
return vars(known_args)<|docstring|>Gets config items from cli arguments.<|endoftext|> |
c885ceaf39ee2a9b367fa87a3471f26be0b3200277efd9893b9e98868df57bde | def test_page_slugify_on_save(self):
' Tests the slug generated when saving a page '
user = User()
user.save()
page = Page(title='My Test Page', content='test', author=user)
page.save()
self.assertEqual(page.slug, 'my-test-page') | Tests the slug generated when saving a page | wiki/tests.py | test_page_slugify_on_save | GSCrawley/makewiki | 0 | python | def test_page_slugify_on_save(self):
' '
user = User()
user.save()
page = Page(title='My Test Page', content='test', author=user)
page.save()
self.assertEqual(page.slug, 'my-test-page') | def test_page_slugify_on_save(self):
' '
user = User()
user.save()
page = Page(title='My Test Page', content='test', author=user)
page.save()
self.assertEqual(page.slug, 'my-test-page')<|docstring|>Tests the slug generated when saving a page<|endoftext|> |
ef3d8c6ebce8c514c6df7fb76dceb051dbc6d87eb962440c5b6cda8bedd515d4 | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[5, 5]]), (view_data_ndarray, coords)])
def test_bbox_center(view_data, expected_coords):
'Unit test for _calculate_anchor_center. Roundtrip test in test_get_text_anchors'
anchor_data = _calculate_anchor_center(view_data, ndisplay=2)
expected_anchor_data = (expected_coords, 'center', 'center')
np.testing.assert_equal(anchor_data, expected_anchor_data) | Unit test for _calculate_anchor_center. Roundtrip test in test_get_text_anchors | napari/layers/utils/_tests/test_text_utils.py | test_bbox_center | Zac-HD/napari | 1,345 | python | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[5, 5]]), (view_data_ndarray, coords)])
def test_bbox_center(view_data, expected_coords):
anchor_data = _calculate_anchor_center(view_data, ndisplay=2)
expected_anchor_data = (expected_coords, 'center', 'center')
np.testing.assert_equal(anchor_data, expected_anchor_data) | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[5, 5]]), (view_data_ndarray, coords)])
def test_bbox_center(view_data, expected_coords):
anchor_data = _calculate_anchor_center(view_data, ndisplay=2)
expected_anchor_data = (expected_coords, 'center', 'center')
np.testing.assert_equal(anchor_data, expected_anchor_data)<|docstring|>Unit test for _calculate_anchor_center. Roundtrip test in test_get_text_anchors<|endoftext|> |
fe0f34a88c30df33ec169e25cd52f008d4c9fd23663edc845410d5b5962476b9 | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[0, 0]]), (view_data_ndarray, coords)])
def test_bbox_upper_left(view_data, expected_coords):
'Unit test for _calculate_anchor_upper_left. Roundtrip test in test_get_text_anchors'
expected_anchor_data = (expected_coords, 'left', 'top')
anchor_data = _calculate_anchor_upper_left(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data) | Unit test for _calculate_anchor_upper_left. Roundtrip test in test_get_text_anchors | napari/layers/utils/_tests/test_text_utils.py | test_bbox_upper_left | Zac-HD/napari | 1,345 | python | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[0, 0]]), (view_data_ndarray, coords)])
def test_bbox_upper_left(view_data, expected_coords):
expected_anchor_data = (expected_coords, 'left', 'top')
anchor_data = _calculate_anchor_upper_left(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data) | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[0, 0]]), (view_data_ndarray, coords)])
def test_bbox_upper_left(view_data, expected_coords):
expected_anchor_data = (expected_coords, 'left', 'top')
anchor_data = _calculate_anchor_upper_left(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data)<|docstring|>Unit test for _calculate_anchor_upper_left. Roundtrip test in test_get_text_anchors<|endoftext|> |
6965285d351ad970ce8c65a2f727d5f060e741af34ca961150f64b0a710fe5d9 | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[0, 10]]), (view_data_ndarray, coords)])
def test_bbox_upper_right(view_data, expected_coords):
'Unit test for _calculate_anchor_upper_right. Roundtrip test in test_get_text_anchors'
expected_anchor_data = (expected_coords, 'right', 'top')
anchor_data = _calculate_anchor_upper_right(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data) | Unit test for _calculate_anchor_upper_right. Roundtrip test in test_get_text_anchors | napari/layers/utils/_tests/test_text_utils.py | test_bbox_upper_right | Zac-HD/napari | 1,345 | python | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[0, 10]]), (view_data_ndarray, coords)])
def test_bbox_upper_right(view_data, expected_coords):
expected_anchor_data = (expected_coords, 'right', 'top')
anchor_data = _calculate_anchor_upper_right(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data) | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[0, 10]]), (view_data_ndarray, coords)])
def test_bbox_upper_right(view_data, expected_coords):
expected_anchor_data = (expected_coords, 'right', 'top')
anchor_data = _calculate_anchor_upper_right(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data)<|docstring|>Unit test for _calculate_anchor_upper_right. Roundtrip test in test_get_text_anchors<|endoftext|> |
99fb5f490af7fe74d5d4a9b61cc55038b80aceae4f5f6278021ad8e1c78c3184 | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[10, 0]]), (view_data_ndarray, coords)])
def test_bbox_lower_left(view_data, expected_coords):
'Unit test for _calculate_anchor_lower_left. Roundtrip test in test_get_text_anchors'
expected_anchor_data = (expected_coords, 'left', 'bottom')
anchor_data = _calculate_anchor_lower_left(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data) | Unit test for _calculate_anchor_lower_left. Roundtrip test in test_get_text_anchors | napari/layers/utils/_tests/test_text_utils.py | test_bbox_lower_left | Zac-HD/napari | 1,345 | python | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[10, 0]]), (view_data_ndarray, coords)])
def test_bbox_lower_left(view_data, expected_coords):
expected_anchor_data = (expected_coords, 'left', 'bottom')
anchor_data = _calculate_anchor_lower_left(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data) | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[10, 0]]), (view_data_ndarray, coords)])
def test_bbox_lower_left(view_data, expected_coords):
expected_anchor_data = (expected_coords, 'left', 'bottom')
anchor_data = _calculate_anchor_lower_left(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data)<|docstring|>Unit test for _calculate_anchor_lower_left. Roundtrip test in test_get_text_anchors<|endoftext|> |
ebaad50afd6168539116c0e250e5526c911a628e93fdf6130762575197cbcf53 | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[10, 10]]), (view_data_ndarray, coords)])
def test_bbox_lower_right(view_data, expected_coords):
'Unit test for _calculate_anchor_lower_right. Roundtrip test in test_get_text_anchors'
expected_anchor_data = (expected_coords, 'right', 'bottom')
anchor_data = _calculate_anchor_lower_right(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data) | Unit test for _calculate_anchor_lower_right. Roundtrip test in test_get_text_anchors | napari/layers/utils/_tests/test_text_utils.py | test_bbox_lower_right | Zac-HD/napari | 1,345 | python | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[10, 10]]), (view_data_ndarray, coords)])
def test_bbox_lower_right(view_data, expected_coords):
expected_anchor_data = (expected_coords, 'right', 'bottom')
anchor_data = _calculate_anchor_lower_right(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data) | @pytest.mark.parametrize('view_data,expected_coords', [(view_data_list, [[10, 10]]), (view_data_ndarray, coords)])
def test_bbox_lower_right(view_data, expected_coords):
expected_anchor_data = (expected_coords, 'right', 'bottom')
anchor_data = _calculate_anchor_lower_right(view_data, ndisplay=2)
np.testing.assert_equal(anchor_data, expected_anchor_data)<|docstring|>Unit test for _calculate_anchor_lower_right. Roundtrip test in test_get_text_anchors<|endoftext|> |
d55d42fe50b80d406824907b60ab8859343e11b9666380b38e1276b4bf08dc5b | @pytest.mark.parametrize('anchor_type,ndisplay,expected_coords', [(Anchor.CENTER, 2, [[5, 5]]), (Anchor.UPPER_LEFT, 2, [[0, 0]]), (Anchor.UPPER_RIGHT, 2, [[0, 10]]), (Anchor.LOWER_LEFT, 2, [[10, 0]]), (Anchor.LOWER_RIGHT, 2, [[10, 10]]), (Anchor.CENTER, 3, [[5, 5]]), (Anchor.UPPER_LEFT, 3, [[5, 5]]), (Anchor.UPPER_RIGHT, 3, [[5, 5]]), (Anchor.LOWER_LEFT, 3, [[5, 5]]), (Anchor.LOWER_RIGHT, 3, [[5, 5]])])
def test_get_text_anchors(anchor_type, ndisplay, expected_coords):
'Round trip tests for getting anchor coordinates.'
coords = [np.array([[0, 0], [10, 0], [0, 10], [10, 10]])]
(anchor_coords, _, _) = get_text_anchors(coords, anchor=anchor_type, ndisplay=ndisplay)
np.testing.assert_equal(anchor_coords, expected_coords) | Round trip tests for getting anchor coordinates. | napari/layers/utils/_tests/test_text_utils.py | test_get_text_anchors | Zac-HD/napari | 1,345 | python | @pytest.mark.parametrize('anchor_type,ndisplay,expected_coords', [(Anchor.CENTER, 2, [[5, 5]]), (Anchor.UPPER_LEFT, 2, [[0, 0]]), (Anchor.UPPER_RIGHT, 2, [[0, 10]]), (Anchor.LOWER_LEFT, 2, [[10, 0]]), (Anchor.LOWER_RIGHT, 2, [[10, 10]]), (Anchor.CENTER, 3, [[5, 5]]), (Anchor.UPPER_LEFT, 3, [[5, 5]]), (Anchor.UPPER_RIGHT, 3, [[5, 5]]), (Anchor.LOWER_LEFT, 3, [[5, 5]]), (Anchor.LOWER_RIGHT, 3, [[5, 5]])])
def test_get_text_anchors(anchor_type, ndisplay, expected_coords):
coords = [np.array([[0, 0], [10, 0], [0, 10], [10, 10]])]
(anchor_coords, _, _) = get_text_anchors(coords, anchor=anchor_type, ndisplay=ndisplay)
np.testing.assert_equal(anchor_coords, expected_coords) | @pytest.mark.parametrize('anchor_type,ndisplay,expected_coords', [(Anchor.CENTER, 2, [[5, 5]]), (Anchor.UPPER_LEFT, 2, [[0, 0]]), (Anchor.UPPER_RIGHT, 2, [[0, 10]]), (Anchor.LOWER_LEFT, 2, [[10, 0]]), (Anchor.LOWER_RIGHT, 2, [[10, 10]]), (Anchor.CENTER, 3, [[5, 5]]), (Anchor.UPPER_LEFT, 3, [[5, 5]]), (Anchor.UPPER_RIGHT, 3, [[5, 5]]), (Anchor.LOWER_LEFT, 3, [[5, 5]]), (Anchor.LOWER_RIGHT, 3, [[5, 5]])])
def test_get_text_anchors(anchor_type, ndisplay, expected_coords):
coords = [np.array([[0, 0], [10, 0], [0, 10], [10, 10]])]
(anchor_coords, _, _) = get_text_anchors(coords, anchor=anchor_type, ndisplay=ndisplay)
np.testing.assert_equal(anchor_coords, expected_coords)<|docstring|>Round trip tests for getting anchor coordinates.<|endoftext|> |
5b8a51f0fbbf909f1a234ad0d570fe036d7307148ae2b057264c40d49b7cfb82 | def test_bbox_centers_exception():
'_calculate_bbox_centers should raise a TypeError for non ndarray or list inputs'
with pytest.raises(TypeError):
_ = _calculate_bbox_centers({'bad_data_type': True}) | _calculate_bbox_centers should raise a TypeError for non ndarray or list inputs | napari/layers/utils/_tests/test_text_utils.py | test_bbox_centers_exception | Zac-HD/napari | 1,345 | python | def test_bbox_centers_exception():
with pytest.raises(TypeError):
_ = _calculate_bbox_centers({'bad_data_type': True}) | def test_bbox_centers_exception():
with pytest.raises(TypeError):
_ = _calculate_bbox_centers({'bad_data_type': True})<|docstring|>_calculate_bbox_centers should raise a TypeError for non ndarray or list inputs<|endoftext|> |
fa69db25d703eaf85ba28fc637a8a395b4e521fdc88a6b25a242fc406e7e2cab | def test_bbox_extents_exception():
'_calculate_bbox_extents should raise a TypeError for non ndarray or list inputs'
with pytest.raises(TypeError):
_ = _calculate_bbox_extents({'bad_data_type': True}) | _calculate_bbox_extents should raise a TypeError for non ndarray or list inputs | napari/layers/utils/_tests/test_text_utils.py | test_bbox_extents_exception | Zac-HD/napari | 1,345 | python | def test_bbox_extents_exception():
with pytest.raises(TypeError):
_ = _calculate_bbox_extents({'bad_data_type': True}) | def test_bbox_extents_exception():
with pytest.raises(TypeError):
_ = _calculate_bbox_extents({'bad_data_type': True})<|docstring|>_calculate_bbox_extents should raise a TypeError for non ndarray or list inputs<|endoftext|> |
a11161629cb99c05c62b41d13cf61d5cbda67dc66307e077d2d68a9ae07e0f59 | def stop_voting(self, user):
"\n Sets the user's remaining amount to 0 and sets the cooldown based on the remaining amount.\n :param user: The user to bew removed\n :return:\n "
data = hf.get_vote_data()
if (user in hf.get_users_on_cooldown()):
remaining_vote = 0
user_cooldown_vote_value = data['Users On Cooldown'][user]['amount']
if (user_cooldown_vote_value != 'all'):
if (user_cooldown_vote_value <= max_vote_rate):
remaining_vote = data['Users On Cooldown'][user]['amount']
else:
remaining_vote = max_vote_rate
else:
remaining_vote = max_vote_rate
remaining_cooldown = hf.get_dynamic_cooldown_amount(remaining_vote)
now = time.time()
new_cooldown = (now + remaining_cooldown)
data['Users On Cooldown'][user]['cooldown end'] = new_cooldown
data['Users On Cooldown'][user]['amount'] = 0
hf.update_vote_data(data) | Sets the user's remaining amount to 0 and sets the cooldown based on the remaining amount.
:param user: The user to bew removed
:return: | voting/vote_manager.py | stop_voting | NewtC1/Salamandbot2.0 | 2 | python | def stop_voting(self, user):
"\n Sets the user's remaining amount to 0 and sets the cooldown based on the remaining amount.\n :param user: The user to bew removed\n :return:\n "
data = hf.get_vote_data()
if (user in hf.get_users_on_cooldown()):
remaining_vote = 0
user_cooldown_vote_value = data['Users On Cooldown'][user]['amount']
if (user_cooldown_vote_value != 'all'):
if (user_cooldown_vote_value <= max_vote_rate):
remaining_vote = data['Users On Cooldown'][user]['amount']
else:
remaining_vote = max_vote_rate
else:
remaining_vote = max_vote_rate
remaining_cooldown = hf.get_dynamic_cooldown_amount(remaining_vote)
now = time.time()
new_cooldown = (now + remaining_cooldown)
data['Users On Cooldown'][user]['cooldown end'] = new_cooldown
data['Users On Cooldown'][user]['amount'] = 0
hf.update_vote_data(data) | def stop_voting(self, user):
"\n Sets the user's remaining amount to 0 and sets the cooldown based on the remaining amount.\n :param user: The user to bew removed\n :return:\n "
data = hf.get_vote_data()
if (user in hf.get_users_on_cooldown()):
remaining_vote = 0
user_cooldown_vote_value = data['Users On Cooldown'][user]['amount']
if (user_cooldown_vote_value != 'all'):
if (user_cooldown_vote_value <= max_vote_rate):
remaining_vote = data['Users On Cooldown'][user]['amount']
else:
remaining_vote = max_vote_rate
else:
remaining_vote = max_vote_rate
remaining_cooldown = hf.get_dynamic_cooldown_amount(remaining_vote)
now = time.time()
new_cooldown = (now + remaining_cooldown)
data['Users On Cooldown'][user]['cooldown end'] = new_cooldown
data['Users On Cooldown'][user]['amount'] = 0
hf.update_vote_data(data)<|docstring|>Sets the user's remaining amount to 0 and sets the cooldown based on the remaining amount.
:param user: The user to bew removed
:return:<|endoftext|> |
c1022d86e01fefe3b14e4dff1aa65565d425701cfc4ae950c29c8bf31e42e8ca | def calculate_multiplier(self, target):
'\n Assumes the target is valid. Returns the correct vote multiplier.\n :param target:\n :return:\n '
thresholds = dict(zip([x for x in range(10, 60, 10)], hf.settings['settings']['vote_multiplier_threshold']))
length_of_game = hf.get_length_of_game(target)
if (length_of_game == 0):
return 1.0
for threshold in thresholds.keys():
if (length_of_game < threshold):
return thresholds[threshold]
return 1 | Assumes the target is valid. Returns the correct vote multiplier.
:param target:
:return: | voting/vote_manager.py | calculate_multiplier | NewtC1/Salamandbot2.0 | 2 | python | def calculate_multiplier(self, target):
'\n Assumes the target is valid. Returns the correct vote multiplier.\n :param target:\n :return:\n '
thresholds = dict(zip([x for x in range(10, 60, 10)], hf.settings['settings']['vote_multiplier_threshold']))
length_of_game = hf.get_length_of_game(target)
if (length_of_game == 0):
return 1.0
for threshold in thresholds.keys():
if (length_of_game < threshold):
return thresholds[threshold]
return 1 | def calculate_multiplier(self, target):
'\n Assumes the target is valid. Returns the correct vote multiplier.\n :param target:\n :return:\n '
thresholds = dict(zip([x for x in range(10, 60, 10)], hf.settings['settings']['vote_multiplier_threshold']))
length_of_game = hf.get_length_of_game(target)
if (length_of_game == 0):
return 1.0
for threshold in thresholds.keys():
if (length_of_game < threshold):
return thresholds[threshold]
return 1<|docstring|>Assumes the target is valid. Returns the correct vote multiplier.
:param target:
:return:<|endoftext|> |
bc4d6396ee4f49b9122035df43c87ae2cf30946dd33dd40518af50ff46a42e79 | def get_pending_profile_change():
'\n Gets the next event that should occur given the current time.\n :return: The time of the next event, or None if there is no pending event.\n '
schedule = hf.get_vote_data()['Active Profile Schedule']
if schedule:
schedule_times = []
now = time.localtime()
for item_time in schedule.keys():
new_time = time.strptime(f'{now.tm_year} {now.tm_mon} {now.tm_mday} {item_time}', '%Y %m %d %H:%M')
schedule_times.append(new_time)
schedule_times.sort()
next_profile_change = None
if schedule_times:
for times in schedule_times:
if (times > now):
break
next_profile_change = times
if next_profile_change:
schedule_index = f'{next_profile_change[3]}:{str(next_profile_change[4]).zfill(2)}'
if (hf.get_active_profile() == schedule[schedule_index]):
return None
else:
schedule_index = f'{next_profile_change[3]}:{str(next_profile_change[4]).zfill(2)}'
return schedule_index
else:
return None
else:
return None | Gets the next event that should occur given the current time.
:return: The time of the next event, or None if there is no pending event. | voting/vote_manager.py | get_pending_profile_change | NewtC1/Salamandbot2.0 | 2 | python | def get_pending_profile_change():
'\n Gets the next event that should occur given the current time.\n :return: The time of the next event, or None if there is no pending event.\n '
schedule = hf.get_vote_data()['Active Profile Schedule']
if schedule:
schedule_times = []
now = time.localtime()
for item_time in schedule.keys():
new_time = time.strptime(f'{now.tm_year} {now.tm_mon} {now.tm_mday} {item_time}', '%Y %m %d %H:%M')
schedule_times.append(new_time)
schedule_times.sort()
next_profile_change = None
if schedule_times:
for times in schedule_times:
if (times > now):
break
next_profile_change = times
if next_profile_change:
schedule_index = f'{next_profile_change[3]}:{str(next_profile_change[4]).zfill(2)}'
if (hf.get_active_profile() == schedule[schedule_index]):
return None
else:
schedule_index = f'{next_profile_change[3]}:{str(next_profile_change[4]).zfill(2)}'
return schedule_index
else:
return None
else:
return None | def get_pending_profile_change():
'\n Gets the next event that should occur given the current time.\n :return: The time of the next event, or None if there is no pending event.\n '
schedule = hf.get_vote_data()['Active Profile Schedule']
if schedule:
schedule_times = []
now = time.localtime()
for item_time in schedule.keys():
new_time = time.strptime(f'{now.tm_year} {now.tm_mon} {now.tm_mday} {item_time}', '%Y %m %d %H:%M')
schedule_times.append(new_time)
schedule_times.sort()
next_profile_change = None
if schedule_times:
for times in schedule_times:
if (times > now):
break
next_profile_change = times
if next_profile_change:
schedule_index = f'{next_profile_change[3]}:{str(next_profile_change[4]).zfill(2)}'
if (hf.get_active_profile() == schedule[schedule_index]):
return None
else:
schedule_index = f'{next_profile_change[3]}:{str(next_profile_change[4]).zfill(2)}'
return schedule_index
else:
return None
else:
return None<|docstring|>Gets the next event that should occur given the current time.
:return: The time of the next event, or None if there is no pending event.<|endoftext|> |
c57eb9c6eb5d414ba4ae8200836d63e5e4efab5597d4e3a91fdb04196332010a | def __init__(self, temboo_session):
'\n Create a new instance of the RemovePermission Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n '
super(RemovePermission, self).__init__(temboo_session, '/Library/Amazon/SNS/RemovePermission') | Create a new instance of the RemovePermission Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied. | temboo/core/Library/Amazon/SNS/RemovePermission.py | __init__ | jordanemedlock/psychtruths | 7 | python | def __init__(self, temboo_session):
'\n Create a new instance of the RemovePermission Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n '
super(RemovePermission, self).__init__(temboo_session, '/Library/Amazon/SNS/RemovePermission') | def __init__(self, temboo_session):
'\n Create a new instance of the RemovePermission Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n '
super(RemovePermission, self).__init__(temboo_session, '/Library/Amazon/SNS/RemovePermission')<|docstring|>Create a new instance of the RemovePermission Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.<|endoftext|> |
06b6af9fc50245663746791ebcdb3ad43fc3e75244a38085b3266bc1bb8f9d50 | def set_AWSAccessKeyId(self, value):
'\n Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)\n '
super(RemovePermissionInputSet, self)._set_input('AWSAccessKeyId', value) | Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.) | temboo/core/Library/Amazon/SNS/RemovePermission.py | set_AWSAccessKeyId | jordanemedlock/psychtruths | 7 | python | def set_AWSAccessKeyId(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('AWSAccessKeyId', value) | def set_AWSAccessKeyId(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('AWSAccessKeyId', value)<|docstring|>Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)<|endoftext|> |
ef0bb195a4c9c2d847dab19ce941d301c46df4f9d4832ec307c1e553f26701ae | def set_AWSSecretKeyId(self, value):
'\n Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)\n '
super(RemovePermissionInputSet, self)._set_input('AWSSecretKeyId', value) | Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.) | temboo/core/Library/Amazon/SNS/RemovePermission.py | set_AWSSecretKeyId | jordanemedlock/psychtruths | 7 | python | def set_AWSSecretKeyId(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('AWSSecretKeyId', value) | def set_AWSSecretKeyId(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('AWSSecretKeyId', value)<|docstring|>Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)<|endoftext|> |
29b8ffdd05e77ab5c6d0a89b5416391bb514877247cf72f5ebdda9d11f29161e | def set_Label(self, value):
'\n Set the value of the Label input for this Choreo. ((required, string) The unique identifier for the policy statement that you want to delete.)\n '
super(RemovePermissionInputSet, self)._set_input('Label', value) | Set the value of the Label input for this Choreo. ((required, string) The unique identifier for the policy statement that you want to delete.) | temboo/core/Library/Amazon/SNS/RemovePermission.py | set_Label | jordanemedlock/psychtruths | 7 | python | def set_Label(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('Label', value) | def set_Label(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('Label', value)<|docstring|>Set the value of the Label input for this Choreo. ((required, string) The unique identifier for the policy statement that you want to delete.)<|endoftext|> |
1cf15b19f00b47adcfc035517f05bf40ee35d893ace79c4599f4c4bca19e2bdd | def set_TopicArn(self, value):
'\n Set the value of the TopicArn input for this Choreo. ((required, string) The ARN of the topic that has an access control policy you want to adjust.)\n '
super(RemovePermissionInputSet, self)._set_input('TopicArn', value) | Set the value of the TopicArn input for this Choreo. ((required, string) The ARN of the topic that has an access control policy you want to adjust.) | temboo/core/Library/Amazon/SNS/RemovePermission.py | set_TopicArn | jordanemedlock/psychtruths | 7 | python | def set_TopicArn(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('TopicArn', value) | def set_TopicArn(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('TopicArn', value)<|docstring|>Set the value of the TopicArn input for this Choreo. ((required, string) The ARN of the topic that has an access control policy you want to adjust.)<|endoftext|> |
c0243b79bd42d5c24a1df77a94d1d95c5ea7198075df1977772bc62ac79a59e2 | def set_UserRegion(self, value):
'\n Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the SNS endpoint you wish to access. The default region is "us-east-1". See description below for valid values.)\n '
super(RemovePermissionInputSet, self)._set_input('UserRegion', value) | Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the SNS endpoint you wish to access. The default region is "us-east-1". See description below for valid values.) | temboo/core/Library/Amazon/SNS/RemovePermission.py | set_UserRegion | jordanemedlock/psychtruths | 7 | python | def set_UserRegion(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('UserRegion', value) | def set_UserRegion(self, value):
'\n \n '
super(RemovePermissionInputSet, self)._set_input('UserRegion', value)<|docstring|>Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the SNS endpoint you wish to access. The default region is "us-east-1". See description below for valid values.)<|endoftext|> |
0f4bf72526e6881f4e743d1e7e71a69acd4249482df4b1387a9adf50372a9f91 | def get_Response(self):
'\n Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Amazon.)\n '
return self._output.get('Response', None) | Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Amazon.) | temboo/core/Library/Amazon/SNS/RemovePermission.py | get_Response | jordanemedlock/psychtruths | 7 | python | def get_Response(self):
'\n \n '
return self._output.get('Response', None) | def get_Response(self):
'\n \n '
return self._output.get('Response', None)<|docstring|>Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Amazon.)<|endoftext|> |
57c7cd89bc2dcfbfaac0c484a44ba48c65fffe3c9f6a84c6101906bc2ae27e9b | def test_horizons_constructor_requires_timezone_aware_start_time():
'Test that the constructor requires a timezone-aware start time.'
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=datetime(2022, 2, 8, 0, 0, 0, 0), end=datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc))
assert ('start' in str(excinfo.value)) | Test that the constructor requires a timezone-aware start time. | tests/service/test_horizons.py | test_horizons_constructor_requires_timezone_aware_start_time | saltastroops/imephu | 0 | python | def test_horizons_constructor_requires_timezone_aware_start_time():
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=datetime(2022, 2, 8, 0, 0, 0, 0), end=datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc))
assert ('start' in str(excinfo.value)) | def test_horizons_constructor_requires_timezone_aware_start_time():
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=datetime(2022, 2, 8, 0, 0, 0, 0), end=datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc))
assert ('start' in str(excinfo.value))<|docstring|>Test that the constructor requires a timezone-aware start time.<|endoftext|> |
50c05c8fb5bff8f08486943be6e0698a2dc5e954b18e5e424394065ffe7ddd0d | def test_horizons_constructor_requires_timezone_aware_end_time():
'Test that the constructor requires a timezone-aware end time.'
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=datetime(2022, 2, 8, 0, 0, 0, 0, tzinfo=timezone.utc), end=datetime(2022, 2, 8, 1, 0, 0, 0))
assert ('end' in str(excinfo.value)) | Test that the constructor requires a timezone-aware end time. | tests/service/test_horizons.py | test_horizons_constructor_requires_timezone_aware_end_time | saltastroops/imephu | 0 | python | def test_horizons_constructor_requires_timezone_aware_end_time():
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=datetime(2022, 2, 8, 0, 0, 0, 0, tzinfo=timezone.utc), end=datetime(2022, 2, 8, 1, 0, 0, 0))
assert ('end' in str(excinfo.value)) | def test_horizons_constructor_requires_timezone_aware_end_time():
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=datetime(2022, 2, 8, 0, 0, 0, 0, tzinfo=timezone.utc), end=datetime(2022, 2, 8, 1, 0, 0, 0))
assert ('end' in str(excinfo.value))<|docstring|>Test that the constructor requires a timezone-aware end time.<|endoftext|> |
eee4445b9f7da6d4e8283fd11f3459bad4623631abd77dcaa08146dcfe1a5c08 | @pytest.mark.parametrize('start', [datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc), datetime(2022, 2, 8, 1, 0, 1, 0, tzinfo=timezone.utc)])
def test_horizons_start_and_end_time_must_be_consistent(start):
'Test that the constructor start time must be earlier than the end time.'
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=start, end=datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc))
assert ('earlier' in str(excinfo.value)) | Test that the constructor start time must be earlier than the end time. | tests/service/test_horizons.py | test_horizons_start_and_end_time_must_be_consistent | saltastroops/imephu | 0 | python | @pytest.mark.parametrize('start', [datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc), datetime(2022, 2, 8, 1, 0, 1, 0, tzinfo=timezone.utc)])
def test_horizons_start_and_end_time_must_be_consistent(start):
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=start, end=datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc))
assert ('earlier' in str(excinfo.value)) | @pytest.mark.parametrize('start', [datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc), datetime(2022, 2, 8, 1, 0, 1, 0, tzinfo=timezone.utc)])
def test_horizons_start_and_end_time_must_be_consistent(start):
with pytest.raises(ValueError) as excinfo:
HorizonsService(object_id='Ceres', location='B31', start=start, end=datetime(2022, 2, 8, 1, 0, 0, 0, tzinfo=timezone.utc))
assert ('earlier' in str(excinfo.value))<|docstring|>Test that the constructor start time must be earlier than the end time.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.