| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (string, 1 class) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def __init__(self, filenames):
'Create a `SequenceFileDataset`.\n\n `SequenceFileDataset` allows a user to read data from a hadoop sequence\n file. A sequence file consists of (key value) pairs sequentially. At\n the moment, `org.apache.hadoop.io.Text` is the only serialization type\n being supported, and there is no compression support.\n\n For example:\n\n ```python\n dataset = SequenceFileDataset("/foo/bar.seq")\n iterator = dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n # Prints the (key, value) pairs inside a hadoop sequence file.\n while True:\n try:\n print(sess.run(next_element))\n except tf.errors.OutOfRangeError:\n break\n ```\n\n Args:\n filenames: A `tf.string` tensor containing one or more filenames.\n '
self._filenames = tf.convert_to_tensor(filenames, dtype=dtypes.string, name='filenames')
super(SequenceFileDataset, self).__init__()
| -758,520,326,914,486,700
|
Create a `SequenceFileDataset`.
`SequenceFileDataset` allows a user to read data from a Hadoop sequence
file. A sequence file consists of (key, value) pairs stored sequentially. At
the moment, `org.apache.hadoop.io.Text` is the only serialization type
being supported, and there is no compression support.
For example:
```python
dataset = SequenceFileDataset("/foo/bar.seq")
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
|
tensorflow_io/hadoop/python/ops/hadoop_dataset_ops.py
|
__init__
|
HubBucket-Team/io
|
python
|
def __init__(self, filenames):
'Create a `SequenceFileDataset`.\n\n `SequenceFileDataset` allows a user to read data from a hadoop sequence\n file. A sequence file consists of (key value) pairs sequentially. At\n the moment, `org.apache.hadoop.io.Text` is the only serialization type\n being supported, and there is no compression support.\n\n For example:\n\n ```python\n dataset = SequenceFileDataset("/foo/bar.seq")\n iterator = dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n # Prints the (key, value) pairs inside a hadoop sequence file.\n while True:\n try:\n print(sess.run(next_element))\n except tf.errors.OutOfRangeError:\n break\n ```\n\n Args:\n filenames: A `tf.string` tensor containing one or more filenames.\n '
self._filenames = tf.convert_to_tensor(filenames, dtype=dtypes.string, name='filenames')
super(SequenceFileDataset, self).__init__()
|
def create_net(self, shape, scale, ir_version):
'\n ONNX net IR net\n\n Input->ImageScaler->Output => Input->ScaleShift(Power)\n\n '
import onnx
from onnx import helper
from onnx import TensorProto
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)
bias = np.random.randint((- 10), 10, shape[1]).astype(np.float64)
node_def = onnx.helper.make_node('ImageScaler', inputs=['input'], outputs=['output'], bias=bias, scale=scale)
graph_def = helper.make_graph([node_def], 'test_model', [input], [output])
onnx_net = helper.make_model(graph_def, producer_name='test_model')
ref_net = None
return (onnx_net, ref_net)
| 1,688,503,208,243,325,000
|
ONNX net => IR net
Input->ImageScaler->Output => Input->ScaleShift(Power)
|
tests/layer_tests/onnx_tests/test_image_scaler.py
|
create_net
|
3Demonica/openvino
|
python
|
def create_net(self, shape, scale, ir_version):
'\n ONNX net IR net\n\n Input->ImageScaler->Output => Input->ScaleShift(Power)\n\n '
import onnx
from onnx import helper
from onnx import TensorProto
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)
bias = np.random.randint((- 10), 10, shape[1]).astype(np.float64)
node_def = onnx.helper.make_node('ImageScaler', inputs=['input'], outputs=['output'], bias=bias, scale=scale)
graph_def = helper.make_graph([node_def], 'test_model', [input], [output])
onnx_net = helper.make_model(graph_def, producer_name='test_model')
ref_net = None
return (onnx_net, ref_net)
|
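For reference, the ONNX `ImageScaler` op computes `y = scale * x + bias`, with the bias broadcast over the channel axis of an NCHW tensor; this is the same arithmetic the `ir_const` expression in `create_net_const` below reproduces. A minimal NumPy sketch of that semantics (an illustration of the op, not code from either repository):

```python
import numpy as np

def image_scaler_reference(x, scale, bias):
    """Reference semantics of ONNX ImageScaler: y = scale * x + bias,
    with bias broadcast over the channel axis of an NCHW tensor."""
    # bias has shape (C,); reshape to (1, C, 1, 1) so it broadcasts per channel
    return scale * x + np.asarray(bias).reshape(1, -1, 1, 1)

x = np.random.rand(1, 3, 4, 4).astype(np.float32)
bias = np.array([0.5, -1.0, 2.0], dtype=np.float32)
y = image_scaler_reference(x, scale=2.0, bias=bias)
assert y.shape == x.shape  # elementwise scale, per-channel shift
```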
def create_net_const(self, shape, scale, precision, ir_version):
'\n ONNX net IR net\n\n Input->Concat(+scaled const)->Output => Input->Concat(+const)\n\n '
import onnx
from onnx import helper
from onnx import TensorProto
concat_axis = 0
output_shape = shape.copy()
output_shape[concat_axis] *= 2
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
constant = np.random.randint((- 127), 127, shape).astype(np.float64)
bias = np.random.randint((- 10), 10, shape[1]).astype(np.float64)
node_const_def = onnx.helper.make_node('Constant', inputs=[], outputs=['const1'], value=helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=constant.shape, vals=constant.flatten()))
node_def = onnx.helper.make_node('ImageScaler', inputs=['const1'], outputs=['scale'], bias=bias, scale=scale)
node_concat_def = onnx.helper.make_node('Concat', inputs=['input', 'scale'], outputs=['output'], axis=concat_axis)
graph_def = helper.make_graph([node_const_def, node_def, node_concat_def], 'test_model', [input], [output])
onnx_net = helper.make_model(graph_def, producer_name='test_model')
ir_const = ((constant * scale) + np.expand_dims(np.expand_dims([bias], 2), 3))
if (precision == 'FP16'):
ir_const = ir_const.astype(np.float16)
ref_net = None
return (onnx_net, ref_net)
| 5,785,908,737,829,401,000
|
ONNX net => IR net
Input->Concat(+scaled const)->Output => Input->Concat(+const)
|
tests/layer_tests/onnx_tests/test_image_scaler.py
|
create_net_const
|
3Demonica/openvino
|
python
|
def create_net_const(self, shape, scale, precision, ir_version):
'\n ONNX net IR net\n\n Input->Concat(+scaled const)->Output => Input->Concat(+const)\n\n '
import onnx
from onnx import helper
from onnx import TensorProto
concat_axis = 0
output_shape = shape.copy()
output_shape[concat_axis] *= 2
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
constant = np.random.randint((- 127), 127, shape).astype(np.float64)
bias = np.random.randint((- 10), 10, shape[1]).astype(np.float64)
node_const_def = onnx.helper.make_node('Constant', inputs=[], outputs=['const1'], value=helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=constant.shape, vals=constant.flatten()))
node_def = onnx.helper.make_node('ImageScaler', inputs=['const1'], outputs=['scale'], bias=bias, scale=scale)
node_concat_def = onnx.helper.make_node('Concat', inputs=['input', 'scale'], outputs=['output'], axis=concat_axis)
graph_def = helper.make_graph([node_const_def, node_def, node_concat_def], 'test_model', [input], [output])
onnx_net = helper.make_model(graph_def, producer_name='test_model')
ir_const = ((constant * scale) + np.expand_dims(np.expand_dims([bias], 2), 3))
if (precision == 'FP16'):
ir_const = ir_const.astype(np.float16)
ref_net = None
return (onnx_net, ref_net)
|
def start(self):
'Starts agent-environment interaction.'
self.callbacks.on_interaction_begin()
while self.should_continue():
self.callbacks.on_episode_begin(self.episode)
self.env.reset()
self.step = 0
while (not self.env.is_terminal()):
self.callbacks.on_step_begin(self.step)
state = self.env.get_state()
action = self.agent.get_action(state)
(next_state, reward, done, info) = self.env.step(action)
experience = Experience(state, action, reward, next_state, done)
self.agent.update(experience)
self.callbacks.on_step_end(self.step)
self.step += 1
self.callbacks.on_episode_end(self.episode, self.step)
self.episode += 1
self.callbacks.on_interaction_end(self.episode)
| -2,936,708,797,581,038,600
|
Starts agent-environment interaction.
|
myelin/core/interactions.py
|
start
|
davidrobles/myelin
|
python
|
def start(self):
self.callbacks.on_interaction_begin()
while self.should_continue():
self.callbacks.on_episode_begin(self.episode)
self.env.reset()
self.step = 0
while (not self.env.is_terminal()):
self.callbacks.on_step_begin(self.step)
state = self.env.get_state()
action = self.agent.get_action(state)
(next_state, reward, done, info) = self.env.step(action)
experience = Experience(state, action, reward, next_state, done)
self.agent.update(experience)
self.callbacks.on_step_end(self.step)
self.step += 1
self.callbacks.on_episode_end(self.episode, self.step)
self.episode += 1
self.callbacks.on_interaction_end(self.episode)
|
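The `start` loop assumes a small duck-typed interface: an environment with `reset`, `is_terminal`, `get_state`, and `step`, and an agent with `get_action` and `update`. A hypothetical minimal pair that would drive the loop (everything here beyond the method names called in `start` is invented for illustration, including this stand-in `Experience`):

```python
import random
from collections import namedtuple

# Stand-in for the project's Experience type (fields inferred from the call site).
Experience = namedtuple('Experience', 'state action reward next_state done')

class CountdownEnv:
    """Toy environment whose episode ends after `n` steps."""
    def __init__(self, n=3):
        self.n = n
        self.t = 0
    def reset(self):
        self.t = 0
    def is_terminal(self):
        return self.t >= self.n
    def get_state(self):
        return self.t
    def step(self, action):
        self.t += 1
        return self.t, 1.0, self.is_terminal(), {}

class RandomAgent:
    def get_action(self, state):
        return random.choice([0, 1])
    def update(self, experience):
        pass  # a learning agent would update its policy from `experience` here
```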
def _find_boundaries_subpixel(label_img):
'See ``find_boundaries(..., mode=\'subpixel\')``.\n\n Notes\n -----\n This function puts in an empty row and column between each *actual*\n row and column of the image, for a corresponding shape of $2s - 1$\n for every image dimension of size $s$. These "interstitial" rows\n and columns are filled as ``True`` if they separate two labels in\n `label_img`, ``False`` otherwise.\n\n I used ``view_as_windows`` to get the neighborhood of each pixel.\n Then I check whether there are two labels or more in that\n neighborhood.\n '
ndim = label_img.ndim
max_label = np.iinfo(label_img.dtype).max
label_img_expanded = np.zeros([((2 * s) - 1) for s in label_img.shape], label_img.dtype)
pixels = ((slice(None, None, 2),) * ndim)
label_img_expanded[pixels] = label_img
edges = np.ones(label_img_expanded.shape, dtype=bool)
edges[pixels] = False
label_img_expanded[edges] = max_label
windows = view_as_windows(np.pad(label_img_expanded, 1, mode='constant', constant_values=0), ((3,) * ndim))
boundaries = np.zeros_like(edges)
for index in np.ndindex(label_img_expanded.shape):
if edges[index]:
values = np.unique(windows[index].ravel())
if (len(values) > 2):
boundaries[index] = True
return boundaries
| -8,794,061,428,085,247,000
|
See ``find_boundaries(..., mode='subpixel')``.
Notes
-----
This function puts in an empty row and column between each *actual*
row and column of the image, for a corresponding shape of $2s - 1$
for every image dimension of size $s$. These "interstitial" rows
and columns are filled as ``True`` if they separate two labels in
`label_img`, ``False`` otherwise.
I used ``view_as_windows`` to get the neighborhood of each pixel.
Then I check whether there are two labels or more in that
neighborhood.
|
venv/lib/python3.8/site-packages/skimage/segmentation/boundaries.py
|
_find_boundaries_subpixel
|
IZ-ZI/-EECS-393-_Attendance-System
|
python
|
def _find_boundaries_subpixel(label_img):
'See ``find_boundaries(..., mode=\'subpixel\')``.\n\n Notes\n -----\n This function puts in an empty row and column between each *actual*\n row and column of the image, for a corresponding shape of $2s - 1$\n for every image dimension of size $s$. These "interstitial" rows\n and columns are filled as ``True`` if they separate two labels in\n `label_img`, ``False`` otherwise.\n\n I used ``view_as_windows`` to get the neighborhood of each pixel.\n Then I check whether there are two labels or more in that\n neighborhood.\n '
ndim = label_img.ndim
max_label = np.iinfo(label_img.dtype).max
label_img_expanded = np.zeros([((2 * s) - 1) for s in label_img.shape], label_img.dtype)
pixels = ((slice(None, None, 2),) * ndim)
label_img_expanded[pixels] = label_img
edges = np.ones(label_img_expanded.shape, dtype=bool)
edges[pixels] = False
label_img_expanded[edges] = max_label
windows = view_as_windows(np.pad(label_img_expanded, 1, mode='constant', constant_values=0), ((3,) * ndim))
boundaries = np.zeros_like(edges)
for index in np.ndindex(label_img_expanded.shape):
if edges[index]:
values = np.unique(windows[index].ravel())
if (len(values) > 2):
boundaries[index] = True
return boundaries
|
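The `2s - 1` expansion described in the docstring is easy to confirm through the public `find_boundaries` wrapper (a small sketch, assuming scikit-image is installed):

```python
import numpy as np
from skimage.segmentation import find_boundaries

labels = np.array([[1, 1, 2],
                   [1, 1, 2],
                   [1, 1, 2]], dtype=np.uint8)
sub = find_boundaries(labels, mode='subpixel')
print(sub.shape)  # (5, 5): each dimension of size s becomes 2*s - 1
print(sub.astype(np.uint8))
```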
def find_boundaries(label_img, connectivity=1, mode='thick', background=0):
"Return bool array where boundaries between labeled regions are True.\n\n Parameters\n ----------\n label_img : array of int or bool\n An array in which different regions are labeled with either different\n integers or boolean values.\n connectivity: int in {1, ..., `label_img.ndim`}, optional\n A pixel is considered a boundary pixel if any of its neighbors\n has a different label. `connectivity` controls which pixels are\n considered neighbors. A connectivity of 1 (default) means\n pixels sharing an edge (in 2D) or a face (in 3D) will be\n considered neighbors. A connectivity of `label_img.ndim` means\n pixels sharing a corner will be considered neighbors.\n mode: string in {'thick', 'inner', 'outer', 'subpixel'}\n How to mark the boundaries:\n\n - thick: any pixel not completely surrounded by pixels of the\n same label (defined by `connectivity`) is marked as a boundary.\n This results in boundaries that are 2 pixels thick.\n - inner: outline the pixels *just inside* of objects, leaving\n background pixels untouched.\n - outer: outline pixels in the background around object\n boundaries. When two objects touch, their boundary is also\n marked.\n - subpixel: return a doubled image, with pixels *between* the\n original pixels marked as boundary where appropriate.\n background: int, optional\n For modes 'inner' and 'outer', a definition of a background\n label is required. See `mode` for descriptions of these two.\n\n Returns\n -------\n boundaries : array of bool, same shape as `label_img`\n A bool image where ``True`` represents a boundary pixel. For\n `mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal\n to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is\n inserted in between all other pairs of pixels).\n\n Examples\n --------\n >>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],\n ... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... 
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)\n >>> find_boundaries(labels, mode='thick').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 0, 1, 1, 0],\n [0, 1, 1, 0, 1, 1, 0, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 0, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels, mode='inner').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 1, 0, 0],\n [0, 0, 1, 0, 1, 1, 0, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels, mode='outer').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 0, 1, 1, 1, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> labels_small = labels[::2, ::3]\n >>> labels_small\n array([[0, 0, 0, 0],\n [0, 0, 5, 0],\n [0, 1, 5, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 0, 1, 0],\n [0, 1, 1, 1, 0, 1, 0],\n [0, 1, 0, 1, 0, 1, 0],\n [0, 1, 1, 1, 0, 1, 0],\n [0, 0, 0, 1, 0, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> bool_image = np.array([[False, False, False, False, False],\n ... [False, False, False, False, False],\n ... [False, False, True, True, True],\n ... [False, False, True, True, True],\n ... [False, False, True, True, True]], dtype=np.bool)\n >>> find_boundaries(bool_image)\n array([[False, False, False, False, False],\n [False, False, True, True, True],\n [False, True, True, True, True],\n [False, True, True, False, False],\n [False, True, True, False, False]], dtype=bool)\n "
if (label_img.dtype == 'bool'):
label_img = label_img.astype(np.uint8)
ndim = label_img.ndim
selem = ndi.generate_binary_structure(ndim, connectivity)
if (mode != 'subpixel'):
boundaries = (dilation(label_img, selem) != erosion(label_img, selem))
if (mode == 'inner'):
foreground_image = (label_img != background)
boundaries &= foreground_image
elif (mode == 'outer'):
max_label = np.iinfo(label_img.dtype).max
background_image = (label_img == background)
selem = ndi.generate_binary_structure(ndim, ndim)
inverted_background = np.array(label_img, copy=True)
inverted_background[background_image] = max_label
adjacent_objects = ((dilation(label_img, selem) != erosion(inverted_background, selem)) & (~ background_image))
boundaries &= (background_image | adjacent_objects)
return boundaries
else:
boundaries = _find_boundaries_subpixel(label_img)
return boundaries
| 9,124,155,518,888,762,000
|
Return bool array where boundaries between labeled regions are True.
Parameters
----------
label_img : array of int or bool
An array in which different regions are labeled with either different
integers or boolean values.
connectivity: int in {1, ..., `label_img.ndim`}, optional
A pixel is considered a boundary pixel if any of its neighbors
has a different label. `connectivity` controls which pixels are
considered neighbors. A connectivity of 1 (default) means
pixels sharing an edge (in 2D) or a face (in 3D) will be
considered neighbors. A connectivity of `label_img.ndim` means
pixels sharing a corner will be considered neighbors.
mode: string in {'thick', 'inner', 'outer', 'subpixel'}
How to mark the boundaries:
- thick: any pixel not completely surrounded by pixels of the
same label (defined by `connectivity`) is marked as a boundary.
This results in boundaries that are 2 pixels thick.
- inner: outline the pixels *just inside* of objects, leaving
background pixels untouched.
- outer: outline pixels in the background around object
boundaries. When two objects touch, their boundary is also
marked.
- subpixel: return a doubled image, with pixels *between* the
original pixels marked as boundary where appropriate.
background: int, optional
For modes 'inner' and 'outer', a definition of a background
label is required. See `mode` for descriptions of these two.
Returns
-------
boundaries : array of bool, same shape as `label_img`
A bool image where ``True`` represents a boundary pixel. For
`mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal
to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is
inserted in between all other pairs of pixels).
Examples
--------
>>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
>>> find_boundaries(labels, mode='thick').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='inner').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='outer').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> labels_small = labels[::2, ::3]
>>> labels_small
array([[0, 0, 0, 0],
[0, 0, 5, 0],
[0, 1, 5, 0],
[0, 0, 5, 0],
[0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> bool_image = np.array([[False, False, False, False, False],
... [False, False, False, False, False],
... [False, False, True, True, True],
... [False, False, True, True, True],
... [False, False, True, True, True]], dtype=bool)
>>> find_boundaries(bool_image)
array([[False, False, False, False, False],
[False, False, True, True, True],
[False, True, True, True, True],
[False, True, True, False, False],
[False, True, True, False, False]], dtype=bool)
|
venv/lib/python3.8/site-packages/skimage/segmentation/boundaries.py
|
find_boundaries
|
IZ-ZI/-EECS-393-_Attendance-System
|
python
|
def find_boundaries(label_img, connectivity=1, mode='thick', background=0):
"Return bool array where boundaries between labeled regions are True.\n\n Parameters\n ----------\n label_img : array of int or bool\n An array in which different regions are labeled with either different\n integers or boolean values.\n connectivity: int in {1, ..., `label_img.ndim`}, optional\n A pixel is considered a boundary pixel if any of its neighbors\n has a different label. `connectivity` controls which pixels are\n considered neighbors. A connectivity of 1 (default) means\n pixels sharing an edge (in 2D) or a face (in 3D) will be\n considered neighbors. A connectivity of `label_img.ndim` means\n pixels sharing a corner will be considered neighbors.\n mode: string in {'thick', 'inner', 'outer', 'subpixel'}\n How to mark the boundaries:\n\n - thick: any pixel not completely surrounded by pixels of the\n same label (defined by `connectivity`) is marked as a boundary.\n This results in boundaries that are 2 pixels thick.\n - inner: outline the pixels *just inside* of objects, leaving\n background pixels untouched.\n - outer: outline pixels in the background around object\n boundaries. When two objects touch, their boundary is also\n marked.\n - subpixel: return a doubled image, with pixels *between* the\n original pixels marked as boundary where appropriate.\n background: int, optional\n For modes 'inner' and 'outer', a definition of a background\n label is required. See `mode` for descriptions of these two.\n\n Returns\n -------\n boundaries : array of bool, same shape as `label_img`\n A bool image where ``True`` represents a boundary pixel. For\n `mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal\n to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is\n inserted in between all other pairs of pixels).\n\n Examples\n --------\n >>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],\n ... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... 
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)\n >>> find_boundaries(labels, mode='thick').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 0, 1, 1, 0],\n [0, 1, 1, 0, 1, 1, 0, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 0, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels, mode='inner').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 1, 0, 0],\n [0, 0, 1, 0, 1, 1, 0, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels, mode='outer').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 0, 1, 1, 1, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> labels_small = labels[::2, ::3]\n >>> labels_small\n array([[0, 0, 0, 0],\n [0, 0, 5, 0],\n [0, 1, 5, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 0, 1, 0],\n [0, 1, 1, 1, 0, 1, 0],\n [0, 1, 0, 1, 0, 1, 0],\n [0, 1, 1, 1, 0, 1, 0],\n [0, 0, 0, 1, 0, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> bool_image = np.array([[False, False, False, False, False],\n ... [False, False, False, False, False],\n ... [False, False, True, True, True],\n ... [False, False, True, True, True],\n ... [False, False, True, True, True]], dtype=np.bool)\n >>> find_boundaries(bool_image)\n array([[False, False, False, False, False],\n [False, False, True, True, True],\n [False, True, True, True, True],\n [False, True, True, False, False],\n [False, True, True, False, False]], dtype=bool)\n "
if (label_img.dtype == 'bool'):
label_img = label_img.astype(np.uint8)
ndim = label_img.ndim
selem = ndi.generate_binary_structure(ndim, connectivity)
if (mode != 'subpixel'):
boundaries = (dilation(label_img, selem) != erosion(label_img, selem))
if (mode == 'inner'):
foreground_image = (label_img != background)
boundaries &= foreground_image
elif (mode == 'outer'):
max_label = np.iinfo(label_img.dtype).max
background_image = (label_img == background)
selem = ndi.generate_binary_structure(ndim, ndim)
inverted_background = np.array(label_img, copy=True)
inverted_background[background_image] = max_label
adjacent_objects = ((dilation(label_img, selem) != erosion(inverted_background, selem)) & (~ background_image))
boundaries &= (background_image | adjacent_objects)
return boundaries
else:
boundaries = _find_boundaries_subpixel(label_img)
return boundaries
|
def mark_boundaries(image, label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0):
"Return image with boundaries between labeled regions highlighted.\n\n Parameters\n ----------\n image : (M, N[, 3]) array\n Grayscale or RGB image.\n label_img : (M, N) array of int\n Label array where regions are marked by different integer values.\n color : length-3 sequence, optional\n RGB color of boundaries in the output image.\n outline_color : length-3 sequence, optional\n RGB color surrounding boundaries in the output image. If None, no\n outline is drawn.\n mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional\n The mode for finding boundaries.\n background_label : int, optional\n Which label to consider background (this is only useful for\n modes ``inner`` and ``outer``).\n\n Returns\n -------\n marked : (M, N, 3) array of float\n An image in which the boundaries between labels are\n superimposed on the original image.\n\n See Also\n --------\n find_boundaries\n "
marked = img_as_float(image, force_copy=True)
if (marked.ndim == 2):
marked = gray2rgb(marked)
if (mode == 'subpixel'):
marked = ndi.zoom(marked, ([(2 - (1 / s)) for s in marked.shape[:(- 1)]] + [1]), mode='reflect')
boundaries = find_boundaries(label_img, mode=mode, background=background_label)
if (outline_color is not None):
outlines = dilation(boundaries, square(3))
marked[outlines] = outline_color
marked[boundaries] = color
return marked
| 7,373,605,340,623,127,000
|
Return image with boundaries between labeled regions highlighted.
Parameters
----------
image : (M, N[, 3]) array
Grayscale or RGB image.
label_img : (M, N) array of int
Label array where regions are marked by different integer values.
color : length-3 sequence, optional
RGB color of boundaries in the output image.
outline_color : length-3 sequence, optional
RGB color surrounding boundaries in the output image. If None, no
outline is drawn.
mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional
The mode for finding boundaries.
background_label : int, optional
Which label to consider background (this is only useful for
modes ``inner`` and ``outer``).
Returns
-------
marked : (M, N, 3) array of float
An image in which the boundaries between labels are
superimposed on the original image.
See Also
--------
find_boundaries
|
venv/lib/python3.8/site-packages/skimage/segmentation/boundaries.py
|
mark_boundaries
|
IZ-ZI/-EECS-393-_Attendance-System
|
python
|
def mark_boundaries(image, label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0):
"Return image with boundaries between labeled regions highlighted.\n\n Parameters\n ----------\n image : (M, N[, 3]) array\n Grayscale or RGB image.\n label_img : (M, N) array of int\n Label array where regions are marked by different integer values.\n color : length-3 sequence, optional\n RGB color of boundaries in the output image.\n outline_color : length-3 sequence, optional\n RGB color surrounding boundaries in the output image. If None, no\n outline is drawn.\n mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional\n The mode for finding boundaries.\n background_label : int, optional\n Which label to consider background (this is only useful for\n modes ``inner`` and ``outer``).\n\n Returns\n -------\n marked : (M, N, 3) array of float\n An image in which the boundaries between labels are\n superimposed on the original image.\n\n See Also\n --------\n find_boundaries\n "
marked = img_as_float(image, force_copy=True)
if (marked.ndim == 2):
marked = gray2rgb(marked)
if (mode == 'subpixel'):
marked = ndi.zoom(marked, ([(2 - (1 / s)) for s in marked.shape[:(- 1)]] + [1]), mode='reflect')
boundaries = find_boundaries(label_img, mode=mode, background=background_label)
if (outline_color is not None):
outlines = dilation(boundaries, square(3))
marked[outlines] = outline_color
marked[boundaries] = color
return marked
|
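A minimal usage sketch for `mark_boundaries`, assuming scikit-image is installed; the image and labels here are synthetic:

```python
import numpy as np
from skimage.segmentation import mark_boundaries

image = np.random.rand(8, 8)              # grayscale input
labels = np.zeros((8, 8), dtype=np.uint8)
labels[2:6, 2:6] = 1                      # one square region
overlay = mark_boundaries(image, labels, color=(1, 0, 0))
print(overlay.shape)  # (8, 8, 3): grayscale is promoted to RGB float
```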
def format_options(self, ctx, formatter) -> None:
'Writes all the options into the formatter if they exist.'
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if (rv is not None):
opts.append(rv)
if opts:
with formatter.section('Options'):
self.write_dl(formatter, opts)
| -7,876,094,872,403,372,000
|
Writes all the options into the formatter if they exist.
|
src/cli.py
|
format_options
|
yellowdog/virtual-screening-public
|
python
|
def format_options(self, ctx, formatter) -> None:
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if (rv is not None):
opts.append(rv)
if opts:
with formatter.section('Options'):
self.write_dl(formatter, opts)
|
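This mirrors click's stock `Command.format_options`, except that the option rows are written through a project-specific `write_dl` helper on the command. A hypothetical self-contained variant using click's own formatter API directly:

```python
import click

class OptionsOnlyCommand(click.Command):
    def format_options(self, ctx, formatter):
        opts = []
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            if rv is not None:
                opts.append(rv)
        if opts:
            with formatter.section('Options'):
                formatter.write_dl(opts)  # definition list of (term, help) pairs

@click.command(cls=OptionsOnlyCommand)
@click.option('--count', default=1, help='Number of repetitions.')
def cli(count):
    """Example command."""
```

Running such a script with `--help` prints the Options section assembled above.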
def get_min_fee_rate(self, cost: int) -> float:
'\n Gets the minimum fpc rate that a transaction with specified cost will need in order to get included.\n '
if self.at_full_capacity(cost):
current_cost = self.total_mempool_cost
for (fee_per_cost, spends_with_fpc) in self.sorted_spends.items():
for (spend_name, item) in spends_with_fpc.items():
current_cost -= item.cost
if ((current_cost + cost) <= self.max_size_in_cost):
return fee_per_cost
raise ValueError(f'Transaction with cost {cost} does not fit in mempool of max cost {self.max_size_in_cost}')
else:
return 0
| 6,172,221,950,805,018,000
|
Gets the minimum fee-per-cost (fpc) rate that a transaction with the specified cost will need in order to be included.
|
shamrock/full_node/mempool.py
|
get_min_fee_rate
|
zcomputerwiz/shamrock-blockchain
|
python
|
def get_min_fee_rate(self, cost: int) -> float:
'\n \n '
if self.at_full_capacity(cost):
current_cost = self.total_mempool_cost
for (fee_per_cost, spends_with_fpc) in self.sorted_spends.items():
for (spend_name, item) in spends_with_fpc.items():
current_cost -= item.cost
if ((current_cost + cost) <= self.max_size_in_cost):
return fee_per_cost
raise ValueError(f'Transaction with cost {cost} does not fit in mempool of max cost {self.max_size_in_cost}')
else:
return 0
|
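The scan walks `sorted_spends` (a mapping from fee-per-cost to the items at that rate, in ascending key order) and simulates evicting the cheapest items until the new transaction fits; the rate of the last evicted bucket is the minimum required. A toy standalone version of the same arithmetic (names and numbers are illustrative, not the project's):

```python
from sortedcontainers import SortedDict  # same ordered mapping the project appears to use

MAX_SIZE_IN_COST = 100
spends = SortedDict({0.5: {'a': 40},     # fee_per_cost -> {name: cost}
                     1.0: {'b': 30},
                     2.0: {'c': 30}})
total = sum(c for bucket in spends.values() for c in bucket.values())  # 100

def min_fee_rate(cost):
    if total + cost <= MAX_SIZE_IN_COST:
        return 0
    current = total
    for fee_per_cost, bucket in spends.items():   # ascending fee per cost
        for item_cost in bucket.values():
            current -= item_cost
            if current + cost <= MAX_SIZE_IN_COST:
                return fee_per_cost
    raise ValueError('transaction does not fit')

print(min_fee_rate(50))  # 1.0: evicting 'a' (40) and 'b' (30) frees enough room
```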
def remove_from_pool(self, item: MempoolItem):
'\n Removes an item from the mempool.\n '
removals: List[Coin] = item.removals
additions: List[Coin] = item.additions
for rem in removals:
del self.removals[rem.name()]
for add in additions:
del self.additions[add.name()]
del self.spends[item.name]
del self.sorted_spends[item.fee_per_cost][item.name]
dic = self.sorted_spends[item.fee_per_cost]
if (len(dic.values()) == 0):
del self.sorted_spends[item.fee_per_cost]
self.total_mempool_cost -= item.cost
assert (self.total_mempool_cost >= 0)
| -3,248,792,043,505,881,600
|
Removes an item from the mempool.
|
shamrock/full_node/mempool.py
|
remove_from_pool
|
zcomputerwiz/shamrock-blockchain
|
python
|
def remove_from_pool(self, item: MempoolItem):
'\n \n '
removals: List[Coin] = item.removals
additions: List[Coin] = item.additions
for rem in removals:
del self.removals[rem.name()]
for add in additions:
del self.additions[add.name()]
del self.spends[item.name]
del self.sorted_spends[item.fee_per_cost][item.name]
dic = self.sorted_spends[item.fee_per_cost]
if (len(dic.values()) == 0):
del self.sorted_spends[item.fee_per_cost]
self.total_mempool_cost -= item.cost
assert (self.total_mempool_cost >= 0)
|
def add_to_pool(self, item: MempoolItem):
"\n Adds an item to the mempool by kicking out transactions (if it doesn't fit), in order of increasing fee per cost\n "
while self.at_full_capacity(item.cost):
(fee_per_cost, val) = self.sorted_spends.peekitem(index=0)
to_remove = list(val.values())[0]
self.remove_from_pool(to_remove)
self.spends[item.name] = item
if (item.fee_per_cost not in self.sorted_spends):
self.sorted_spends[item.fee_per_cost] = {}
self.sorted_spends[item.fee_per_cost][item.name] = item
for add in item.additions:
self.additions[add.name()] = item
for coin in item.removals:
self.removals[coin.name()] = item
self.total_mempool_cost += item.cost
| 5,049,865,894,518,057,000
|
Adds an item to the mempool by kicking out transactions (if it doesn't fit), in order of increasing fee per cost
|
shamrock/full_node/mempool.py
|
add_to_pool
|
zcomputerwiz/shamrock-blockchain
|
python
|
def add_to_pool(self, item: MempoolItem):
"\n \n "
while self.at_full_capacity(item.cost):
(fee_per_cost, val) = self.sorted_spends.peekitem(index=0)
to_remove = list(val.values())[0]
self.remove_from_pool(to_remove)
self.spends[item.name] = item
if (item.fee_per_cost not in self.sorted_spends):
self.sorted_spends[item.fee_per_cost] = {}
self.sorted_spends[item.fee_per_cost][item.name] = item
for add in item.additions:
self.additions[add.name()] = item
for coin in item.removals:
self.removals[coin.name()] = item
self.total_mempool_cost += item.cost
|
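The eviction in `add_to_pool` relies on `peekitem(index=0)` returning the lowest key; a quick check of that ordering assumption, assuming `sorted_spends` is a `sortedcontainers.SortedDict` as the call suggests:

```python
from sortedcontainers import SortedDict

sorted_spends = SortedDict({2.0: {'rich': 'item1'}, 0.25: {'cheap': 'item2'}})
fee_per_cost, bucket = sorted_spends.peekitem(index=0)
print(fee_per_cost, list(bucket)[0])  # 0.25 cheap: the lowest-fee bucket goes first
```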
def at_full_capacity(self, cost: int) -> bool:
'\n Checks whether the mempool is at full capacity and cannot accept a transaction with size cost.\n '
return ((self.total_mempool_cost + cost) > self.max_size_in_cost)
| 4,300,916,430,235,673,000
|
Checks whether the mempool is at full capacity and cannot accept a transaction with size cost.
|
shamrock/full_node/mempool.py
|
at_full_capacity
|
zcomputerwiz/shamrock-blockchain
|
python
|
def at_full_capacity(self, cost: int) -> bool:
'\n \n '
return ((self.total_mempool_cost + cost) > self.max_size_in_cost)
|
@exporter.export
@preprocess_xarray
def resample_nn_1d(a, centers):
'Return one-dimensional nearest-neighbor indexes based on user-specified centers.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of numeric values from which to\n extract indexes of nearest-neighbors\n centers : array-like\n 1-dimensional array of numeric values representing a subset of values to approximate\n\n Returns\n -------\n An array of indexes representing values closest to given array values\n\n '
ix = []
for center in centers:
index = np.abs((a - center)).argmin()
if (index not in ix):
ix.append(index)
return ix
| -1,834,903,684,328,060,000
|
Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
|
src/metpy/calc/tools.py
|
resample_nn_1d
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
def resample_nn_1d(a, centers):
'Return one-dimensional nearest-neighbor indexes based on user-specified centers.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of numeric values from which to\n extract indexes of nearest-neighbors\n centers : array-like\n 1-dimensional array of numeric values representing a subset of values to approximate\n\n Returns\n -------\n An array of indexes representing values closest to given array values\n\n '
ix = []
for center in centers:
index = np.abs((a - center)).argmin()
if (index not in ix):
ix.append(index)
return ix
|
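A quick inline check of the nearest-neighbor index logic above (pure NumPy, values chosen arbitrarily):

```python
import numpy as np

a = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
centers = [0.4, 1.6]
ix = []
for center in centers:                    # same logic as resample_nn_1d
    index = np.abs(a - center).argmin()
    if index not in ix:
        ix.append(index)
print(ix)  # [1, 3]: nearest values are 0.5 and 1.5
```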
@exporter.export
@preprocess_xarray
def nearest_intersection_idx(a, b):
'Determine the index of the point just before two lines with common x values.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of y-values for line 1\n b : array-like\n 1-dimensional array of y-values for line 2\n\n Returns\n -------\n An array of indexes representing the index of the values\n just before the intersection(s) of the two lines.\n\n '
difference = (a - b)
(sign_change_idx,) = np.nonzero(np.diff(np.sign(difference)))
return sign_change_idx
| -2,360,650,077,986,789,400
|
Determine the index of the point just before two lines with common x values.
Parameters
----------
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
Returns
-------
An array of indexes representing the index of the values
just before the intersection(s) of the two lines.
|
src/metpy/calc/tools.py
|
nearest_intersection_idx
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
def nearest_intersection_idx(a, b):
'Determine the index of the point just before two lines with common x values.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of y-values for line 1\n b : array-like\n 1-dimensional array of y-values for line 2\n\n Returns\n -------\n An array of indexes representing the index of the values\n just before the intersection(s) of the two lines.\n\n '
difference = (a - b)
(sign_change_idx,) = np.nonzero(np.diff(np.sign(difference)))
return sign_change_idx
|
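A scalar example of the sign-change test above (pure NumPy, values chosen arbitrarily so the lines cross once, away from any grid point):

```python
import numpy as np

a = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # y-values of line 1
b = np.array([3.1, 2.6, 2.1, 1.6, 1.1])   # y-values of line 2
sign_change_idx, = np.nonzero(np.diff(np.sign(a - b)))
print(sign_change_idx)  # [2]: the lines cross between index 2 and index 3
```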
@exporter.export
@preprocess_xarray
@units.wraps(('=A', '=B'), ('=A', '=B', '=B'))
def find_intersections(x, a, b, direction='all', log_x=False):
"Calculate the best estimate of intersection.\n\n Calculates the best estimates of the intersection of two y-value\n data sets that share a common x-value set.\n\n Parameters\n ----------\n x : array-like\n 1-dimensional array of numeric x-values\n a : array-like\n 1-dimensional array of y-values for line 1\n b : array-like\n 1-dimensional array of y-values for line 2\n direction : string, optional\n specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),\n or 'decreasing' (b becoming greater than a). Defaults to 'all'.\n log_x : bool, optional\n Use logarithmic interpolation along the `x` axis (i.e. for finding intersections\n in pressure coordinates). Default is False.\n\n Returns\n -------\n A tuple (x, y) of array-like with the x and y coordinates of the\n intersections of the lines.\n\n "
if (log_x is True):
x = np.log(x)
nearest_idx = nearest_intersection_idx(a, b)
next_idx = (nearest_idx + 1)
sign_change = np.sign((a[next_idx] - b[next_idx]))
(_, x0) = _next_non_masked_element(x, nearest_idx)
(_, x1) = _next_non_masked_element(x, next_idx)
(_, a0) = _next_non_masked_element(a, nearest_idx)
(_, a1) = _next_non_masked_element(a, next_idx)
(_, b0) = _next_non_masked_element(b, nearest_idx)
(_, b1) = _next_non_masked_element(b, next_idx)
delta_y0 = (a0 - b0)
delta_y1 = (a1 - b1)
intersect_x = (((delta_y1 * x0) - (delta_y0 * x1)) / (delta_y1 - delta_y0))
intersect_y = ((((intersect_x - x0) / (x1 - x0)) * (a1 - a0)) + a0)
if (len(intersect_x) == 0):
return (intersect_x, intersect_y)
if (log_x is True):
intersect_x = np.exp(intersect_x)
duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
if (direction == 'increasing'):
mask = (sign_change > 0)
elif (direction == 'decreasing'):
mask = (sign_change < 0)
elif (direction == 'all'):
return (intersect_x[duplicate_mask], intersect_y[duplicate_mask])
else:
raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
return (intersect_x[(mask & duplicate_mask)], intersect_y[(mask & duplicate_mask)])
| 7,216,288,770,360,933,000
|
Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
log_x : bool, optional
Use logarithmic interpolation along the `x` axis (i.e. for finding intersections
in pressure coordinates). Default is False.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
|
src/metpy/calc/tools.py
|
find_intersections
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
@units.wraps(('=A', '=B'), ('=A', '=B', '=B'))
def find_intersections(x, a, b, direction='all', log_x=False):
"Calculate the best estimate of intersection.\n\n Calculates the best estimates of the intersection of two y-value\n data sets that share a common x-value set.\n\n Parameters\n ----------\n x : array-like\n 1-dimensional array of numeric x-values\n a : array-like\n 1-dimensional array of y-values for line 1\n b : array-like\n 1-dimensional array of y-values for line 2\n direction : string, optional\n specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),\n or 'decreasing' (b becoming greater than a). Defaults to 'all'.\n log_x : bool, optional\n Use logarithmic interpolation along the `x` axis (i.e. for finding intersections\n in pressure coordinates). Default is False.\n\n Returns\n -------\n A tuple (x, y) of array-like with the x and y coordinates of the\n intersections of the lines.\n\n "
if (log_x is True):
x = np.log(x)
nearest_idx = nearest_intersection_idx(a, b)
next_idx = (nearest_idx + 1)
sign_change = np.sign((a[next_idx] - b[next_idx]))
(_, x0) = _next_non_masked_element(x, nearest_idx)
(_, x1) = _next_non_masked_element(x, next_idx)
(_, a0) = _next_non_masked_element(a, nearest_idx)
(_, a1) = _next_non_masked_element(a, next_idx)
(_, b0) = _next_non_masked_element(b, nearest_idx)
(_, b1) = _next_non_masked_element(b, next_idx)
delta_y0 = (a0 - b0)
delta_y1 = (a1 - b1)
intersect_x = (((delta_y1 * x0) - (delta_y0 * x1)) / (delta_y1 - delta_y0))
intersect_y = ((((intersect_x - x0) / (x1 - x0)) * (a1 - a0)) + a0)
if (len(intersect_x) == 0):
return (intersect_x, intersect_y)
if (log_x is True):
intersect_x = np.exp(intersect_x)
duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
if (direction == 'increasing'):
mask = (sign_change > 0)
elif (direction == 'decreasing'):
mask = (sign_change < 0)
elif (direction == 'all'):
return (intersect_x[duplicate_mask], intersect_y[duplicate_mask])
else:
raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
return (intersect_x[(mask & duplicate_mask)], intersect_y[(mask & duplicate_mask)])
|
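For a single segment, with `d0 = a0 - b0` and `d1 = a1 - b1` at `x0` and `x1`, the `intersect_x` expression above solves `x* = (d1*x0 - d0*x1) / (d1 - d0)`, and `intersect_y` linearly interpolates line `a` to that point. A scalar sanity check of the algebra (arbitrary values):

```python
x0, x1 = 2.0, 3.0
a0, a1 = 2.0, 3.0        # line a at x0 and x1
b0, b1 = 2.1, 1.6        # line b at x0 and x1
d0, d1 = a0 - b0, a1 - b1
x_star = (d1 * x0 - d0 * x1) / (d1 - d0)
y_star = (x_star - x0) / (x1 - x0) * (a1 - a0) + a0
print(round(x_star, 4), round(y_star, 4))  # 2.0667 2.0667: both lines pass through it
```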
def _next_non_masked_element(a, idx):
'Return the next non masked element of a masked array.\n\n If an array is masked, return the next non-masked element (if the given index is masked).\n If no other unmasked points are after the given masked point, returns none.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of numeric values\n idx : integer\n index of requested element\n\n Returns\n -------\n Index of next non-masked element and next non-masked element\n\n '
try:
next_idx = (idx + a[idx:].mask.argmin())
if ma.is_masked(a[next_idx]):
return (None, None)
else:
return (next_idx, a[next_idx])
except (AttributeError, TypeError, IndexError):
return (idx, a[idx])
| -2,391,143,805,257,920,500
|
Return the next non-masked element of a masked array.
If the array is masked and the given index is masked, return the next non-masked element.
If no unmasked points remain after the given masked point, returns ``(None, None)``.
Parameters
----------
a : array-like
1-dimensional array of numeric values
idx : integer
index of requested element
Returns
-------
Index of next non-masked element and next non-masked element
|
src/metpy/calc/tools.py
|
_next_non_masked_element
|
Exi666/MetPy
|
python
|
def _next_non_masked_element(a, idx):
'Return the next non masked element of a masked array.\n\n If an array is masked, return the next non-masked element (if the given index is masked).\n If no other unmasked points are after the given masked point, returns none.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of numeric values\n idx : integer\n index of requested element\n\n Returns\n -------\n Index of next non-masked element and next non-masked element\n\n '
try:
next_idx = (idx + a[idx:].mask.argmin())
if ma.is_masked(a[next_idx]):
return (None, None)
else:
return (next_idx, a[next_idx])
except (AttributeError, TypeError, IndexError):
return (idx, a[idx])
|
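The core of the lookup is `a[idx:].mask.argmin()`, which finds the first unmasked position at or after `idx`; a short check with a toy masked array:

```python
import numpy.ma as ma

a = ma.array([10, 20, 30, 40], mask=[False, True, True, False])
idx = 1                                   # requested element is masked
next_idx = idx + a[idx:].mask.argmin()    # first unmasked position at or after idx
print(next_idx, a[next_idx])              # 3 40
```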
def _delete_masked_points(*arrs):
'Delete masked points from arrays.\n\n Takes arrays and removes masked points to help with calculations and plotting.\n\n Parameters\n ----------\n arrs : one or more array-like\n source arrays\n\n Returns\n -------\n arrs : one or more array-like\n arrays with masked elements removed\n\n '
if any((hasattr(a, 'mask') for a in arrs)):
keep = (~ functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs)))
return tuple((ma.asarray(a[keep]) for a in arrs))
else:
return arrs
| 1,146,317,948,787,329,900
|
Delete masked points from arrays.
Takes arrays and removes masked points to help with calculations and plotting.
Parameters
----------
arrs : one or more array-like
source arrays
Returns
-------
arrs : one or more array-like
arrays with masked elements removed
|
src/metpy/calc/tools.py
|
_delete_masked_points
|
Exi666/MetPy
|
python
|
def _delete_masked_points(*arrs):
'Delete masked points from arrays.\n\n Takes arrays and removes masked points to help with calculations and plotting.\n\n Parameters\n ----------\n arrs : one or more array-like\n source arrays\n\n Returns\n -------\n arrs : one or more array-like\n arrays with masked elements removed\n\n '
if any((hasattr(a, 'mask') for a in arrs)):
keep = (~ functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs)))
return tuple((ma.asarray(a[keep]) for a in arrs))
else:
return arrs
|
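A short check of the mask-union logic above with a toy pair of arrays (one masked, one plain):

```python
import functools
import numpy as np
import numpy.ma as ma

t = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])  # one masked point
p = np.array([900.0, 850.0, 700.0])                       # plain array
keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(x) for x in (t, p)))
print(t[keep].compressed(), p[keep])  # [1. 3.] [900. 700.]
```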
@exporter.export
@preprocess_xarray
def reduce_point_density(points, radius, priority=None):
'Return a mask to reduce the density of points in irregularly-spaced data.\n\n This function is used to down-sample a collection of scattered points (e.g. surface\n data), returning a mask that can be used to select the points from one or more arrays\n (e.g. arrays of temperature and dew point). The points selected can be controlled by\n providing an array of ``priority`` values (e.g. rainfall totals to ensure that\n stations with higher precipitation remain in the mask). The points and radius can be\n specified with units. If none are provided, meters are assumed.\n\n Parameters\n ----------\n points : (N, K) array-like\n N locations of the points in K dimensional space\n radius : `pint.Quantity` or float\n Minimum radius allowed between points. If units are not provided, meters is assumed.\n priority : (N, K) array-like, optional\n If given, this should have the same shape as ``points``; these values will\n be used to control selection priority for points.\n\n Returns\n -------\n (N,) array-like of boolean values indicating whether points should be kept. This\n can be used directly to index numpy arrays to return only the desired points.\n\n Examples\n --------\n >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)\n array([ True, False, True])\n >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,\n ... priority=np.array([0.1, 0.9, 0.3]))\n array([False, True, False])\n\n '
if hasattr(radius, 'units'):
radius = radius.to('m').m
if hasattr(points, 'units'):
points = points.to('m').m
if (points.ndim < 2):
points = points.reshape((- 1), 1)
tree = cKDTree(points)
if (priority is not None):
sorted_indices = np.argsort(priority)[::(- 1)]
else:
sorted_indices = range(len(points))
keep = np.ones(len(points), dtype=bool)
for ind in sorted_indices:
if keep[ind]:
neighbors = tree.query_ball_point(points[ind], radius)
keep[neighbors] = False
keep[ind] = True
return keep
| -5,638,059,920,344,696,000
|
Return a mask to reduce the density of points in irregularly-spaced data.
This function is used to down-sample a collection of scattered points (e.g. surface
data), returning a mask that can be used to select the points from one or more arrays
(e.g. arrays of temperature and dew point). The points selected can be controlled by
providing an array of ``priority`` values (e.g. rainfall totals to ensure that
stations with higher precipitation remain in the mask). The points and radius can be
specified with units. If none are provided, meters are assumed.
Parameters
----------
points : (N, K) array-like
N locations of the points in K dimensional space
radius : `pint.Quantity` or float
Minimum radius allowed between points. If units are not provided, meters is assumed.
priority : (N, K) array-like, optional
If given, this should have the same shape as ``points``; these values will
be used to control selection priority for points.
Returns
-------
(N,) array-like of boolean values indicating whether points should be kept. This
can be used directly to index numpy arrays to return only the desired points.
Examples
--------
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)
array([ True, False, True])
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,
... priority=np.array([0.1, 0.9, 0.3]))
array([False, True, False])
|
src/metpy/calc/tools.py
|
reduce_point_density
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
def reduce_point_density(points, radius, priority=None):
'Return a mask to reduce the density of points in irregularly-spaced data.\n\n This function is used to down-sample a collection of scattered points (e.g. surface\n data), returning a mask that can be used to select the points from one or more arrays\n (e.g. arrays of temperature and dew point). The points selected can be controlled by\n providing an array of ``priority`` values (e.g. rainfall totals to ensure that\n stations with higher precipitation remain in the mask). The points and radius can be\n specified with units. If none are provided, meters are assumed.\n\n Parameters\n ----------\n points : (N, K) array-like\n N locations of the points in K dimensional space\n radius : `pint.Quantity` or float\n Minimum radius allowed between points. If units are not provided, meters is assumed.\n priority : (N, K) array-like, optional\n If given, this should have the same shape as ``points``; these values will\n be used to control selection priority for points.\n\n Returns\n -------\n (N,) array-like of boolean values indicating whether points should be kept. This\n can be used directly to index numpy arrays to return only the desired points.\n\n Examples\n --------\n >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)\n array([ True, False, True])\n >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,\n ... priority=np.array([0.1, 0.9, 0.3]))\n array([False, True, False])\n\n '
if hasattr(radius, 'units'):
radius = radius.to('m').m
if hasattr(points, 'units'):
points = points.to('m').m
if (points.ndim < 2):
points = points.reshape((- 1), 1)
tree = cKDTree(points)
if (priority is not None):
sorted_indices = np.argsort(priority)[::(- 1)]
else:
sorted_indices = range(len(points))
keep = np.ones(len(points), dtype=bool)
for ind in sorted_indices:
if keep[ind]:
neighbors = tree.query_ball_point(points[ind], radius)
keep[neighbors] = False
keep[ind] = True
return keep
|
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
'Calculate the bounding pressure and height in a layer.\n\n Given pressure, optional heights, and a bound, return either the closest pressure/height\n or interpolated pressure/height. If no heights are provided, a standard atmosphere\n ([NOAA1976]_) is assumed.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressures\n bound : `pint.Quantity`\n Bound to retrieve (in pressure or height)\n heights : `pint.Quantity`, optional\n Atmospheric heights associated with the pressure levels. Defaults to using\n heights calculated from ``pressure`` assuming a standard atmosphere.\n interpolate : boolean, optional\n Interpolate the bound or return the nearest. Defaults to True.\n\n Returns\n -------\n `pint.Quantity`\n The bound pressure and height.\n\n '
from .basic import height_to_pressure_std, pressure_to_height_std
sort_inds = np.argsort(pressure)[::(- 1)]
pressure = pressure[sort_inds]
if (heights is not None):
heights = heights[sort_inds]
if (bound.dimensionality == {'[length]': (- 1.0), '[mass]': 1.0, '[time]': (- 2.0)}):
if (bound in pressure):
bound_pressure = bound
if (heights is not None):
bound_height = heights[(pressure == bound_pressure)]
else:
bound_height = pressure_to_height_std(bound_pressure)
elif interpolate:
bound_pressure = bound
if (heights is not None):
bound_height = log_interpolate_1d(bound_pressure, pressure, heights)
else:
bound_height = pressure_to_height_std(bound_pressure)
else:
idx = np.abs((pressure - bound)).argmin()
bound_pressure = pressure[idx]
if (heights is not None):
bound_height = heights[idx]
else:
bound_height = pressure_to_height_std(bound_pressure)
elif (bound.dimensionality == {'[length]': 1.0}):
if (heights is not None):
if (bound in heights):
bound_height = bound
bound_pressure = pressure[(heights == bound)]
elif interpolate:
bound_height = bound
bound_pressure = (np.interp(np.atleast_1d(bound.m), heights.m, pressure.m).astype(result_type(bound)) * pressure.units)
else:
idx = np.abs((heights - bound)).argmin()
bound_pressure = pressure[idx]
bound_height = heights[idx]
else:
bound_height = bound
bound_pressure = height_to_pressure_std(bound)
if (not interpolate):
idx = np.abs((pressure - bound_pressure)).argmin()
bound_pressure = pressure[idx]
bound_height = pressure_to_height_std(bound_pressure)
else:
raise ValueError('Bound must be specified in units of length or pressure.')
if (not (_greater_or_close(bound_pressure, (np.nanmin(pressure.m) * pressure.units)) and _less_or_close(bound_pressure, (np.nanmax(pressure.m) * pressure.units)))):
raise ValueError('Specified bound is outside pressure range.')
if ((heights is not None) and (not (_less_or_close(bound_height, (np.nanmax(heights.m) * heights.units)) and _greater_or_close(bound_height, (np.nanmin(heights.m) * heights.units))))):
raise ValueError('Specified bound is outside height range.')
return (bound_pressure, bound_height)
| 8,279,656,246,866,304,000
|
Calculate the bounding pressure and height in a layer.
Given pressure, optional heights, and a bound, return either the closest pressure/height
or interpolated pressure/height. If no heights are provided, a standard atmosphere
([NOAA1976]_) is assumed.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressures
bound : `pint.Quantity`
Bound to retrieve (in pressure or height)
heights : `pint.Quantity`, optional
Atmospheric heights associated with the pressure levels. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere.
interpolate : boolean, optional
Interpolate the bound or return the nearest. Defaults to True.
Returns
-------
`pint.Quantity`
The bound pressure and height.
|
src/metpy/calc/tools.py
|
_get_bound_pressure_height
|
Exi666/MetPy
|
python
|
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
'Calculate the bounding pressure and height in a layer.\n\n Given pressure, optional heights, and a bound, return either the closest pressure/height\n or interpolated pressure/height. If no heights are provided, a standard atmosphere\n ([NOAA1976]_) is assumed.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressures\n bound : `pint.Quantity`\n Bound to retrieve (in pressure or height)\n heights : `pint.Quantity`, optional\n Atmospheric heights associated with the pressure levels. Defaults to using\n heights calculated from ``pressure`` assuming a standard atmosphere.\n interpolate : boolean, optional\n Interpolate the bound or return the nearest. Defaults to True.\n\n Returns\n -------\n `pint.Quantity`\n The bound pressure and height.\n\n '
from .basic import height_to_pressure_std, pressure_to_height_std
sort_inds = np.argsort(pressure)[::(- 1)]
pressure = pressure[sort_inds]
if (heights is not None):
heights = heights[sort_inds]
if (bound.dimensionality == {'[length]': (- 1.0), '[mass]': 1.0, '[time]': (- 2.0)}):
if (bound in pressure):
bound_pressure = bound
if (heights is not None):
bound_height = heights[(pressure == bound_pressure)]
else:
bound_height = pressure_to_height_std(bound_pressure)
elif interpolate:
bound_pressure = bound
if (heights is not None):
bound_height = log_interpolate_1d(bound_pressure, pressure, heights)
else:
bound_height = pressure_to_height_std(bound_pressure)
else:
idx = np.abs((pressure - bound)).argmin()
bound_pressure = pressure[idx]
if (heights is not None):
bound_height = heights[idx]
else:
bound_height = pressure_to_height_std(bound_pressure)
elif (bound.dimensionality == {'[length]': 1.0}):
if (heights is not None):
if (bound in heights):
bound_height = bound
bound_pressure = pressure[(heights == bound)]
elif interpolate:
bound_height = bound
bound_pressure = (np.interp(np.atleast_1d(bound.m), heights.m, pressure.m).astype(result_type(bound)) * pressure.units)
else:
idx = np.abs((heights - bound)).argmin()
bound_pressure = pressure[idx]
bound_height = heights[idx]
else:
bound_height = bound
bound_pressure = height_to_pressure_std(bound)
if (not interpolate):
idx = np.abs((pressure - bound_pressure)).argmin()
bound_pressure = pressure[idx]
bound_height = pressure_to_height_std(bound_pressure)
else:
raise ValueError('Bound must be specified in units of length or pressure.')
if (not (_greater_or_close(bound_pressure, (np.nanmin(pressure.m) * pressure.units)) and _less_or_close(bound_pressure, (np.nanmax(pressure.m) * pressure.units)))):
raise ValueError('Specified bound is outside pressure range.')
if ((heights is not None) and (not (_less_or_close(bound_height, (np.nanmax(heights.m) * heights.units)) and _greater_or_close(bound_height, (np.nanmin(heights.m) * heights.units))))):
raise ValueError('Specified bound is outside height range.')
return (bound_pressure, bound_height)
|
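A minimal sketch of how this private helper behaves (values are illustrative; since it is not part of the public API, it is imported from `metpy.calc.tools` here):

```python
import numpy as np
from metpy.units import units
from metpy.calc.tools import _get_bound_pressure_height  # private helper

# Hypothetical sounding; no heights are supplied, so the height half of
# the answer falls back to the standard atmosphere.
pressure = np.array([1000., 900., 800., 700.]) * units.hPa

# 850 hPa is not in the profile, so with the default interpolate=True the
# bound pressure is taken as-is and its height comes from
# pressure_to_height_std.
p_bound, z_bound = _get_bound_pressure_height(pressure, 850. * units.hPa)
```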
@exporter.export
@preprocess_xarray
@check_units('[length]')
def get_layer_heights(heights, depth, *args, bottom=None, interpolate=True, with_agl=False):
'Return an atmospheric layer from upper air data with the requested bottom and depth.\n\n This function will subset an upper air dataset to contain only the specified layer using\n the heights only.\n\n Parameters\n ----------\n heights : array-like\n Atmospheric heights\n depth : `pint.Quantity`\n The thickness of the layer\n args : array-like\n Atmospheric variable(s) measured at the given pressures\n bottom : `pint.Quantity`, optional\n The bottom of the layer\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data. Defaults\n to True.\n with_agl : bool, optional\n Returns the heights as above ground level by subtracting the minimum height in the\n provided heights. Defaults to False.\n\n Returns\n -------\n `pint.Quantity, pint.Quantity`\n The height and data variables of the layer\n\n '
for datavar in args:
if (len(heights) != len(datavar)):
raise ValueError('Height and data variables must have the same length.')
if with_agl:
sfc_height = np.min(heights)
heights = (heights - sfc_height)
if (bottom is None):
bottom = heights[0]
heights = heights.to_base_units()
bottom = bottom.to_base_units()
top = (bottom + depth)
ret = []
sort_inds = np.argsort(heights)
heights = heights[sort_inds]
inds = (_greater_or_close(heights, bottom) & _less_or_close(heights, top))
heights_interp = heights[inds]
if interpolate:
if (top not in heights_interp):
heights_interp = (np.sort(np.append(heights_interp.m, top.m)) * heights.units)
if (bottom not in heights_interp):
heights_interp = (np.sort(np.append(heights_interp.m, bottom.m)) * heights.units)
ret.append(heights_interp)
for datavar in args:
datavar = datavar[sort_inds]
if interpolate:
datavar_interp = interpolate_1d(heights_interp, heights, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar)
return ret
| 832,905,234,243,500,700
|
Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer using
the heights only.
Parameters
----------
heights : array-like
Atmospheric heights
depth : `pint.Quantity`
The thickness of the layer
args : array-like
Atmospheric variable(s) measured at the given pressures
bottom : `pint.Quantity`, optional
The bottom of the layer
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
with_agl : bool, optional
Returns the heights as above ground level by subtracting the minimum height in the
provided heights. Defaults to False.
Returns
-------
`pint.Quantity, pint.Quantity`
The height and data variables of the layer
|
src/metpy/calc/tools.py
|
get_layer_heights
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
@check_units('[length]')
def get_layer_heights(heights, depth, *args, bottom=None, interpolate=True, with_agl=False):
'Return an atmospheric layer from upper air data with the requested bottom and depth.\n\n This function will subset an upper air dataset to contain only the specified layer using\n the heights only.\n\n Parameters\n ----------\n heights : array-like\n Atmospheric heights\n depth : `pint.Quantity`\n The thickness of the layer\n args : array-like\n Atmospheric variable(s) measured at the given pressures\n bottom : `pint.Quantity`, optional\n The bottom of the layer\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data. Defaults\n to True.\n with_agl : bool, optional\n Returns the heights as above ground level by subtracting the minimum height in the\n provided heights. Defaults to False.\n\n Returns\n -------\n `pint.Quantity, pint.Quantity`\n The height and data variables of the layer\n\n '
for datavar in args:
if (len(heights) != len(datavar)):
raise ValueError('Height and data variables must have the same length.')
if with_agl:
sfc_height = np.min(heights)
heights = (heights - sfc_height)
if (bottom is None):
bottom = heights[0]
heights = heights.to_base_units()
bottom = bottom.to_base_units()
top = (bottom + depth)
ret = []
sort_inds = np.argsort(heights)
heights = heights[sort_inds]
inds = (_greater_or_close(heights, bottom) & _less_or_close(heights, top))
heights_interp = heights[inds]
if interpolate:
if (top not in heights_interp):
heights_interp = (np.sort(np.append(heights_interp.m, top.m)) * heights.units)
if (bottom not in heights_interp):
heights_interp = (np.sort(np.append(heights_interp.m, bottom.m)) * heights.units)
ret.append(heights_interp)
for datavar in args:
datavar = datavar[sort_inds]
if interpolate:
datavar_interp = interpolate_1d(heights_interp, heights, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar)
return ret
|
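A short usage sketch with illustrative values (assuming the usual public import path):

```python
import numpy as np
from metpy.units import units
from metpy.calc import get_layer_heights

heights = np.array([0., 250., 500., 750., 1000.]) * units.m
temperature = np.array([290., 288., 286., 284., 282.]) * units.kelvin

# Lowest 600 m of the profile; the 600 m top is not a data point, so it
# is interpolated (interpolate=True is the default).
z_layer, t_layer = get_layer_heights(heights, 600 * units.m, temperature)
```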
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def get_layer(pressure, *args, heights=None, bottom=None, depth=(100 * units.hPa), interpolate=True):
'Return an atmospheric layer from upper air data with the requested bottom and depth.\n\n This function will subset an upper air dataset to contain only the specified layer. The\n bottom of the layer can be specified with a pressure or height above the surface\n pressure. The bottom defaults to the surface pressure. The depth of the layer can be\n specified in terms of pressure or height above the bottom of the layer. If the top and\n bottom of the layer are not in the data, they are interpolated by default.\n\n Parameters\n ----------\n pressure : array-like\n Atmospheric pressure profile\n args : array-like\n Atmospheric variable(s) measured at the given pressures\n heights: array-like, optional\n Atmospheric heights corresponding to the given pressures. Defaults to using\n heights calculated from ``p`` assuming a standard atmosphere [NOAA1976]_.\n bottom : `pint.Quantity`, optional\n The bottom of the layer as a pressure or height above the surface pressure. Defaults\n to the highest pressure or lowest height given.\n depth : `pint.Quantity`, optional\n The thickness of the layer as a pressure or height above the bottom of the layer.\n Defaults to 100 hPa.\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data. Defaults\n to True.\n\n Returns\n -------\n `pint.Quantity, pint.Quantity`\n The pressure and data variables of the layer\n\n '
if (depth is None):
depth = (100 * units.hPa)
for datavar in args:
if (len(pressure) != len(datavar)):
raise ValueError('Pressure and data variables must have the same length.')
if (bottom is None):
bottom = (np.nanmax(pressure.m) * pressure.units)
(bottom_pressure, bottom_height) = _get_bound_pressure_height(pressure, bottom, heights=heights, interpolate=interpolate)
if (depth.dimensionality == {'[length]': (- 1.0), '[mass]': 1.0, '[time]': (- 2.0)}):
top = (bottom_pressure - depth)
elif (depth.dimensionality == {'[length]': 1.0}):
top = (bottom_height + depth)
else:
raise ValueError('Depth must be specified in units of length or pressure')
(top_pressure, _) = _get_bound_pressure_height(pressure, top, heights=heights, interpolate=interpolate)
ret = []
sort_inds = np.argsort(pressure)
pressure = pressure[sort_inds]
inds = (_less_or_close(pressure, bottom_pressure) & _greater_or_close(pressure, top_pressure))
p_interp = pressure[inds]
if interpolate:
if (not np.any(np.isclose(top_pressure, p_interp))):
p_interp = (np.sort(np.append(p_interp.m, top_pressure.m)) * pressure.units)
if (not np.any(np.isclose(bottom_pressure, p_interp))):
p_interp = (np.sort(np.append(p_interp.m, bottom_pressure.m)) * pressure.units)
ret.append(p_interp[::(- 1)])
for datavar in args:
datavar = datavar[sort_inds]
if interpolate:
datavar_interp = log_interpolate_1d(p_interp, pressure, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar[::(- 1)])
return ret
| -6,791,250,193,567,541,000
|
Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
args : array-like
Atmospheric variable(s) measured at the given pressures
heights: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
heights calculated from ``p`` assuming a standard atmosphere [NOAA1976]_.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the highest pressure or lowest height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer
|
src/metpy/calc/tools.py
|
get_layer
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def get_layer(pressure, *args, heights=None, bottom=None, depth=(100 * units.hPa), interpolate=True):
'Return an atmospheric layer from upper air data with the requested bottom and depth.\n\n This function will subset an upper air dataset to contain only the specified layer. The\n bottom of the layer can be specified with a pressure or height above the surface\n pressure. The bottom defaults to the surface pressure. The depth of the layer can be\n specified in terms of pressure or height above the bottom of the layer. If the top and\n bottom of the layer are not in the data, they are interpolated by default.\n\n Parameters\n ----------\n pressure : array-like\n Atmospheric pressure profile\n args : array-like\n Atmospheric variable(s) measured at the given pressures\n heights: array-like, optional\n Atmospheric heights corresponding to the given pressures. Defaults to using\n heights calculated from ``p`` assuming a standard atmosphere [NOAA1976]_.\n bottom : `pint.Quantity`, optional\n The bottom of the layer as a pressure or height above the surface pressure. Defaults\n to the highest pressure or lowest height given.\n depth : `pint.Quantity`, optional\n The thickness of the layer as a pressure or height above the bottom of the layer.\n Defaults to 100 hPa.\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data. Defaults\n to True.\n\n Returns\n -------\n `pint.Quantity, pint.Quantity`\n The pressure and data variables of the layer\n\n '
if (depth is None):
depth = (100 * units.hPa)
for datavar in args:
if (len(pressure) != len(datavar)):
raise ValueError('Pressure and data variables must have the same length.')
if (bottom is None):
bottom = (np.nanmax(pressure.m) * pressure.units)
(bottom_pressure, bottom_height) = _get_bound_pressure_height(pressure, bottom, heights=heights, interpolate=interpolate)
if (depth.dimensionality == {'[length]': (- 1.0), '[mass]': 1.0, '[time]': (- 2.0)}):
top = (bottom_pressure - depth)
elif (depth.dimensionality == {'[length]': 1.0}):
top = (bottom_height + depth)
else:
raise ValueError('Depth must be specified in units of length or pressure')
(top_pressure, _) = _get_bound_pressure_height(pressure, top, heights=heights, interpolate=interpolate)
ret = []
sort_inds = np.argsort(pressure)
pressure = pressure[sort_inds]
inds = (_less_or_close(pressure, bottom_pressure) & _greater_or_close(pressure, top_pressure))
p_interp = pressure[inds]
if interpolate:
if (not np.any(np.isclose(top_pressure, p_interp))):
p_interp = (np.sort(np.append(p_interp.m, top_pressure.m)) * pressure.units)
if (not np.any(np.isclose(bottom_pressure, p_interp))):
p_interp = (np.sort(np.append(p_interp.m, bottom_pressure.m)) * pressure.units)
ret.append(p_interp[::(- 1)])
for datavar in args:
datavar = datavar[sort_inds]
if interpolate:
datavar_interp = log_interpolate_1d(p_interp, pressure, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar[::(- 1)])
return ret
|
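A short usage sketch with illustrative values:

```python
import numpy as np
from metpy.units import units
from metpy.calc import get_layer

pressure = np.array([1000., 925., 850., 700., 500.]) * units.hPa
temperature = np.array([295., 290., 285., 278., 265.]) * units.kelvin

# Default bottom is the surface pressure (1000 hPa) and default depth is
# 100 hPa, so this yields the 1000-900 hPa slice with the 900 hPa level
# log-interpolated into the data.
p_layer, t_layer = get_layer(pressure, temperature)
```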
@exporter.export
@preprocess_xarray
def find_bounding_indices(arr, values, axis, from_below=True):
'Find the indices surrounding the values within arr along axis.\n\n Returns a set of above, below, good. Above and below are lists of arrays of indices.\n These lists are formulated such that they can be used directly to index into a numpy\n array and get the expected results (no extra slices or ellipsis necessary). `good` is\n a boolean array indicating the "columns" that actually had values to bound the desired\n value(s).\n\n Parameters\n ----------\n arr : array-like\n Array to search for values\n\n values: array-like\n One or more values to search for in `arr`\n\n axis : int\n The dimension of `arr` along which to search.\n\n from_below : bool, optional\n Whether to search from "below" (i.e. low indices to high indices). If `False`,\n the search will instead proceed from high indices to low indices. Defaults to `True`.\n\n Returns\n -------\n above : list of arrays\n List of broadcasted indices to the location above the desired value\n\n below : list of arrays\n List of broadcasted indices to the location below the desired value\n\n good : array\n Boolean array indicating where the search found proper bounds for the desired value\n\n '
indices_shape = list(arr.shape)
indices_shape[axis] = len(values)
indices = np.empty(indices_shape, dtype=int)
good = np.empty(indices_shape, dtype=bool)
store_slice = ([slice(None)] * arr.ndim)
for (level_index, value) in enumerate(values):
switches = np.abs(np.diff((arr <= value).astype(int), axis=axis))
good_search = np.any(switches, axis=axis)
if from_below:
index = (switches.argmax(axis=axis) + 1)
else:
arr_slice = ([slice(None)] * arr.ndim)
arr_slice[axis] = slice(None, None, (- 1))
index = ((arr.shape[axis] - 1) - switches[tuple(arr_slice)].argmax(axis=axis))
index[(~ good_search)] = 0
store_slice[axis] = level_index
indices[tuple(store_slice)] = index
good[tuple(store_slice)] = good_search
above = broadcast_indices(arr, indices, arr.ndim, axis)
below = broadcast_indices(arr, (indices - 1), arr.ndim, axis)
return (above, below, good)
| 2,505,778,878,363,609,000
|
Find the indices surrounding the values within arr along axis.
Returns a set of above, below, good. Above and below are lists of arrays of indices.
These lists are formulated such that they can be used directly to index into a numpy
array and get the expected results (no extra slices or ellipsis necessary). `good` is
a boolean array indicating the "columns" that actually had values to bound the desired
value(s).
Parameters
----------
arr : array-like
Array to search for values
values: array-like
One or more values to search for in `arr`
axis : int
The dimension of `arr` along which to search.
from_below : bool, optional
Whether to search from "below" (i.e. low indices to high indices). If `False`,
the search will instead proceed from high indices to low indices. Defaults to `True`.
Returns
-------
above : list of arrays
List of broadcasted indices to the location above the desired value
below : list of arrays
List of broadcasted indices to the location below the desired value
good : array
Boolean array indicating where the search found proper bounds for the desired value
|
src/metpy/calc/tools.py
|
find_bounding_indices
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
def find_bounding_indices(arr, values, axis, from_below=True):
'Find the indices surrounding the values within arr along axis.\n\n Returns a set of above, below, good. Above and below are lists of arrays of indices.\n These lists are formulated such that they can be used directly to index into a numpy\n array and get the expected results (no extra slices or ellipsis necessary). `good` is\n a boolean array indicating the "columns" that actually had values to bound the desired\n value(s).\n\n Parameters\n ----------\n arr : array-like\n Array to search for values\n\n values: array-like\n One or more values to search for in `arr`\n\n axis : int\n The dimension of `arr` along which to search.\n\n from_below : bool, optional\n Whether to search from "below" (i.e. low indices to high indices). If `False`,\n the search will instead proceed from high indices to low indices. Defaults to `True`.\n\n Returns\n -------\n above : list of arrays\n List of broadcasted indices to the location above the desired value\n\n below : list of arrays\n List of broadcasted indices to the location below the desired value\n\n good : array\n Boolean array indicating where the search found proper bounds for the desired value\n\n '
indices_shape = list(arr.shape)
indices_shape[axis] = len(values)
indices = np.empty(indices_shape, dtype=int)
good = np.empty(indices_shape, dtype=bool)
store_slice = ([slice(None)] * arr.ndim)
for (level_index, value) in enumerate(values):
switches = np.abs(np.diff((arr <= value).astype(int), axis=axis))
good_search = np.any(switches, axis=axis)
if from_below:
index = (switches.argmax(axis=axis) + 1)
else:
arr_slice = ([slice(None)] * arr.ndim)
arr_slice[axis] = slice(None, None, (- 1))
index = ((arr.shape[axis] - 1) - switches[tuple(arr_slice)].argmax(axis=axis))
index[(~ good_search)] = 0
store_slice[axis] = level_index
indices[tuple(store_slice)] = index
good[tuple(store_slice)] = good_search
above = broadcast_indices(arr, indices, arr.ndim, axis)
below = broadcast_indices(arr, (indices - 1), arr.ndim, axis)
return (above, below, good)
|
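A small sketch of the returned index lists on an illustrative array; the index lists are wrapped in `tuple()` here for forward-compatible NumPy fancy indexing:

```python
import numpy as np
from metpy.calc import find_bounding_indices

# Two "columns" of pressure decreasing along axis 0.
arr = np.array([[1000., 1000.],
                [900., 850.],
                [800., 700.]])

above, below, good = find_bounding_indices(arr, [875.], axis=0)
arr[tuple(above)]  # [[800., 850.]]  -- first level past the crossing
arr[tuple(below)]  # [[900., 1000.]] -- level just before the crossing
good               # [[True, True]]  -- both columns bracket 875.
```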
def _greater_or_close(a, value, **kwargs):
'Compare values for greater or close to boolean masks.\n\n Returns a boolean mask for values greater than or equal to a target within a specified\n absolute or relative tolerance (as in :func:`numpy.isclose`).\n\n Parameters\n ----------\n a : array-like\n Array of values to be compared\n value : float\n Comparison value\n\n Returns\n -------\n array-like\n Boolean array where values are greater than or nearly equal to value.\n\n '
return ((a > value) | np.isclose(a, value, **kwargs))
| -7,560,346,119,085,802,000
|
Compare values for greater or close to boolean masks.
Returns a boolean mask for values greater than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are greater than or nearly equal to value.
|
src/metpy/calc/tools.py
|
_greater_or_close
|
Exi666/MetPy
|
python
|
def _greater_or_close(a, value, **kwargs):
'Compare values for greater or close to boolean masks.\n\n Returns a boolean mask for values greater than or equal to a target within a specified\n absolute or relative tolerance (as in :func:`numpy.isclose`).\n\n Parameters\n ----------\n a : array-like\n Array of values to be compared\n value : float\n Comparison value\n\n Returns\n -------\n array-like\n Boolean array where values are greater than or nearly equal to value.\n\n '
return ((a > value) | np.isclose(a, value, **kwargs))
|
def _less_or_close(a, value, **kwargs):
'Compare values for less or close to boolean masks.\n\n Returns a boolean mask for values less than or equal to a target within a specified\n absolute or relative tolerance (as in :func:`numpy.isclose`).\n\n Parameters\n ----------\n a : array-like\n Array of values to be compared\n value : float\n Comparison value\n\n Returns\n -------\n array-like\n Boolean array where values are less than or nearly equal to value.\n\n '
return ((a < value) | np.isclose(a, value, **kwargs))
| -8,507,754,622,202,489,000
|
Compare values for less or close to boolean masks.
Returns a boolean mask for values less than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are less than or nearly equal to value.
|
src/metpy/calc/tools.py
|
_less_or_close
|
Exi666/MetPy
|
python
|
def _less_or_close(a, value, **kwargs):
'Compare values for less or close to boolean masks.\n\n Returns a boolean mask for values less than or equal to a target within a specified\n absolute or relative tolerance (as in :func:`numpy.isclose`).\n\n Parameters\n ----------\n a : array-like\n Array of values to be compared\n value : float\n Comparison value\n\n Returns\n -------\n array-like\n Boolean array where values are less than or nearly equal to value.\n\n '
return ((a < value) | np.isclose(a, value, **kwargs))
|
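A quick illustration of both comparison helpers on a plain array (they are private, so they are imported from `metpy.calc.tools` here):

```python
import numpy as np
from metpy.calc.tools import _greater_or_close, _less_or_close

a = np.array([0.999999999, 1.5, 0.5])
_greater_or_close(a, 1.0)  # [ True,  True, False] -- first value is "close"
_less_or_close(a, 1.0)     # [ True, False,  True]
```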
@exporter.export
@preprocess_xarray
def lat_lon_grid_deltas(longitude, latitude, **kwargs):
'Calculate the delta between grid points that are in a latitude/longitude format.\n\n Calculate the signed delta distance between grid points when the grid spacing is defined by\n delta lat/lon rather than delta x/y\n\n Parameters\n ----------\n longitude : array_like\n array of longitudes defining the grid\n latitude : array_like\n array of latitudes defining the grid\n kwargs\n Other keyword arguments to pass to :class:`~pyproj.Geod`\n\n Returns\n -------\n dx, dy:\n at least two dimensional arrays of signed deltas between grid points in the x and y\n direction\n\n Notes\n -----\n Accepts 1D, 2D, or higher arrays for latitude and longitude\n Assumes [..., Y, X] for >=2 dimensional arrays\n\n '
from pyproj import Geod
if (latitude.ndim != longitude.ndim):
raise ValueError('Latitude and longitude must have the same number of dimensions.')
if (latitude.ndim < 2):
(longitude, latitude) = np.meshgrid(longitude, latitude)
geod_args = {'ellps': 'sphere'}
if kwargs:
geod_args = kwargs
g = Geod(**geod_args)
(forward_az, _, dy) = g.inv(longitude[..., :(- 1), :], latitude[..., :(- 1), :], longitude[..., 1:, :], latitude[..., 1:, :])
dy[((forward_az < (- 90.0)) | (forward_az > 90.0))] *= (- 1)
(forward_az, _, dx) = g.inv(longitude[..., :, :(- 1)], latitude[..., :, :(- 1)], longitude[..., :, 1:], latitude[..., :, 1:])
dx[((forward_az < 0.0) | (forward_az > 180.0))] *= (- 1)
return ((dx * units.meter), (dy * units.meter))
| -3,332,198,802,547,874,000
|
Calculate the delta between grid points that are in a latitude/longitude format.
Calculate the signed delta distance between grid points when the grid spacing is defined by
delta lat/lon rather than delta x/y
Parameters
----------
longitude : array_like
array of longitudes defining the grid
latitude : array_like
array of latitudes defining the grid
kwargs
Other keyword arguments to pass to :class:`~pyproj.Geod`
Returns
-------
dx, dy:
at least two dimensional arrays of signed deltas between grid points in the x and y
direction
Notes
-----
Accepts 1D, 2D, or higher arrays for latitude and longitude
Assumes [..., Y, X] for >=2 dimensional arrays
|
src/metpy/calc/tools.py
|
lat_lon_grid_deltas
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
def lat_lon_grid_deltas(longitude, latitude, **kwargs):
'Calculate the delta between grid points that are in a latitude/longitude format.\n\n Calculate the signed delta distance between grid points when the grid spacing is defined by\n delta lat/lon rather than delta x/y\n\n Parameters\n ----------\n longitude : array_like\n array of longitudes defining the grid\n latitude : array_like\n array of latitudes defining the grid\n kwargs\n Other keyword arguments to pass to :class:`~pyproj.Geod`\n\n Returns\n -------\n dx, dy:\n at least two dimensional arrays of signed deltas between grid points in the x and y\n direction\n\n Notes\n -----\n Accepts 1D, 2D, or higher arrays for latitude and longitude\n Assumes [..., Y, X] for >=2 dimensional arrays\n\n '
from pyproj import Geod
if (latitude.ndim != longitude.ndim):
raise ValueError('Latitude and longitude must have the same number of dimensions.')
if (latitude.ndim < 2):
(longitude, latitude) = np.meshgrid(longitude, latitude)
geod_args = {'ellps': 'sphere'}
if kwargs:
geod_args = kwargs
g = Geod(**geod_args)
(forward_az, _, dy) = g.inv(longitude[..., :(- 1), :], latitude[..., :(- 1), :], longitude[..., 1:, :], latitude[..., 1:, :])
dy[((forward_az < (- 90.0)) | (forward_az > 90.0))] *= (- 1)
(forward_az, _, dx) = g.inv(longitude[..., :, :(- 1)], latitude[..., :, :(- 1)], longitude[..., :, 1:], latitude[..., :, 1:])
dx[((forward_az < 0.0) | (forward_az > 180.0))] *= (- 1)
return ((dx * units.meter), (dy * units.meter))
|
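A brief sketch with 1D inputs (illustrative coordinates; 1D arrays are meshgridded internally and the deltas come back in meters):

```python
import numpy as np
from metpy.calc import lat_lon_grid_deltas

lon = np.arange(-105., -99.)  # 6 longitudes, degrees
lat = np.arange(35., 40.)     # 5 latitudes, degrees

dx, dy = lat_lon_grid_deltas(lon, lat)
dx.shape, dy.shape  # (5, 5) and (4, 6): one fewer point along each axis
```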
@exporter.export
def grid_deltas_from_dataarray(f):
'Calculate the horizontal deltas between grid points of a DataArray.\n\n Calculate the signed delta distance between grid points of a DataArray in the horizontal\n directions, whether the grid is lat/lon or x/y.\n\n Parameters\n ----------\n f : `xarray.DataArray`\n Parsed DataArray on a latitude/longitude grid, in (..., lat, lon) or (..., y, x)\n dimension order\n\n Returns\n -------\n dx, dy:\n arrays of signed deltas between grid points in the x and y directions with dimensions\n matching those of `f`.\n\n See Also\n --------\n lat_lon_grid_deltas\n\n '
if (f.metpy.crs['grid_mapping_name'] == 'latitude_longitude'):
(dx, dy) = lat_lon_grid_deltas(f.metpy.x, f.metpy.y, initstring=f.metpy.cartopy_crs.proj4_init)
slc_x = slc_y = tuple((([np.newaxis] * (f.ndim - 2)) + ([slice(None)] * 2)))
else:
dx = (np.diff(f.metpy.x.metpy.unit_array.to('m').magnitude) * units('m'))
dy = (np.diff(f.metpy.y.metpy.unit_array.to('m').magnitude) * units('m'))
slc = ([np.newaxis] * (f.ndim - 2))
slc_x = tuple((slc + [np.newaxis, slice(None)]))
slc_y = tuple((slc + [slice(None), np.newaxis]))
return (dx[slc_x], dy[slc_y])
| -876,946,303,530,190,800
|
Calculate the horizontal deltas between grid points of a DataArray.
Calculate the signed delta distance between grid points of a DataArray in the horizontal
directions, whether the grid is lat/lon or x/y.
Parameters
----------
f : `xarray.DataArray`
Parsed DataArray on a latitude/longitude grid, in (..., lat, lon) or (..., y, x)
dimension order
Returns
-------
dx, dy:
arrays of signed deltas between grid points in the x and y directions with dimensions
matching those of `f`.
See Also
--------
lat_lon_grid_deltas
|
src/metpy/calc/tools.py
|
grid_deltas_from_dataarray
|
Exi666/MetPy
|
python
|
@exporter.export
def grid_deltas_from_dataarray(f):
'Calculate the horizontal deltas between grid points of a DataArray.\n\n Calculate the signed delta distance between grid points of a DataArray in the horizontal\n directions, whether the grid is lat/lon or x/y.\n\n Parameters\n ----------\n f : `xarray.DataArray`\n Parsed DataArray on a latitude/longitude grid, in (..., lat, lon) or (..., y, x)\n dimension order\n\n Returns\n -------\n dx, dy:\n arrays of signed deltas between grid points in the x and y directions with dimensions\n matching those of `f`.\n\n See Also\n --------\n lat_lon_grid_deltas\n\n '
if (f.metpy.crs['grid_mapping_name'] == 'latitude_longitude'):
(dx, dy) = lat_lon_grid_deltas(f.metpy.x, f.metpy.y, initstring=f.metpy.cartopy_crs.proj4_init)
slc_x = slc_y = tuple((([np.newaxis] * (f.ndim - 2)) + ([slice(None)] * 2)))
else:
dx = (np.diff(f.metpy.x.metpy.unit_array.to('m').magnitude) * units('m'))
dy = (np.diff(f.metpy.y.metpy.unit_array.to('m').magnitude) * units('m'))
slc = ([np.newaxis] * (f.ndim - 2))
slc_x = tuple((slc + [np.newaxis, slice(None)]))
slc_y = tuple((slc + [slice(None), np.newaxis]))
return (dx[slc_x], dy[slc_y])
|
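A sketch of the intended call pattern. The file and variable names below are hypothetical; the dataset must first be parsed by MetPy so the `crs` coordinate and the `.metpy.x`/`.metpy.y` accessors exist:

```python
import xarray as xr
from metpy.calc import grid_deltas_from_dataarray

# Hypothetical projected (x/y) dataset; parse_cf attaches the crs
# coordinate that grid_deltas_from_dataarray inspects.
ds = xr.open_dataset('narr_example.nc').metpy.parse_cf()
temperature = ds['Temperature'].isel(time=0)

dx, dy = grid_deltas_from_dataarray(temperature)  # broadcastable deltas
```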
def xarray_derivative_wrap(func):
'Decorate the derivative functions to make them work nicely with DataArrays.\n\n This will automatically determine if the coordinates can be pulled directly from the\n DataArray, or if a call to lat_lon_grid_deltas is needed.\n '
@functools.wraps(func)
def wrapper(f, **kwargs):
if (('x' in kwargs) or ('delta' in kwargs)):
return preprocess_xarray(func)(f, **kwargs)
elif isinstance(f, xr.DataArray):
axis = f.metpy.find_axis_name(kwargs.get('axis', 0))
new_kwargs = {'axis': f.get_axis_num(axis)}
if check_axis(f[axis], 'time'):
new_kwargs['delta'] = f[axis].metpy.time_deltas
elif check_axis(f[axis], 'longitude'):
(new_kwargs['delta'], _) = grid_deltas_from_dataarray(f)
elif check_axis(f[axis], 'latitude'):
(_, new_kwargs['delta']) = grid_deltas_from_dataarray(f)
else:
new_kwargs['x'] = f[axis].metpy.unit_array
result = func(f.metpy.unit_array, **new_kwargs)
return xr.DataArray(result.magnitude, coords=f.coords, dims=f.dims, attrs={'units': str(result.units)})
else:
raise ValueError('Must specify either "x" or "delta" for value positions when "f" is not a DataArray.')
return wrapper
| -7,242,021,336,284,623,000
|
Decorate the derivative functions to make them work nicely with DataArrays.
This will automatically determine if the coordinates can be pulled directly from the
DataArray, or if a call to lat_lon_grid_deltas is needed.
|
src/metpy/calc/tools.py
|
xarray_derivative_wrap
|
Exi666/MetPy
|
python
|
def xarray_derivative_wrap(func):
'Decorate the derivative functions to make them work nicely with DataArrays.\n\n This will automatically determine if the coordinates can be pulled directly from the\n DataArray, or if a call to lat_lon_grid_deltas is needed.\n '
@functools.wraps(func)
def wrapper(f, **kwargs):
if (('x' in kwargs) or ('delta' in kwargs)):
return preprocess_xarray(func)(f, **kwargs)
elif isinstance(f, xr.DataArray):
axis = f.metpy.find_axis_name(kwargs.get('axis', 0))
new_kwargs = {'axis': f.get_axis_num(axis)}
if check_axis(f[axis], 'time'):
new_kwargs['delta'] = f[axis].metpy.time_deltas
elif check_axis(f[axis], 'longitude'):
(new_kwargs['delta'], _) = grid_deltas_from_dataarray(f)
elif check_axis(f[axis], 'latitude'):
(_, new_kwargs['delta']) = grid_deltas_from_dataarray(f)
else:
new_kwargs['x'] = f[axis].metpy.unit_array
result = func(f.metpy.unit_array, **new_kwargs)
return xr.DataArray(result.magnitude, coords=f.coords, dims=f.dims, attrs={'units': str(result.units)})
else:
raise ValueError('Must specify either "x" or "delta" for value positions when "f" is not a DataArray.')
return wrapper
|
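The practical effect of the wrapper is two dispatch paths for the decorated derivative functions, sketched below with a synthetic DataArray (names and values are illustrative):

```python
import numpy as np
import xarray as xr
from metpy.units import units
from metpy.calc import first_derivative

# Synthetic DataArray carrying a units attribute.
temp = xr.DataArray(np.linspace(280., 290., 5), dims=('x',),
                    attrs={'units': 'kelvin'})

# Explicit path: 'delta' is given, so the wrapper unwraps the DataArray
# and the result is a plain pint.Quantity (kelvin / meter).
dT_dx = first_derivative(temp, axis=-1, delta=1000 * units.m)

# Coordinate-aware path (needs a fully parsed DataArray, e.g. with a
# time coordinate): first_derivative(temp, axis='time') -> DataArray
```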
@exporter.export
@xarray_derivative_wrap
def first_derivative(f, **kwargs):
'Calculate the first derivative of a grid of values.\n\n Works for both regularly-spaced data and grids with varying spacing.\n\n Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with\n attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or\n `delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned\n as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached\n coordinate information belonging to `axis` will be used and the derivative will be returned\n as an `xarray.DataArray`.\n\n This uses 3 points to calculate the derivative, using forward or backward at the edges of\n the grid as appropriate, and centered elsewhere. The irregular spacing is handled\n explicitly, using the formulation as specified by [Bowen2005]_.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n axis : int or str, optional\n The array axis along which to take the derivative. If `f` is ndarray-like, must be an\n integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate\n dimension name or the axis type) or integer (referring to axis number), unless using\n implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults\n to 0.\n x : array-like, optional\n The coordinate values corresponding to the grid points in `f`.\n delta : array-like, optional\n Spacing between the grid points in `f`. Should be one item less than the size\n of `f` along `axis`.\n\n Returns\n -------\n array-like\n The first derivative calculated along the selected axis.\n\n See Also\n --------\n second_derivative\n\n '
(n, axis, delta) = _process_deriv_args(f, kwargs)
slice0 = ([slice(None)] * n)
slice1 = ([slice(None)] * n)
slice2 = ([slice(None)] * n)
delta_slice0 = ([slice(None)] * n)
delta_slice1 = ([slice(None)] * n)
slice0[axis] = slice(None, (- 2))
slice1[axis] = slice(1, (- 1))
slice2[axis] = slice(2, None)
delta_slice0[axis] = slice(None, (- 1))
delta_slice1[axis] = slice(1, None)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
delta_diff = (delta[tuple(delta_slice1)] - delta[tuple(delta_slice0)])
center = (((((- delta[tuple(delta_slice1)]) / (combined_delta * delta[tuple(delta_slice0)])) * f[tuple(slice0)]) + ((delta_diff / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])) * f[tuple(slice1)])) + ((delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])) * f[tuple(slice2)]))
slice0[axis] = slice(None, 1)
slice1[axis] = slice(1, 2)
slice2[axis] = slice(2, 3)
delta_slice0[axis] = slice(None, 1)
delta_slice1[axis] = slice(1, 2)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
big_delta = (combined_delta + delta[tuple(delta_slice0)])
left = (((((- big_delta) / (combined_delta * delta[tuple(delta_slice0)])) * f[tuple(slice0)]) + ((combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])) * f[tuple(slice1)])) - ((delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])) * f[tuple(slice2)]))
slice0[axis] = slice((- 3), (- 2))
slice1[axis] = slice((- 2), (- 1))
slice2[axis] = slice((- 1), None)
delta_slice0[axis] = slice((- 2), (- 1))
delta_slice1[axis] = slice((- 1), None)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
big_delta = (combined_delta + delta[tuple(delta_slice1)])
right = ((((delta[tuple(delta_slice1)] / (combined_delta * delta[tuple(delta_slice0)])) * f[tuple(slice0)]) - ((combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])) * f[tuple(slice1)])) + ((big_delta / (combined_delta * delta[tuple(delta_slice1)])) * f[tuple(slice2)]))
return concatenate((left, center, right), axis=axis)
| -1,198,659,539,310,157,300
|
Calculate the first derivative of a grid of values.
Works for both regularly-spaced data and grids with varying spacing.
Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with
attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or
`delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned
as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached
coordinate information belonging to `axis` will be used and the derivative will be returned
as an `xarray.DataArray`.
This uses 3 points to calculate the derivative, using forward or backward at the edges of
the grid as appropriate, and centered elsewhere. The irregular spacing is handled
explicitly, using the formulation as specified by [Bowen2005]_.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axis : int or str, optional
The array axis along which to take the derivative. If `f` is ndarray-like, must be an
integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
dimension name or the axis type) or integer (referring to axis number), unless using
implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
to 0.
x : array-like, optional
The coordinate values corresponding to the grid points in `f`.
delta : array-like, optional
Spacing between the grid points in `f`. Should be one item less than the size
of `f` along `axis`.
Returns
-------
array-like
The first derivative calculated along the selected axis.
See Also
--------
second_derivative
|
src/metpy/calc/tools.py
|
first_derivative
|
Exi666/MetPy
|
python
|
@exporter.export
@xarray_derivative_wrap
def first_derivative(f, **kwargs):
'Calculate the first derivative of a grid of values.\n\n Works for both regularly-spaced data and grids with varying spacing.\n\n Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with\n attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or\n `delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned\n as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached\n coordinate information belonging to `axis` will be used and the derivative will be returned\n as an `xarray.DataArray`.\n\n This uses 3 points to calculate the derivative, using forward or backward at the edges of\n the grid as appropriate, and centered elsewhere. The irregular spacing is handled\n explicitly, using the formulation as specified by [Bowen2005]_.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n axis : int or str, optional\n The array axis along which to take the derivative. If `f` is ndarray-like, must be an\n integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate\n dimension name or the axis type) or integer (referring to axis number), unless using\n implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults\n to 0.\n x : array-like, optional\n The coordinate values corresponding to the grid points in `f`.\n delta : array-like, optional\n Spacing between the grid points in `f`. Should be one item less than the size\n of `f` along `axis`.\n\n Returns\n -------\n array-like\n The first derivative calculated along the selected axis.\n\n See Also\n --------\n second_derivative\n\n '
(n, axis, delta) = _process_deriv_args(f, kwargs)
slice0 = ([slice(None)] * n)
slice1 = ([slice(None)] * n)
slice2 = ([slice(None)] * n)
delta_slice0 = ([slice(None)] * n)
delta_slice1 = ([slice(None)] * n)
slice0[axis] = slice(None, (- 2))
slice1[axis] = slice(1, (- 1))
slice2[axis] = slice(2, None)
delta_slice0[axis] = slice(None, (- 1))
delta_slice1[axis] = slice(1, None)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
delta_diff = (delta[tuple(delta_slice1)] - delta[tuple(delta_slice0)])
center = (((((- delta[tuple(delta_slice1)]) / (combined_delta * delta[tuple(delta_slice0)])) * f[tuple(slice0)]) + ((delta_diff / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])) * f[tuple(slice1)])) + ((delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])) * f[tuple(slice2)]))
slice0[axis] = slice(None, 1)
slice1[axis] = slice(1, 2)
slice2[axis] = slice(2, 3)
delta_slice0[axis] = slice(None, 1)
delta_slice1[axis] = slice(1, 2)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
big_delta = (combined_delta + delta[tuple(delta_slice0)])
left = (((((- big_delta) / (combined_delta * delta[tuple(delta_slice0)])) * f[tuple(slice0)]) + ((combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])) * f[tuple(slice1)])) - ((delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])) * f[tuple(slice2)]))
slice0[axis] = slice((- 3), (- 2))
slice1[axis] = slice((- 2), (- 1))
slice2[axis] = slice((- 1), None)
delta_slice0[axis] = slice((- 2), (- 1))
delta_slice1[axis] = slice((- 1), None)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
big_delta = (combined_delta + delta[tuple(delta_slice1)])
right = ((((delta[tuple(delta_slice1)] / (combined_delta * delta[tuple(delta_slice0)])) * f[tuple(slice0)]) - ((combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])) * f[tuple(slice1)])) + ((big_delta / (combined_delta * delta[tuple(delta_slice1)])) * f[tuple(slice2)]))
return concatenate((left, center, right), axis=axis)
|
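A compact check on illustrative values: the three-point formulas (centered in the interior, one-sided at the edges) are exact for quadratics, so the derivative of `x**2` is recovered exactly even on an irregular grid:

```python
import numpy as np
from metpy.units import units
from metpy.calc import first_derivative

x = np.array([0., 1., 3., 6.]) * units.m  # irregular spacing
f = x ** 2
first_derivative(f, x=x)  # [0, 2, 6, 12] meter, i.e. 2*x everywhere
```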
@exporter.export
@xarray_derivative_wrap
def second_derivative(f, **kwargs):
'Calculate the second derivative of a grid of values.\n\n Works for both regularly-spaced data and grids with varying spacing.\n\n Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with\n attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or\n `delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned\n as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached\n coordinate information belonging to `axis` will be used and the derivative will be returned\n as an `xarray.DataArray`.\n\n This uses 3 points to calculate the derivative, using forward or backward at the edges of\n the grid as appropriate, and centered elsewhere. The irregular spacing is handled\n explicitly, using the formulation as specified by [Bowen2005]_.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n axis : int or str, optional\n The array axis along which to take the derivative. If `f` is ndarray-like, must be an\n integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate\n dimension name or the axis type) or integer (referring to axis number), unless using\n implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults\n to 0.\n x : array-like, optional\n The coordinate values corresponding to the grid points in `f`.\n delta : array-like, optional\n Spacing between the grid points in `f`. There should be one item less than the size\n of `f` along `axis`.\n\n Returns\n -------\n array-like\n The second derivative calculated along the selected axis.\n\n See Also\n --------\n first_derivative\n\n '
(n, axis, delta) = _process_deriv_args(f, kwargs)
slice0 = ([slice(None)] * n)
slice1 = ([slice(None)] * n)
slice2 = ([slice(None)] * n)
delta_slice0 = ([slice(None)] * n)
delta_slice1 = ([slice(None)] * n)
slice0[axis] = slice(None, (- 2))
slice1[axis] = slice(1, (- 1))
slice2[axis] = slice(2, None)
delta_slice0[axis] = slice(None, (- 1))
delta_slice1[axis] = slice(1, None)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
center = (2 * (((f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])) - (f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)]))) + (f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))))
slice0[axis] = slice(None, 1)
slice1[axis] = slice(1, 2)
slice2[axis] = slice(2, 3)
delta_slice0[axis] = slice(None, 1)
delta_slice1[axis] = slice(1, 2)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
left = (2 * (((f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])) - (f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)]))) + (f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))))
slice0[axis] = slice((- 3), (- 2))
slice1[axis] = slice((- 2), (- 1))
slice2[axis] = slice((- 1), None)
delta_slice0[axis] = slice((- 2), (- 1))
delta_slice1[axis] = slice((- 1), None)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
right = (2 * (((f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])) - (f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)]))) + (f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))))
return concatenate((left, center, right), axis=axis)
| -1,680,862,827,805,037,800
|
Calculate the second derivative of a grid of values.
Works for both regularly-spaced data and grids with varying spacing.
Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with
attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or
`delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned
as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached
coordinate information belonging to `axis` will be used and the derivative will be returned
as an `xarray.DataArray`.
This uses 3 points to calculate the derivative, using forward or backward at the edges of
the grid as appropriate, and centered elsewhere. The irregular spacing is handled
explicitly, using the formulation as specified by [Bowen2005]_.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axis : int or str, optional
The array axis along which to take the derivative. If `f` is ndarray-like, must be an
integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
dimension name or the axis type) or integer (referring to axis number), unless using
implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
to 0.
x : array-like, optional
The coordinate values corresponding to the grid points in `f`.
delta : array-like, optional
Spacing between the grid points in `f`. There should be one item less than the size
of `f` along `axis`.
Returns
-------
array-like
The second derivative calculated along the selected axis.
See Also
--------
first_derivative
|
src/metpy/calc/tools.py
|
second_derivative
|
Exi666/MetPy
|
python
|
@exporter.export
@xarray_derivative_wrap
def second_derivative(f, **kwargs):
'Calculate the second derivative of a grid of values.\n\n Works for both regularly-spaced data and grids with varying spacing.\n\n Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with\n attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or\n `delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned\n as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached\n coordinate information belonging to `axis` will be used and the derivative will be returned\n as an `xarray.DataArray`.\n\n This uses 3 points to calculate the derivative, using forward or backward at the edges of\n the grid as appropriate, and centered elsewhere. The irregular spacing is handled\n explicitly, using the formulation as specified by [Bowen2005]_.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n axis : int or str, optional\n The array axis along which to take the derivative. If `f` is ndarray-like, must be an\n integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate\n dimension name or the axis type) or integer (referring to axis number), unless using\n implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults\n to 0.\n x : array-like, optional\n The coordinate values corresponding to the grid points in `f`.\n delta : array-like, optional\n Spacing between the grid points in `f`. There should be one item less than the size\n of `f` along `axis`.\n\n Returns\n -------\n array-like\n The second derivative calculated along the selected axis.\n\n See Also\n --------\n first_derivative\n\n '
(n, axis, delta) = _process_deriv_args(f, kwargs)
slice0 = ([slice(None)] * n)
slice1 = ([slice(None)] * n)
slice2 = ([slice(None)] * n)
delta_slice0 = ([slice(None)] * n)
delta_slice1 = ([slice(None)] * n)
slice0[axis] = slice(None, (- 2))
slice1[axis] = slice(1, (- 1))
slice2[axis] = slice(2, None)
delta_slice0[axis] = slice(None, (- 1))
delta_slice1[axis] = slice(1, None)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
center = (2 * (((f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])) - (f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)]))) + (f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))))
slice0[axis] = slice(None, 1)
slice1[axis] = slice(1, 2)
slice2[axis] = slice(2, 3)
delta_slice0[axis] = slice(None, 1)
delta_slice1[axis] = slice(1, 2)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
left = (2 * (((f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])) - (f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)]))) + (f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))))
slice0[axis] = slice((- 3), (- 2))
slice1[axis] = slice((- 2), (- 1))
slice2[axis] = slice((- 1), None)
delta_slice0[axis] = slice((- 2), (- 1))
delta_slice1[axis] = slice((- 1), None)
combined_delta = (delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)])
right = (2 * (((f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])) - (f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)]))) + (f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))))
return concatenate((left, center, right), axis=axis)
|
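The same quadratic check applies here (illustrative values): the second derivative of `x**2` comes back as the constant 2 on an irregular grid:

```python
import numpy as np
from metpy.units import units
from metpy.calc import second_derivative

x = np.array([0., 1., 3., 6.]) * units.m  # irregular spacing
f = x ** 2
second_derivative(f, x=x)  # [2, 2, 2, 2] (dimensionless: m**2 / m**2)
```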
@exporter.export
def gradient(f, **kwargs):
'Calculate the gradient of a grid of values.\n\n Works for both regularly-spaced data, and grids with varying spacing.\n\n Either `coordinates` or `deltas` must be specified, or `f` must be given as an\n `xarray.DataArray` with attached coordinate and projection information. If `f` is an\n `xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a\n `pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if\n neither `coordinates` nor `deltas` are given, the attached coordinate information belonging\n to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n coordinates : array-like, optional\n Sequence of arrays containing the coordinate values corresponding to the\n grid points in `f` in axis order.\n deltas : array-like, optional\n Sequence of arrays or scalars that specify the spacing between the grid points in `f`\n in axis order. There should be one item less than the size of `f` along the applicable\n axis.\n axes : sequence, optional\n Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to\n `pint.Quantity` is not used) or integers that specify the array axes along which to\n take the derivatives. Defaults to all axes of `f`. If given, and used with\n `coordinates` or `deltas`, its length must be less than or equal to that of the\n `coordinates` or `deltas` given.\n\n Returns\n -------\n tuple of array-like\n The first derivative calculated along each specified axis of the original array\n\n See Also\n --------\n laplacian, first_derivative\n\n Notes\n -----\n If this function is used without the `axes` parameter, the length of `coordinates` or\n `deltas` (as applicable) should match the number of dimensions of `f`.\n\n '
(pos_kwarg, positions, axes) = _process_gradient_args(f, kwargs)
return tuple((first_derivative(f, axis=axis, **{pos_kwarg: positions[ind]}) for (ind, axis) in enumerate(axes)))
| 4,592,195,275,797,524,000
|
Calculate the gradient of a grid of values.
Works for both regularly-spaced data, and grids with varying spacing.
Either `coordinates` or `deltas` must be specified, or `f` must be given as an
`xarray.DataArray` with attached coordinate and projection information. If `f` is an
`xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
`pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
coordinates : array-like, optional
Sequence of arrays containing the coordinate values corresponding to the
grid points in `f` in axis order.
deltas : array-like, optional
Sequence of arrays or scalars that specify the spacing between the grid points in `f`
in axis order. There should be one item less than the size of `f` along the applicable
axis.
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given.
Returns
-------
tuple of array-like
The first derivative calculated along each specified axis of the original array
See Also
--------
laplacian, first_derivative
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
|
src/metpy/calc/tools.py
|
gradient
|
Exi666/MetPy
|
python
|
@exporter.export
def gradient(f, **kwargs):
'Calculate the gradient of a grid of values.\n\n Works for both regularly-spaced data, and grids with varying spacing.\n\n Either `coordinates` or `deltas` must be specified, or `f` must be given as an\n `xarray.DataArray` with attached coordinate and projection information. If `f` is an\n `xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a\n `pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if\n neither `coordinates` nor `deltas` are given, the attached coordinate information belonging\n to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n coordinates : array-like, optional\n Sequence of arrays containing the coordinate values corresponding to the\n grid points in `f` in axis order.\n deltas : array-like, optional\n Sequence of arrays or scalars that specify the spacing between the grid points in `f`\n in axis order. There should be one item less than the size of `f` along the applicable\n axis.\n axes : sequence, optional\n Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to\n `pint.Quantity` is not used) or integers that specify the array axes along which to\n take the derivatives. Defaults to all axes of `f`. If given, and used with\n `coordinates` or `deltas`, its length must be less than or equal to that of the\n `coordinates` or `deltas` given.\n\n Returns\n -------\n tuple of array-like\n The first derivative calculated along each specified axis of the original array\n\n See Also\n --------\n laplacian, first_derivative\n\n Notes\n -----\n If this function is used without the `axes` parameter, the length of `coordinates` or\n `deltas` (as applicable) should match the number of dimensions of `f`.\n\n '
(pos_kwarg, positions, axes) = _process_gradient_args(f, kwargs)
return tuple((first_derivative(f, axis=axis, **{pos_kwarg: positions[ind]}) for (ind, axis) in enumerate(axes)))
|
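A minimal usage sketch for `gradient`, assuming MetPy and its pint-based units registry are installed; the field and spacings below are illustrative:

import numpy as np
from metpy.calc import gradient
from metpy.units import units

# 2-D temperature field on a grid with 1 km spacing in y and 2 km in x
temp = np.random.rand(5, 6) * units.kelvin
grad_y, grad_x = gradient(temp, deltas=(1 * units.km, 2 * units.km))
# each component is a pint.Quantity with units of kelvin / kilometer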
@exporter.export
def laplacian(f, **kwargs):
'Calculate the laplacian of a grid of values.\n\n Works for both regularly-spaced data, and grids with varying spacing.\n\n Either `coordinates` or `deltas` must be specified, or `f` must be given as an\n `xarray.DataArray` with attached coordinate and projection information. If `f` is an\n `xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a\n `pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if\n neither `coordinates` nor `deltas` are given, the attached coordinate information belonging\n to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n coordinates : array-like, optional\n The coordinate values corresponding to the grid points in `f`\n deltas : array-like, optional\n Spacing between the grid points in `f`. There should be one item less than the size\n of `f` along the applicable axis.\n axes : sequence, optional\n Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to\n `pint.Quantity` is not used) or integers that specify the array axes along which to\n take the derivatives. Defaults to all axes of `f`. If given, and used with\n `coordinates` or `deltas`, its length must be less than or equal to that of the\n `coordinates` or `deltas` given.\n\n Returns\n -------\n array-like\n The laplacian\n\n See Also\n --------\n gradient, second_derivative\n\n Notes\n -----\n If this function is used without the `axes` parameter, the length of `coordinates` or\n `deltas` (as applicable) should match the number of dimensions of `f`.\n\n '
(pos_kwarg, positions, axes) = _process_gradient_args(f, kwargs)
derivs = [second_derivative(f, axis=axis, **{pos_kwarg: positions[ind]}) for (ind, axis) in enumerate(axes)]
laplac = sum(derivs)
if isinstance(derivs[0], xr.DataArray):
laplac.attrs['units'] = derivs[0].attrs['units']
return laplac
| 874,958,084,346,929,200
|
Calculate the laplacian of a grid of values.
Works for both regularly-spaced data, and grids with varying spacing.
Either `coordinates` or `deltas` must be specified, or `f` must be given as an
`xarray.DataArray` with attached coordinate and projection information. If `f` is an
`xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
`pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
coordinates : array-like, optional
The coordinate values corresponding to the grid points in `f`
deltas : array-like, optional
Spacing between the grid points in `f`. There should be one item less than the size
of `f` along the applicable axis.
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given.
Returns
-------
array-like
The laplacian
See Also
--------
gradient, second_derivative
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
|
src/metpy/calc/tools.py
|
laplacian
|
Exi666/MetPy
|
python
|
@exporter.export
def laplacian(f, **kwargs):
'Calculate the laplacian of a grid of values.\n\n Works for both regularly-spaced data, and grids with varying spacing.\n\n Either `coordinates` or `deltas` must be specified, or `f` must be given as an\n `xarray.DataArray` with attached coordinate and projection information. If `f` is an\n `xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a\n `pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if\n neither `coordinates` nor `deltas` are given, the attached coordinate information belonging\n to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n coordinates : array-like, optional\n The coordinate values corresponding to the grid points in `f`\n deltas : array-like, optional\n Spacing between the grid points in `f`. There should be one item less than the size\n of `f` along the applicable axis.\n axes : sequence, optional\n Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to\n `pint.Quantity` is not used) or integers that specify the array axes along which to\n take the derivatives. Defaults to all axes of `f`. If given, and used with\n `coordinates` or `deltas`, its length must be less than or equal to that of the\n `coordinates` or `deltas` given.\n\n Returns\n -------\n array-like\n The laplacian\n\n See Also\n --------\n gradient, second_derivative\n\n Notes\n -----\n If this function is used without the `axes` parameter, the length of `coordinates` or\n `deltas` (as applicable) should match the number of dimensions of `f`.\n\n '
(pos_kwarg, positions, axes) = _process_gradient_args(f, kwargs)
derivs = [second_derivative(f, axis=axis, **{pos_kwarg: positions[ind]}) for (ind, axis) in enumerate(axes)]
laplac = sum(derivs)
if isinstance(derivs[0], xr.DataArray):
laplac.attrs['units'] = derivs[0].attrs['units']
return laplac
|
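The corresponding sketch for `laplacian`, under the same assumptions as the `gradient` example above:

lap = laplacian(temp, deltas=(1 * units.km, 2 * units.km))
# a single array: the sum of the second derivatives along y and x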
def _broadcast_to_axis(arr, axis, ndim):
'Handle reshaping coordinate array to have proper dimensionality.\n\n This puts the values along the specified axis.\n '
if ((arr.ndim == 1) and (arr.ndim < ndim)):
new_shape = ([1] * ndim)
new_shape[axis] = arr.size
arr = arr.reshape(*new_shape)
return arr
| -5,780,615,902,284,994,000
|
Handle reshaping coordinate array to have proper dimensionality.
This puts the values along the specified axis.
|
src/metpy/calc/tools.py
|
_broadcast_to_axis
|
Exi666/MetPy
|
python
|
def _broadcast_to_axis(arr, axis, ndim):
'Handle reshaping coordinate array to have proper dimensionality.\n\n This puts the values along the specified axis.\n '
if ((arr.ndim == 1) and (arr.ndim < ndim)):
new_shape = ([1] * ndim)
new_shape[axis] = arr.size
arr = arr.reshape(*new_shape)
return arr
|
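A quick illustration of `_broadcast_to_axis` reshaping a 1-D coordinate array so its values lie along the requested axis:

import numpy as np

x = np.arange(4)
_broadcast_to_axis(x, axis=1, ndim=3).shape  # (1, 4, 1)
_broadcast_to_axis(x, axis=0, ndim=2).shape  # (4, 1)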
def _process_gradient_args(f, kwargs):
'Handle common processing of arguments for gradient and gradient-like functions.'
axes = kwargs.get('axes', range(f.ndim))
def _check_length(positions):
if (('axes' in kwargs) and (len(positions) < len(axes))):
raise ValueError('Length of "coordinates" or "deltas" cannot be less than that of "axes".')
elif (('axes' not in kwargs) and (len(positions) != len(axes))):
raise ValueError('Length of "coordinates" or "deltas" must match the number of dimensions of "f" when "axes" is not given.')
if ('deltas' in kwargs):
if (('coordinates' in kwargs) or ('x' in kwargs)):
raise ValueError('Cannot specify both "coordinates" and "deltas".')
_check_length(kwargs['deltas'])
return ('delta', kwargs['deltas'], axes)
elif ('coordinates' in kwargs):
_check_length(kwargs['coordinates'])
return ('x', kwargs['coordinates'], axes)
elif isinstance(f, xr.DataArray):
return ('pass', axes, axes)
else:
raise ValueError('Must specify either "coordinates" or "deltas" for value positions when "f" is not a DataArray.')
| 5,760,192,490,319,572,000
|
Handle common processing of arguments for gradient and gradient-like functions.
|
src/metpy/calc/tools.py
|
_process_gradient_args
|
Exi666/MetPy
|
python
|
def _process_gradient_args(f, kwargs):
axes = kwargs.get('axes', range(f.ndim))
def _check_length(positions):
if (('axes' in kwargs) and (len(positions) < len(axes))):
raise ValueError('Length of "coordinates" or "deltas" cannot be less than that of "axes".')
elif (('axes' not in kwargs) and (len(positions) != len(axes))):
raise ValueError('Length of "coordinates" or "deltas" must match the number of dimensions of "f" when "axes" is not given.')
if ('deltas' in kwargs):
if (('coordinates' in kwargs) or ('x' in kwargs)):
raise ValueError('Cannot specify both "coordinates" and "deltas".')
_check_length(kwargs['deltas'])
return ('delta', kwargs['deltas'], axes)
elif ('coordinates' in kwargs):
_check_length(kwargs['coordinates'])
return ('x', kwargs['coordinates'], axes)
elif isinstance(f, xr.DataArray):
return ('pass', axes, axes)
else:
raise ValueError('Must specify either "coordinates" or "deltas" for value positions when "f" is not a DataArray.')
|
def _process_deriv_args(f, kwargs):
'Handle common processing of arguments for derivative functions.'
n = f.ndim
axis = normalize_axis_index(kwargs.get('axis', 0), n)
if (f.shape[axis] < 3):
raise ValueError('f must have at least 3 points along the desired axis.')
if ('delta' in kwargs):
if ('x' in kwargs):
raise ValueError('Cannot specify both "x" and "delta".')
delta = atleast_1d(kwargs['delta'])
if (delta.size == 1):
diff_size = list(f.shape)
diff_size[axis] -= 1
delta_units = getattr(delta, 'units', None)
delta = np.broadcast_to(delta, diff_size, subok=True)
if ((not hasattr(delta, 'units')) and (delta_units is not None)):
delta = (delta * delta_units)
else:
delta = _broadcast_to_axis(delta, axis, n)
elif ('x' in kwargs):
x = _broadcast_to_axis(kwargs['x'], axis, n)
delta = diff(x, axis=axis)
else:
raise ValueError('Must specify either "x" or "delta" for value positions.')
return (n, axis, delta)
| -7,415,067,897,977,236,000
|
Handle common processing of arguments for derivative functions.
|
src/metpy/calc/tools.py
|
_process_deriv_args
|
Exi666/MetPy
|
python
|
def _process_deriv_args(f, kwargs):
n = f.ndim
axis = normalize_axis_index(kwargs.get('axis', 0), n)
if (f.shape[axis] < 3):
raise ValueError('f must have at least 3 points along the desired axis.')
if ('delta' in kwargs):
if ('x' in kwargs):
raise ValueError('Cannot specify both "x" and "delta".')
delta = atleast_1d(kwargs['delta'])
if (delta.size == 1):
diff_size = list(f.shape)
diff_size[axis] -= 1
delta_units = getattr(delta, 'units', None)
delta = np.broadcast_to(delta, diff_size, subok=True)
if ((not hasattr(delta, 'units')) and (delta_units is not None)):
delta = (delta * delta_units)
else:
delta = _broadcast_to_axis(delta, axis, n)
elif ('x' in kwargs):
x = _broadcast_to_axis(kwargs['x'], axis, n)
delta = diff(x, axis=axis)
else:
raise ValueError('Must specify either "x" or "delta" for value positions.')
return (n, axis, delta)
|
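A sketch of how `_process_deriv_args` expands a scalar `delta` (the array values are illustrative):

import numpy as np

f = np.arange(12.0).reshape(3, 4)
n, axis, delta = _process_deriv_args(f, {'delta': 2.0, 'axis': 1})
# n == 2, axis == 1, and delta has been broadcast to shape (3, 3):
# one spacing value for each of the three gaps along axis 1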
@exporter.export
@preprocess_xarray
def parse_angle(input_dir):
'Calculate the meteorological angle from directional text.\n\n Works for abbreviations or whole words (E -> 90 | South -> 180)\n and can also parse 22.5 degree angles such as ESE/East South East\n\n Parameters\n ----------\n input_dir : string or array-like\n Directional text such as west, [south-west, ne], etc\n\n Returns\n -------\n `pint.Quantity`\n The angle in degrees\n\n '
if isinstance(input_dir, str):
abb_dirs = _clean_direction([_abbrieviate_direction(input_dir)])
elif hasattr(input_dir, '__len__'):
input_dir_str = ','.join(_clean_direction(input_dir, preprocess=True))
abb_dir_str = _abbrieviate_direction(input_dir_str)
abb_dirs = _clean_direction(abb_dir_str.split(','))
else:
return np.nan
return itemgetter(*abb_dirs)(DIR_DICT)
| -2,738,574,353,686,353,400
|
Calculate the meteorological angle from directional text.
Works for abbreviations or whole words (E -> 90 | South -> 180)
and can also parse 22.5 degree angles such as ESE/East South East
Parameters
----------
input_dir : string or array-like
Directional text such as west, [south-west, ne], etc
Returns
-------
`pint.Quantity`
The angle in degrees
|
src/metpy/calc/tools.py
|
parse_angle
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
def parse_angle(input_dir):
'Calculate the meteorological angle from directional text.\n\n Works for abbreviations or whole words (E -> 90 | South -> 180)\n and can also parse 22.5 degree angles such as ESE/East South East\n\n Parameters\n ----------\n input_dir : string or array-like\n Directional text such as west, [south-west, ne], etc\n\n Returns\n -------\n `pint.Quantity`\n The angle in degrees\n\n '
if isinstance(input_dir, str):
abb_dirs = _clean_direction([_abbrieviate_direction(input_dir)])
elif hasattr(input_dir, '__len__'):
input_dir_str = ','.join(_clean_direction(input_dir, preprocess=True))
abb_dir_str = _abbrieviate_direction(input_dir_str)
abb_dirs = _clean_direction(abb_dir_str.split(','))
else:
return np.nan
return itemgetter(*abb_dirs)(DIR_DICT)
|
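Illustrative calls to `parse_angle`; the returned values follow the 22.5-degree compass convention described in the docstring:

parse_angle('ESE')                 # 112.5 degree
parse_angle(['south-west', 'NE'])  # (225.0 degree, 45.0 degree)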
def _clean_direction(dir_list, preprocess=False):
'Handle None if preprocess, else handles anything not in DIR_STRS.'
if preprocess:
return [(UND if (not isinstance(the_dir, str)) else the_dir) for the_dir in dir_list]
else:
return [(UND if (the_dir not in DIR_STRS) else the_dir) for the_dir in dir_list]
| -4,458,721,442,283,602,400
|
Handle None if preprocess, else handles anything not in DIR_STRS.
|
src/metpy/calc/tools.py
|
_clean_direction
|
Exi666/MetPy
|
python
|
def _clean_direction(dir_list, preprocess=False):
if preprocess:
return [(UND if (not isinstance(the_dir, str)) else the_dir) for the_dir in dir_list]
else:
return [(UND if (the_dir not in DIR_STRS) else the_dir) for the_dir in dir_list]
|
def _abbrieviate_direction(ext_dir_str):
'Convert extended (non-abbreviated) directions to abbreviations.'
return ext_dir_str.upper().replace('_', '').replace('-', '').replace(' ', '').replace('NORTH', 'N').replace('EAST', 'E').replace('SOUTH', 'S').replace('WEST', 'W')
| 4,715,359,313,793,490,000
|
Convert extended (non-abbreviated) directions to abbreviations.
|
src/metpy/calc/tools.py
|
_abbrieviate_direction
|
Exi666/MetPy
|
python
|
def _abbrieviate_direction(ext_dir_str):
return ext_dir_str.upper().replace('_', '').replace('-', '').replace(' ', '').replace('NORTH', 'N').replace('EAST', 'E').replace('SOUTH', 'S').replace('WEST', 'W')
|
@exporter.export
@preprocess_xarray
def angle_to_direction(input_angle, full=False, level=3):
'Convert the meteorological angle to directional text.\n\n Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)\n and rounds to the nearest angle (355 -> N | 404 -> NNE)\n\n Parameters\n ----------\n input_angle : numeric or array-like numeric\n Angles such as 0, 25, 45, 360, 410, etc\n full : boolean\n True returns full text (South), False returns abbreviated text (S)\n level : int\n Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 1 = N/E/S/W)\n\n Returns\n -------\n direction\n The directional text\n\n '
try:
origin_units = input_angle.units
input_angle = input_angle.m
except AttributeError:
origin_units = units.degree
if ((not hasattr(input_angle, '__len__')) or isinstance(input_angle, str)):
input_angle = [input_angle]
scalar = True
else:
scalar = False
input_angle = np.array(input_angle).astype(float)
with np.errstate(invalid='ignore'):
input_angle[np.where((input_angle < 0))] = np.nan
input_angle = (input_angle * origin_units)
normalizer = np.array((input_angle.m / MAX_DEGREE_ANGLE.m), dtype=int)
norm_angles = abs((input_angle - (MAX_DEGREE_ANGLE * normalizer)))
if (level == 3):
nskip = 1
elif (level == 2):
nskip = 2
elif (level == 1):
nskip = 4
else:
err_msg = 'Level of complexity cannot be less than 1 or greater than 3!'
raise ValueError(err_msg)
angle_dict = {((i * BASE_DEGREE_MULTIPLIER.m) * nskip): dir_str for (i, dir_str) in enumerate(DIR_STRS[::nskip])}
angle_dict[MAX_DEGREE_ANGLE.m] = 'N'
angle_dict[UND_ANGLE] = UND
multiplier = np.round((((norm_angles / BASE_DEGREE_MULTIPLIER) / nskip) - 0.001)).m
round_angles = ((multiplier * BASE_DEGREE_MULTIPLIER.m) * nskip)
round_angles[np.where(np.isnan(round_angles))] = UND_ANGLE
dir_str_arr = itemgetter(*round_angles)(angle_dict)
if full:
dir_str_arr = ','.join(dir_str_arr)
dir_str_arr = _unabbrieviate_direction(dir_str_arr)
if (not scalar):
dir_str = dir_str_arr.split(',')
else:
dir_str = dir_str_arr.replace(',', ' ')
else:
dir_str = dir_str_arr
return dir_str
| -3,599,905,639,071,405,000
|
Convert the meteorological angle to directional text.
Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)
and rounds to the nearest angle (355 -> N | 404 -> NNE)
Parameters
----------
input_angle : numeric or array-like numeric
Angles such as 0, 25, 45, 360, 410, etc
full : boolean
True returns full text (South), False returns abbreviated text (S)
level : int
Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 1 = N/E/S/W)
Returns
-------
direction
The directional text
|
src/metpy/calc/tools.py
|
angle_to_direction
|
Exi666/MetPy
|
python
|
@exporter.export
@preprocess_xarray
def angle_to_direction(input_angle, full=False, level=3):
'Convert the meteorological angle to directional text.\n\n Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)\n and rounds to the nearest angle (355 -> N | 404 -> NNE)\n\n Parameters\n ----------\n input_angle : numeric or array-like numeric\n Angles such as 0, 25, 45, 360, 410, etc\n full : boolean\n True returns full text (South), False returns abbreviated text (S)\n level : int\n Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 1 = N/E/S/W)\n\n Returns\n -------\n direction\n The directional text\n\n '
try:
origin_units = input_angle.units
input_angle = input_angle.m
except AttributeError:
origin_units = units.degree
if ((not hasattr(input_angle, '__len__')) or isinstance(input_angle, str)):
input_angle = [input_angle]
scalar = True
else:
scalar = False
input_angle = np.array(input_angle).astype(float)
with np.errstate(invalid='ignore'):
input_angle[np.where((input_angle < 0))] = np.nan
input_angle = (input_angle * origin_units)
normalizer = np.array((input_angle.m / MAX_DEGREE_ANGLE.m), dtype=int)
norm_angles = abs((input_angle - (MAX_DEGREE_ANGLE * normalizer)))
if (level == 3):
nskip = 1
elif (level == 2):
nskip = 2
elif (level == 1):
nskip = 4
else:
err_msg = 'Level of complexity cannot be less than 1 or greater than 3!'
raise ValueError(err_msg)
angle_dict = {((i * BASE_DEGREE_MULTIPLIER.m) * nskip): dir_str for (i, dir_str) in enumerate(DIR_STRS[::nskip])}
angle_dict[MAX_DEGREE_ANGLE.m] = 'N'
angle_dict[UND_ANGLE] = UND
multiplier = np.round((((norm_angles / BASE_DEGREE_MULTIPLIER) / nskip) - 0.001)).m
round_angles = ((multiplier * BASE_DEGREE_MULTIPLIER.m) * nskip)
round_angles[np.where(np.isnan(round_angles))] = UND_ANGLE
dir_str_arr = itemgetter(*round_angles)(angle_dict)
if full:
dir_str_arr = ','.join(dir_str_arr)
dir_str_arr = _unabbrieviate_direction(dir_str_arr)
if (not scalar):
dir_str = dir_str_arr.split(',')
else:
dir_str = dir_str_arr.replace(',', ' ')
else:
dir_str = dir_str_arr
return dir_str
|
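A few illustrative calls to `angle_to_direction`, matching the behaviour documented above:

angle_to_direction(405)             # 'NE'  (angles >= 360 wrap around)
angle_to_direction(355)             # 'N'   (rounds to the nearest sector)
angle_to_direction(225, full=True)  # 'South West'
angle_to_direction(90, level=1)     # 'E'   (level 1 keeps only N/E/S/W)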
def _unabbrieviate_direction(abb_dir_str):
'Convert abbreviated directions to non-abbreviated directions.'
return abb_dir_str.upper().replace(UND, 'Undefined ').replace('N', 'North ').replace('E', 'East ').replace('S', 'South ').replace('W', 'West ').replace(' ,', ',').strip()
| 3,144,671,443,861,817,000
|
Convert abbreviated directions to non-abbreviated directions.
|
src/metpy/calc/tools.py
|
_unabbrieviate_direction
|
Exi666/MetPy
|
python
|
def _unabbrieviate_direction(abb_dir_str):
return abb_dir_str.upper().replace(UND, 'Undefined ').replace('N', 'North ').replace('E', 'East ').replace('S', 'South ').replace('W', 'West ').replace(' ,', ',').strip()
|
def _remove_nans(*variables):
'Remove NaNs from arrays that cause issues with calculations.\n\n Takes a variable number of arguments.\n Returns the arrays with NaN entries removed, in the same order as provided.\n '
mask = None
for v in variables:
if (mask is None):
mask = np.isnan(v)
else:
mask |= np.isnan(v)
ret = []
for v in variables:
ret.append(v[(~ mask)])
return ret
| 3,602,438,539,218,065,000
|
Remove NaNs from arrays that cause issues with calculations.
Takes a variable number of arguments.
Returns the arrays with NaN entries removed, in the same order as provided.
|
src/metpy/calc/tools.py
|
_remove_nans
|
Exi666/MetPy
|
python
|
def _remove_nans(*variables):
'Remove NaNs from arrays that cause issues with calculations.\n\n Takes a variable number of arguments.\n Returns the arrays with NaN entries removed, in the same order as provided.\n '
mask = None
for v in variables:
if (mask is None):
mask = np.isnan(v)
else:
mask |= np.isnan(v)
ret = []
for v in variables:
ret.append(v[(~ mask)])
return ret
|
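A quick demonstration of `_remove_nans` keeping its inputs aligned:

import numpy as np

temperature = np.array([280.0, np.nan, 285.0])
pressure = np.array([1000.0, 990.0, np.nan])
t, p = _remove_nans(temperature, pressure)
# t == [280.0] and p == [1000.0]: any position that is NaN in either
# input is dropped from both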
def get_resources(self, circuit: Union[(Circuit, ResultHandle)]) -> ResourcesResult:
'Calculate resource estimates for circuit.\n\n :param circuit: Circuit to calculate or result handle to retrieve for\n :type circuit: Union[Circuit, ResultHandle]\n :return: Resource estimate\n :rtype: ResourcesResult\n '
if isinstance(circuit, Circuit):
handle = self.process_circuits([circuit])[0]
elif isinstance(circuit, ResultHandle):
handle = circuit
circ_status = self.circuit_status(handle)
if (circ_status.status is not StatusEnum.COMPLETED):
raise ValueError(f"Handle is '{circ_status}'")
else:
raise TypeError('Provide either a Circuit to run or a ResultHandle to a previously submitted circuit.')
return self._cache[handle]['resource']
| 8,368,390,466,831,838,000
|
Calculate resource estimates for circuit.
:param circuit: Circuit to calculate or result handle to retrieve for
:type circuit: Union[Circuit, ResultHandle]
:return: Resource estimate
:rtype: ResourcesResult
|
modules/pytket-qsharp/pytket/extensions/qsharp/backends/estimator.py
|
get_resources
|
dhaycraft/pytket-extensions
|
python
|
def get_resources(self, circuit: Union[(Circuit, ResultHandle)]) -> ResourcesResult:
'Calculate resource estimates for circuit.\n\n :param circuit: Circuit to calculate or result handle to retrieve for\n :type circuit: Union[Circuit, ResultHandle]\n :return: Resource estimate\n :rtype: ResourcesResult\n '
if isinstance(circuit, Circuit):
handle = self.process_circuits([circuit])[0]
elif isinstance(circuit, ResultHandle):
handle = circuit
circ_status = self.circuit_status(handle)
if (circ_status.status is not StatusEnum.COMPLETED):
raise ValueError(f"Handle is '{circ_status}'")
else:
raise TypeError('Provide either a Circuit to run or a ResultHandle to a previously submitted circuit.')
return self._cache[handle]['resource']
|
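A minimal sketch of using `get_resources` with pytket-qsharp's resource-estimator backend; treat the exact import path and the printed keys as assumptions:

from pytket import Circuit
from pytket.extensions.qsharp import QsharpEstimatorBackend  # assumed import path

backend = QsharpEstimatorBackend()
circ = Circuit(2).H(0).CX(0, 1).measure_all()
circ = backend.get_compiled_circuit(circ)  # compile to the backend's gate set
print(backend.get_resources(circ))  # e.g. counts of CNOTs, T gates, qubits used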
def parse(self, response):
'\n `parse` should always `yield` a dict that follows the Event Schema\n <https://city-bureau.github.io/city-scrapers/06_event_schema.html>.\n\n Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping\n needs.\n '
month_counter = datetime.today().month
for x in range(12):
if (month_counter > 12):
break
else:
for item in response.css('ul').css('li')[17:]:
try:
if (item.css('strong').css('a::attr(href)').extract()[0] == 'http://www.humboldtparkportal.org/'):
continue
except:
pass
data = {'_type': 'event', 'name': self._parse_name(item), 'description': self._parse_description(item), 'classification': self._parse_classification(item), 'start_time': self._parse_start(item, month_counter), 'end_time': self._parse_end(item), 'timezone': self._parse_timezone(item), 'status': self._parse_status(item), 'all_day': self._parse_all_day(item), 'location': self._parse_location(item), 'sources': self._parse_sources(response), 'community_area': self._parse_community_area(item)}
data['id'] = self._generate_id(data)
data['end_time'] = (data['start_time'] + timedelta(hours=3))
(yield data)
month_counter += 1
| -7,782,619,201,033,553,000
|
`parse` should always `yield` a dict that follows the Event Schema
<https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
parse
|
jim/city-scrapers
|
python
|
def parse(self, response):
'\n `parse` should always `yield` a dict that follows the Event Schema\n <https://city-bureau.github.io/city-scrapers/06_event_schema.html>.\n\n Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping\n needs.\n '
month_counter = datetime.today().month
for x in range(12):
if (month_counter > 12):
break
else:
for item in response.css('ul').css('li')[17:]:
try:
if (item.css('strong').css('a::attr(href)').extract()[0] == 'http://www.humboldtparkportal.org/'):
continue
except:
pass
data = {'_type': 'event', 'name': self._parse_name(item), 'description': self._parse_description(item), 'classification': self._parse_classification(item), 'start_time': self._parse_start(item, month_counter), 'end_time': self._parse_end(item), 'timezone': self._parse_timezone(item), 'status': self._parse_status(item), 'all_day': self._parse_all_day(item), 'location': self._parse_location(item), 'sources': self._parse_sources(response), 'community_area': self._parse_community_area(item)}
data['id'] = self._generate_id(data)
data['end_time'] = (data['start_time'] + timedelta(hours=3))
(yield data)
month_counter += 1
|
def _parse_community_area(self, item):
'\n Parse or generate community area.\n '
if (len(item.css('li').css('strong::text').extract()) == 1):
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return community_name[0]
| 2,955,008,406,036,623,400
|
Parse or generate community area.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_community_area
|
jim/city-scrapers
|
python
|
def _parse_community_area(self, item):
'\n \n '
if (len(item.css('li').css('strong::text').extract()) == 1):
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return community_name[0]
|
def _parse_name(self, item):
'\n Parse or generate event name.\n '
if (len(item.css('li').css('strong::text').extract()) == 1):
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return (community_name[0] + ' Community Action Council')
| -3,257,092,148,956,459,000
|
Parse or generate event name.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_name
|
jim/city-scrapers
|
python
|
def _parse_name(self, item):
'\n \n '
if (len(item.css('li').css('strong::text').extract()) == 1):
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return (community_name[0] + ' Community Action Council')
|
def _parse_description(self, item):
'\n Parse or generate event description.\n '
return "Community Action Councils, or CACs, consist of 25-30 voting members who are directly involved in developing a strategic plan for educational success within their communities. CAC members include parents; elected officials; faith-based institutions, health care and community-based organizations; Local School Council (LSC) members; business leaders; educators and school administrators; staff members from Chicago's Sister Agencies; community residents; and students. There are nine CACs across Chicago. Each works to empower the community they serve to lead the improvement of local quality education."
| -792,315,873,038,031,000
|
Parse or generate event description.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_description
|
jim/city-scrapers
|
python
|
def _parse_description(self, item):
'\n \n '
return "Community Action Councils, or CACs, consist of 25-30 voting members who are directly involved in developing a strategic plan for educational success within their communities. CAC members include parents; elected officials; faith-based institutions, health care and community-based organizations; Local School Council (LSC) members; business leaders; educators and school administrators; staff members from Chicago's Sister Agencies; community residents; and students. There are nine CACs across Chicago. Each works to empower the community they serve to lead the improvement of local quality education."
|
def _parse_classification(self, item):
'\n Parse or generate classification (e.g. public health, education, etc).\n '
return 'Education'
| -5,466,161,057,394,642,000
|
Parse or generate classification (e.g. public health, education, etc).
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_classification
|
jim/city-scrapers
|
python
|
def _parse_classification(self, item):
'\n \n '
return 'Education'
|
def _parse_start(self, item, month_counter):
'\n Parse start date and time.\n\n Accepts month_counter as an argument from top level parse function to iterate through all months in the year.\n '
def parse_day(source):
'Parses the source material and retrieves the day of the week that the meeting occurs.\n '
day_source = source[0]
day_regex = re.compile('[a-zA-Z]+day')
mo = day_regex.search(day_source)
return mo.group().lower()
def parse_time(source):
'Parses the source material and retrieves the time that the meeting occurs.\n '
time_source = source[1]
time_regex = re.compile('(1[012]|[1-9]):[0-5][0-9](am|pm)')
mo = time_regex.search(time_source)
return mo.group()
def count_days(day, week_count):
'Because the source material provides meeting dates on a recurring schedule, we must use the parsed day\n from the parse_day function and the week count to find the n-th such weekday in the month.\n '
today = datetime.today()
week_day = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5, 'sunday': 6}
week_counter = 0
for x in range(1, 31):
try:
current_date = datetime(today.year, month_counter, x)
if (current_date.weekday() == week_day[day]):
week_counter += 1
if (week_counter == int(week_count)):
return current_date
except ValueError as e:
break
def concat_date(meeting_date, time):
'Combines the meeting date with the time the meeting occurs. The function returns a datetime\n object.\n '
return dateparse(((((((str(meeting_date.year) + '-') + str(meeting_date.month)) + '-') + str(meeting_date.day)) + ' ') + time))
def get_start(source):
'Combines the above-defined parse_day, parse_time, count_days, and concat_date functions to get the start\n date from the source. If a start time cannot be found, the UNIX epoch date is returned.\n '
day = parse_day(source)
week_count = source[0].strip()[0]
if week_count.isdigit():
time = parse_time(source)
meeting_date = count_days(day, week_count)
start = concat_date(meeting_date, time)
else:
start = datetime(1970, 1, 1)  # fall back to the UNIX epoch, as documented
return start
source = item.css('li::text').extract()
return get_start(source)
| 2,692,572,152,009,959,400
|
Parse start date and time.
Accepts month_counter as an argument from top level parse function to iterate through all months in the year.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_start
|
jim/city-scrapers
|
python
|
def _parse_start(self, item, month_counter):
'\n Parse start date and time.\n\n Accepts month_counter as an argument from top level parse function to iterate through all months in the year.\n '
def parse_day(source):
'Parses the source material and retrieves the day of the week that the meeting occurs.\n '
day_source = source[0]
day_regex = re.compile('[a-zA-Z]+day')
mo = day_regex.search(day_source)
return mo.group().lower()
def parse_time(source):
'Parses the source material and retrieves the time that the meeting occurs.\n '
time_source = source[1]
time_regex = re.compile('(1[012]|[1-9]):[0-5][0-9](am|pm)')
mo = time_regex.search(time_source)
return mo.group()
def count_days(day, week_count):
'Because the source material provides meeting dates on a recurring schedule, we must use the parsed day\n from the parse_day function and the week count to find the n-th such weekday in the month.\n '
today = datetime.today()
week_day = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5, 'sunday': 6}
week_counter = 0
for x in range(1, 31):
try:
current_date = datetime(today.year, month_counter, x)
if (current_date.weekday() == week_day[day]):
week_counter += 1
if (week_counter == int(week_count)):
return current_date
except ValueError as e:
break
def concat_date(meeting_date, time):
'Combines the meeting date with the time the meeting occurs. The function returns a datetime\n object.\n '
return dateparse(((((((str(meeting_date.year) + '-') + str(meeting_date.month)) + '-') + str(meeting_date.day)) + ' ') + time))
def get_start(source):
'Combines the above-defined parse_day, parse_time, count_days, and concat_date functions to get the start\n date from the source. If a start time cannot be found, the UNIX epoch date is returned.\n '
day = parse_day(source)
week_count = source[0].strip()[0]
if week_count.isdigit():
time = parse_time(source)
meeting_date = count_days(day, week_count)
start = concat_date(meeting_date, time)
else:
start = datetime(1970, 1, 1)  # fall back to the UNIX epoch, as documented
return start
source = item.css('li::text').extract()
return get_start(source)
|
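The heart of `_parse_start` is the `count_days` logic: find the n-th occurrence of a given weekday in a month. A self-contained sketch of that idea (nth_weekday is a hypothetical name, not part of the spider):

from datetime import datetime

def nth_weekday(year, month, weekday, n):
    # weekday uses datetime.weekday() numbering: 0 = Monday ... 6 = Sunday
    count = 0
    for day in range(1, 32):
        try:
            date = datetime(year, month, day)
        except ValueError:  # ran past the end of the month
            return None
        if date.weekday() == weekday:
            count += 1
            if count == n:
                return date

nth_weekday(2023, 5, 1, 2)  # 2nd Tuesday of May 2023 -> datetime(2023, 5, 9)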
def _parse_end(self, item):
'\n Parse end date and time.\n '
return 'Estimated 3 hours'
| -8,884,588,212,588,568,000
|
Parse end date and time.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_end
|
jim/city-scrapers
|
python
|
def _parse_end(self, item):
'\n \n '
return 'Estimated 3 hours'
|
def _parse_timezone(self, item):
'\n Parse or generate timezone in tzinfo format.\n '
return 'America/Chicago'
| -4,945,136,529,224,096,000
|
Parse or generate timezone in tzinfo format.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_timezone
|
jim/city-scrapers
|
python
|
def _parse_timezone(self, item):
'\n \n '
return 'America/Chicago'
|
def _parse_all_day(self, item):
'\n Parse or generate all-day status. Defaults to False.\n '
return False
| 8,632,466,700,201,192,000
|
Parse or generate all-day status. Defaults to False.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_all_day
|
jim/city-scrapers
|
python
|
def _parse_all_day(self, item):
'\n \n '
return False
|
def _parse_location(self, item):
'\n Parse or generate location. Latitude and longitude can be\n left blank and will be geocoded later.\n '
source = item.css('li::text').extract()[1]
return {'url': None, 'name': source[(source.find('at') + 2):source.find('(')].replace('the', ''), 'address': source[(source.find('(') + 1):source.find(')')], 'coordinates': {'latitude': None, 'longitude': None}}
| 2,512,085,160,778,465,300
|
Parse or generate location. Latitude and longitude can be
left blank and will be geocoded later.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_location
|
jim/city-scrapers
|
python
|
def _parse_location(self, item):
'\n Parse or generate location. Latitude and longitude can be\n left blank and will be geocoded later.\n '
source = item.css('li::text').extract()[1]
return {'url': None, 'name': source[(source.find('at') + 2):source.find('(')].replace('the', ''), 'address': source[(source.find('(') + 1):source.find(')')], 'coordinates': {'latitude': None, 'longitude': None}}
|
def _parse_status(self, item):
'\n Parse or generate status of meeting. Can be one of:\n * cancelled\n * tentative\n * confirmed\n * passed\n By default, return "tentative"\n '
return 'Tentative'
| -3,350,006,070,809,307,000
|
Parse or generate status of meeting. Can be one of:
* cancelled
* tentative
* confirmed
* passed
By default, return "tentative"
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_status
|
jim/city-scrapers
|
python
|
def _parse_status(self, item):
'\n Parse or generate status of meeting. Can be one of:\n * cancelled\n * tentative\n * confirmed\n * passed\n By default, return "tentative"\n '
return 'Tentative'
|
def _parse_sources(self, response):
'\n Parse or generate sources.\n '
return [{'url': response.url, 'note': ''}]
| 4,178,576,951,971,141,000
|
Parse or generate sources.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
_parse_sources
|
jim/city-scrapers
|
python
|
def _parse_sources(self, response):
'\n \n '
return [{'url': response.url, 'note': ''}]
|
def parse_day(source):
'Parses the source material and retrieves the day of the week that the meeting occurs.\n '
day_source = source[0]
day_regex = re.compile('[a-zA-Z]+day')
mo = day_regex.search(day_source)
return mo.group().lower()
| -1,311,422,417,374,438,700
|
Parses the source material and retrieves the day of the week that the meeting occurs.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
parse_day
|
jim/city-scrapers
|
python
|
def parse_day(source):
'\n '
day_source = source[0]
day_regex = re.compile('[a-zA-Z]+day')
mo = day_regex.search(day_source)
return mo.group().lower()
|
def parse_time(source):
'Parses the source material and retrieves the time that the meeting occurs.\n '
time_source = source[1]
time_regex = re.compile('(1[012]|[1-9]):[0-5][0-9](am|pm)')
mo = time_regex.search(time_source)
return mo.group()
| -2,985,683,150,497,293,000
|
Parses the source material and retrieves the time that the meeting occurs.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
parse_time
|
jim/city-scrapers
|
python
|
def parse_time(source):
'\n '
time_source = source[1]
time_regex = re.compile('(1[012]|[1-9]):[0-5][0-9](am|pm)')
mo = time_regex.search(time_source)
return mo.group()
|
def count_days(day, week_count):
'Because the source material provides meeting dates on a recurring schedule, we must use the parsed day\n from the parse_day function and the week count to find the n-th such weekday in the month.\n '
today = datetime.today()
week_day = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5, 'sunday': 6}
week_counter = 0
for x in range(1, 31):
try:
current_date = datetime(today.year, month_counter, x)
if (current_date.weekday() == week_day[day]):
week_counter += 1
if (week_counter == int(week_count)):
return current_date
except ValueError as e:
break
| 1,578,960,398,309,300,000
|
Because the source material provides meeting dates on a recurring schedule, we must use the parsed day
from the parse_day function and the week count to find the n-th such weekday in the month.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
count_days
|
jim/city-scrapers
|
python
|
def count_days(day, week_count):
'Because the source material provides meeting dates on a recurring schedule, we must use the parsed day\n from the parse_day function and the week count to find the n-th such weekday in the month.\n '
today = datetime.today()
week_day = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5, 'sunday': 6}
week_counter = 0
for x in range(1, 31):
try:
current_date = datetime(today.year, month_counter, x)
if (current_date.weekday() == week_day[day]):
week_counter += 1
if (week_counter == int(week_count)):
return current_date
except ValueError as e:
break
|
def concat_date(meeting_date, time):
'Combines the meeting date with the time the meeting occurs. The function returns a datetime\n object.\n '
return dateparse(((((((str(meeting_date.year) + '-') + str(meeting_date.month)) + '-') + str(meeting_date.day)) + ' ') + time))
| -473,956,566,229,258,050
|
Combines the meeting date with the time the meeting occurs. The function returns a datetime
object.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
concat_date
|
jim/city-scrapers
|
python
|
def concat_date(meeting_date, time):
'Combines the meeting date with the time the meeting occurs. The function returns a datetime\n object.\n '
return dateparse(((((((str(meeting_date.year) + '-') + str(meeting_date.month)) + '-') + str(meeting_date.day)) + ' ') + time))
|
def get_start(source):
'Combines the above-defined parse_day, parse_time, count_days, and concat_date functions to get the start\n date from the source. If a start time cannot be found, the UNIX epoch date is returned.\n '
day = parse_day(source)
week_count = source[0].strip()[0]
if week_count.isdigit():
time = parse_time(source)
meeting_date = count_days(day, week_count)
start = concat_date(meeting_date, time)
else:
start = datetime(1970, 1, 1)  # fall back to the UNIX epoch, as documented
return start
| 5,386,643,088,681,789,000
|
Combines the above-defined parse_day, parse_time, count_days, and concat_date functions to get the start
date from the source. If a start time cannot be found, the UNIX epoch date is returned.
|
city_scrapers/spiders/chi_school_community_action_council.py
|
get_start
|
jim/city-scrapers
|
python
|
def get_start(source):
'Combines the above-defined parse_day, parse_time, count_days, and concat_date functions to get the start\n date from the source. If a start time cannot be found, the UNIX epoch date is returned.\n '
day = parse_day(source)
week_count = source[0].strip()[0]
if week_count.isdigit():
time = parse_time(source)
meeting_date = count_days(day, week_count)
start = concat_date(meeting_date, time)
else:
start = datetime(1970, 1, 1)  # fall back to the UNIX epoch, as documented
return start
|
def Run(self, args):
'Run the list command.'
client = registries.RegistriesClient()
registry_ref = args.CONCEPTS.registry.Parse()
registry = client.Get(registry_ref)
for (idx, credential) in enumerate(registry.credentials):
serializable = resource_projector.MakeSerializable(credential)
serializable['index'] = idx
(yield serializable)
| 5,482,512,737,309,670,000
|
Run the list command.
|
lib/surface/iot/registries/credentials/list.py
|
Run
|
bshaffer/google-cloud-sdk
|
python
|
def Run(self, args):
client = registries.RegistriesClient()
registry_ref = args.CONCEPTS.registry.Parse()
registry = client.Get(registry_ref)
for (idx, credential) in enumerate(registry.credentials):
serializable = resource_projector.MakeSerializable(credential)
serializable['index'] = idx
(yield serializable)
|
@property
def attention_weights(self) -> List:
'List with the attention weights. Each element of the list is a tuple\n where the first and the second elements are the column and row\n attention weights respectively\n\n The shape of the attention weights is:\n\n - column attention: :math:`(N, H, F, F)`\n\n - row attention: :math:`(1, H, N, N)`\n\n where *N* is the batch size, *H* is the number of heads and *F* is the\n number of features/columns in the dataset\n '
attention_weights = []
for blk in self.saint_blks:
attention_weights.append((blk.col_attn.attn_weights, blk.row_attn.attn_weights))
return attention_weights
| 8,980,829,787,046,245,000
|
List with the attention weights. Each element of the list is a tuple
where the first and the second elements are the column and row
attention weights respectively
The shape of the attention weights is:
- column attention: :math:`(N, H, F, F)`
- row attention: :math:`(1, H, N, N)`
where *N* is the batch size, *H* is the number of heads and *F* is the
number of features/columns in the dataset
|
pytorch_widedeep/models/tabular/transformers/saint.py
|
attention_weights
|
TangleSpace/pytorch-widedeep
|
python
|
@property
def attention_weights(self) -> List:
'List with the attention weights. Each element of the list is a tuple\n where the first and the second elements are the column and row\n attention weights respectively\n\n The shape of the attention weights is:\n\n - column attention: :math:`(N, H, F, F)`\n\n - row attention: :math:`(1, H, N, N)`\n\n where *N* is the batch size, *H* is the number of heads and *F* is the\n number of features/columns in the dataset\n '
attention_weights = []
for blk in self.saint_blks:
attention_weights.append((blk.col_attn.attn_weights, blk.row_attn.attn_weights))
return attention_weights
|
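A sketch of inspecting the property after a forward pass, with `model` a SAINT instance that has already seen a batch:

col_attn, row_attn = model.attention_weights[0]  # first transformer block
col_attn.shape  # (N, H, F, F)
row_attn.shape  # (1, H, N, N)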
def execute_query(filename: str) -> Optional[pd.DataFrame]:
"Run SQL from a file. It will return a Pandas DataFrame if it selected\n anything; otherwise it will return None.\n\n I do not recommend you use this function too often. In general we should be\n using the SQLAlchemy ORM. That said, it's a nice convenience, and there are\n times where this function is genuinely something you want to run.\n "
with open(os.path.join(QUERIES_DIR, secure_filename(filename))) as f:
query = f.read()
with db.engine.connect() as conn:
res = conn.execute(query)
try:
df = pd.DataFrame(res.fetchall(), columns=res.keys())
return df
except ResourceClosedError:
return None
| 2,471,230,407,435,967,500
|
Run SQL from a file. It will return a Pandas DataFrame if it selected
anything; otherwise it will return None.
I do not recommend you use this function too often. In general we should be
using the SQLAlchemy ORM. That said, it's a nice convenience, and there are
times where this function is genuinely something you want to run.
|
backend/database/core.py
|
execute_query
|
stianberghansen/police-data-trust
|
python
|
def execute_query(filename: str) -> Optional[pd.DataFrame]:
"Run SQL from a file. It will return a Pandas DataFrame if it selected\n anything; otherwise it will return None.\n\n I do not recommend you use this function too often. In general we should be\n using the SQLAlchemy ORM. That said, it's a nice convenience, and there are\n times where this function is genuinely something you want to run.\n "
with open(os.path.join(QUERIES_DIR, secure_filename(filename))) as f:
query = f.read()
with db.engine.connect() as conn:
res = conn.execute(query)
try:
df = pd.DataFrame(res.fetchall(), columns=res.keys())
return df
except ResourceClosedError:
return None
|
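A minimal usage sketch; the SQL filename is hypothetical and must live in QUERIES_DIR:

df = execute_query('all_incidents.sql')  # hypothetical file name
if df is not None:  # None means the statement returned no result set (e.g. DDL)
    print(df.head())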
@click.group('psql', cls=AppGroup)
@with_appcontext
@click.pass_context
def db_cli(ctx: click.Context):
'Collection of database commands.'
ctx.obj = connect(user=current_app.config['POSTGRES_USER'], password=current_app.config['POSTGRES_PASSWORD'], host=current_app.config['POSTGRES_HOST'], port=current_app.config['POSTGRES_PORT'], dbname='postgres')
| -4,530,861,317,515,405,000
|
Collection of database commands.
|
backend/database/core.py
|
db_cli
|
stianberghansen/police-data-trust
|
python
|
@click.group('psql', cls=AppGroup)
@with_appcontext
@click.pass_context
def db_cli(ctx: click.Context):
ctx.obj = connect(user=current_app.config['POSTGRES_USER'], password=current_app.config['POSTGRES_PASSWORD'], host=current_app.config['POSTGRES_HOST'], port=current_app.config['POSTGRES_PORT'], dbname='postgres')
|
@db_cli.command('create')
@click.option('--overwrite/--no-overwrite', default=False, is_flag=True, show_default=True, help='If true, overwrite the database if it exists.')
@pass_psql_admin_connection
@click.pass_context
@dev_only
def create_database(ctx: click.Context, conn: connection, overwrite: bool=False):
'Create the database from nothing.'
database = current_app.config['POSTGRES_DB']
cursor = conn.cursor()
cursor.execute('ROLLBACK')
if overwrite:
cursor.execute(f"SELECT bool_or(datname = '{database}') FROM pg_database;")
exists = cursor.fetchall()[0][0]
if exists:
ctx.invoke(delete_database)
try:
cursor.execute(f'CREATE DATABASE {database};')
except psycopg2.errors.lookup('42P04'):
click.echo(f'Database {database!r} already exists.')
cursor.execute('ROLLBACK')
else:
click.echo(f'Created database {database!r}.')
| -7,738,917,581,153,133,000
|
Create the database from nothing.
|
backend/database/core.py
|
create_database
|
stianberghansen/police-data-trust
|
python
|
@db_cli.command('create')
@click.option('--overwrite/--no-overwrite', default=False, is_flag=True, show_default=True, help='If true, overwrite the database if it exists.')
@pass_psql_admin_connection
@click.pass_context
@dev_only
def create_database(ctx: click.Context, conn: connection, overwrite: bool=False):
database = current_app.config['POSTGRES_DB']
cursor = conn.cursor()
cursor.execute('ROLLBACK')
if overwrite:
cursor.execute(f"SELECT bool_or(datname = '{database}') FROM pg_database;")
exists = cursor.fetchall()[0][0]
if exists:
ctx.invoke(delete_database)
try:
cursor.execute(f'CREATE DATABASE {database};')
except psycopg2.errors.lookup('42P04'):
click.echo(f'Database {database!r} already exists.')
cursor.execute('ROLLBACK')
else:
click.echo(f'Created database {database!r}.')
|
@db_cli.command('init')
def init_database():
'Initialize the database schemas.\n\n Run this after the database has been created.\n '
database = current_app.config['POSTGRES_DB']
db.create_all()
click.echo(f'Initialized the database {database!r}.')
| -6,951,266,486,569,976,000
|
Initialize the database schemas.
Run this after the database has been created.
|
backend/database/core.py
|
init_database
|
stianberghansen/police-data-trust
|
python
|
@db_cli.command('init')
def init_database():
'Initialize the database schemas.\n\n Run this after the database has been created.\n '
database = current_app.config['POSTGRES_DB']
db.create_all()
click.echo(f'Initialized the database {database!r}.')
|
@db_cli.command('gen-examples')
def gen_examples_command():
'Generate 2 incident examples in the database.'
execute_query('example_incidents.sql')
click.echo('Added 2 example incidents to the database.')
| 1,115,475,131,648,838,900
|
Generate 2 incident examples in the database.
|
backend/database/core.py
|
gen_examples_command
|
stianberghansen/police-data-trust
|
python
|
@db_cli.command('gen-examples')
def gen_examples_command():
execute_query('example_incidents.sql')
click.echo('Added 2 example incidents to the database.')
|
@db_cli.command('delete')
@click.option('--test-db', '-t', default=False, is_flag=True, help=f'Deletes the database {TestingConfig.POSTGRES_DB!r}.')
@pass_psql_admin_connection
@dev_only
def delete_database(conn: connection, test_db: bool):
'Delete the database.'
if test_db:
database = TestingConfig.POSTGRES_DB
else:
database = current_app.config['POSTGRES_DB']
cursor = conn.cursor()
cursor.execute('ROLLBACK')
if (database != TestingConfig.POSTGRES_DB):
click.echo(f'Are you sure you want to delete database {database!r}?')
click.echo((("Type in the database name '" + click.style(database, fg='red')) + "' to confirm"))
confirmation = click.prompt('Database name')
if (database != confirmation):
click.echo('The input does not match. The database will not be deleted.')
return None
try:
cursor.execute(f'DROP DATABASE {database};')
except psycopg2.errors.lookup('3D000'):
click.echo(f'Database {database!r} does not exist.')
cursor.execute('ROLLBACK')
else:
click.echo(f'Database {database!r} was deleted.')
| 2,724,078,116,972,836,000
|
Delete the database.
|
backend/database/core.py
|
delete_database
|
stianberghansen/police-data-trust
|
python
|
@db_cli.command('delete')
@click.option('--test-db', '-t', default=False, is_flag=True, help=f'Deletes the database {TestingConfig.POSTGRES_DB!r}.')
@pass_psql_admin_connection
@dev_only
def delete_database(conn: connection, test_db: bool):
if test_db:
database = TestingConfig.POSTGRES_DB
else:
database = current_app.config['POSTGRES_DB']
cursor = conn.cursor()
cursor.execute('ROLLBACK')
if (database != TestingConfig.POSTGRES_DB):
click.echo(f'Are you sure you want to delete database {database!r}?')
click.echo((("Type in the database name '" + click.style(database, fg='red')) + "' to confirm"))
confirmation = click.prompt('Database name')
if (database != confirmation):
click.echo('The input does not match. The database will not be deleted.')
return None
try:
cursor.execute(f'DROP DATABASE {database};')
except psycopg2.errors.lookup('3D000'):
click.echo(f'Database {database!r} does not exist.')
cursor.execute('ROLLBACK')
else:
click.echo(f'Database {database!r} was deleted.')
|
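Taken together, the commands above support a workflow like the following, assuming the `psql` group is registered on the Flask app's CLI:

flask psql create --overwrite  # (re)create the configured database
flask psql init                # create all schemas/tables
flask psql gen-examples        # seed 2 example incidents
flask psql delete --test-db    # drop the test database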
def params(q={}):
' default model parameters\n '
p = {}
p['tolNR'] = 1e-07
p['tend'] = 1.0
p['dtmax'] = 0.005
p['bndno'] = 17
p['bctype'] = 'pydeadloads'
p.update(q)
return p
| -4,516,981,005,381,893,000
|
default model parameters
|
tests/PFEM_Metafor/waterColoumnFallWithFlexibleObstacle_obstacle_Mtf_E_1_0e6_EAS.py
|
params
|
mlucio89/CUPyDO
|
python
|
def params(q={}):
' \n '
p = {}
p['tolNR'] = 1e-07
p['tend'] = 1.0
p['dtmax'] = 0.005
p['bndno'] = 17
p['bctype'] = 'pydeadloads'
p.update(q)
return p
|
def group(seq, groupSize, noneFill=True):
'Groups a given sequence into sublists of length groupSize.'
ret = []
L = []
i = groupSize
for elt in seq:
if (i > 0):
L.append(elt)
else:
ret.append(L)
i = groupSize
L = []
L.append(elt)
i -= 1
if L:
if noneFill:
while (len(L) < groupSize):
L.append(None)
ret.append(L)
return ret
| -6,485,224,228,336,238,000
|
Groups a given sequence into sublists of length groupSize.
|
plugins/String/test.py
|
group
|
AntumDeluge/Limnoria
|
python
|
def group(seq, groupSize, noneFill=True):
ret = []
L = []
i = groupSize
for elt in seq:
if (i > 0):
L.append(elt)
else:
ret.append(L)
i = groupSize
L = []
L.append(elt)
i -= 1
if L:
if noneFill:
while (len(L) < groupSize):
L.append(None)
ret.append(L)
return ret
|
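What `group` produces, including the `noneFill` padding on the final chunk:

group(list(range(10)), 3)
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
group('abcde', 2, noneFill=False)
# [['a', 'b'], ['c', 'd'], ['e']]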
def __init__(self, ci_estimator: Callable, alpha: float=0.05, init_graph: Union[(nx.Graph, ADMG)]=None, fixed_edges: nx.Graph=None, max_cond_set_size: int=None, **ci_estimator_kwargs):
'Peter and Clarke (PC) algorithm for causal discovery.\n\n Assumes causal sufficiency, that is, all confounders in the\n causal graph are observed variables.\n\n Parameters\n ----------\n ci_estimator : Callable\n The conditional independence test function. The arguments of the estimator should\n be data, node, node to compare, conditioning set of nodes, and any additional\n keyword arguments.\n alpha : float, optional\n The significance level for the conditional independence test, by default 0.05.\n init_graph : nx.Graph | ADMG, optional\n An initialized graph. If ``None``, then will initialize PC using a\n complete graph. By default None.\n fixed_edges : nx.Graph, optional\n An undirected graph with fixed edges. If ``None``, then will initialize PC using a\n complete graph. By default None.\n max_cond_set_size : int, optional\n Maximum size of the conditioning set, by default None. Used to limit\n the computation spent on the algorithm.\n ci_estimator_kwargs : dict\n Keyword arguments for the ``ci_estimator`` function.\n\n Attributes\n ----------\n graph_ : PAG\n The graph discovered.\n separating_sets_ : dict\n The dictionary of separating sets, where it is a nested dictionary from\n the variable name to the variable it is being compared to the set of\n variables in the graph that separate the two.\n '
super().__init__(ci_estimator, alpha, init_graph, fixed_edges, max_cond_set_size, **ci_estimator_kwargs)
| -6,205,691,413,641,967,000
|
Peter and Clark (PC) algorithm for causal discovery.
Assumes causal sufficiency, that is, all confounders in the
causal graph are observed variables.
Parameters
----------
ci_estimator : Callable
The conditional independence test function. The arguments of the estimator should
be data, node, node to compare, conditioning set of nodes, and any additional
keyword arguments.
alpha : float, optional
The significance level for the conditional independence test, by default 0.05.
init_graph : nx.Graph | ADMG, optional
An initialized graph. If ``None``, then will initialize PC using a
complete graph. By default None.
fixed_edges : nx.Graph, optional
An undirected graph with fixed edges. If ``None``, then will initialize PC using a
complete graph. By default None.
max_cond_set_size : int, optional
Maximum size of the conditioning set, by default None. Used to limit
the computation spent on the algorithm.
ci_estimator_kwargs : dict
Keyword arguments for the ``ci_estimator`` function.
Attributes
----------
graph_ : PAG
The graph discovered.
separating_sets_ : dict
The dictionary of separating sets, where it is a nested dictionary from
the variable name to the variable it is being compared to the set of
variables in the graph that separate the two.
|
causal_networkx/discovery/pcalg.py
|
__init__
|
adam2392/causal-networkx
|
python
|
def __init__(self, ci_estimator: Callable, alpha: float=0.05, init_graph: Union[(nx.Graph, ADMG)]=None, fixed_edges: nx.Graph=None, max_cond_set_size: int=None, **ci_estimator_kwargs):
'Peter and Clarke (PC) algorithm for causal discovery.\n\n Assumes causal sufficiency, that is, all confounders in the\n causal graph are observed variables.\n\n Parameters\n ----------\n ci_estimator : Callable\n The conditional independence test function. The arguments of the estimator should\n be data, node, node to compare, conditioning set of nodes, and any additional\n keyword arguments.\n alpha : float, optional\n The significance level for the conditional independence test, by default 0.05.\n init_graph : nx.Graph | ADMG, optional\n An initialized graph. If ``None``, then will initialize PC using a\n complete graph. By default None.\n fixed_edges : nx.Graph, optional\n An undirected graph with fixed edges. If ``None``, then will initialize PC using a\n complete graph. By default None.\n max_cond_set_size : int, optional\n Maximum size of the conditioning set, by default None. Used to limit\n the computation spent on the algorithm.\n ci_estimator_kwargs : dict\n Keyword arguments for the ``ci_estimator`` function.\n\n Attributes\n ----------\n graph_ : PAG\n The graph discovered.\n separating_sets_ : dict\n The dictionary of separating sets, where it is a nested dictionary from\n the variable name to the variable it is being compared to the set of\n variables in the graph that separate the two.\n '
super().__init__(ci_estimator, alpha, init_graph, fixed_edges, max_cond_set_size, **ci_estimator_kwargs)
|
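A usage sketch for the PC class; the CI test below is a hypothetical stand-in whose signature follows the docstring (data, node, node to compare, conditioning set, keyword arguments), and its return convention is an assumption:

import numpy as np
import pandas as pd

def my_ci_test(data, x, y, sep_set, **kwargs):
    # hypothetical: report x and y as independent given sep_set;
    # a real test would return a p-value (or statistic) computed from data
    return 1.0

X = pd.DataFrame(np.random.rand(100, 3), columns=['a', 'b', 'c'])
pc = PC(ci_estimator=my_ci_test, alpha=0.05, max_cond_set_size=2)
pc.fit(X)
print(pc.graph_)            # the estimated graph
print(pc.separating_sets_)  # separating sets found during skeleton search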
def learn_skeleton(self, X: pd.DataFrame) -> Tuple[(nx.Graph, Dict[(str, Dict[(str, Set)])])]:
'Learn skeleton from data.\n\n Parameters\n ----------\n X : pd.DataFrame\n Dataset.\n\n Returns\n -------\n skel_graph : nx.Graph\n The skeleton graph.\n sep_set : Dict[str, Dict[str, Set]]\n The separating set.\n '
(graph, sep_set, fixed_edges) = self._initialize_graph(X)
(skel_graph, sep_set) = self._learn_skeleton_from_neighbors(X, graph, sep_set, fixed_edges)
return (skel_graph, sep_set)
| 8,793,262,104,481,004,000
|
Learn skeleton from data.
Parameters
----------
X : pd.DataFrame
Dataset.
Returns
-------
skel_graph : nx.Graph
The skeleton graph.
sep_set : Dict[str, Dict[str, Set]]
The separating set.
|
causal_networkx/discovery/pcalg.py
|
learn_skeleton
|
adam2392/causal-networkx
|
python
|
def learn_skeleton(self, X: pd.DataFrame) -> Tuple[(nx.Graph, Dict[(str, Dict[(str, Set)])])]:
'Learn skeleton from data.\n\n Parameters\n ----------\n X : pd.DataFrame\n Dataset.\n\n Returns\n -------\n skel_graph : nx.Graph\n The skeleton graph.\n sep_set : Dict[str, Dict[str, Set]]\n The separating set.\n '
(graph, sep_set, fixed_edges) = self._initialize_graph(X)
(skel_graph, sep_set) = self._learn_skeleton_from_neighbors(X, graph, sep_set, fixed_edges)
return (skel_graph, sep_set)
|
def fit(self, X: pd.DataFrame) -> None:
"Fit PC algorithm on dataset 'X'."
(skel_graph, sep_set) = self.learn_skeleton(X)
graph = self._orient_edges(skel_graph, sep_set)
self.separating_sets_ = sep_set
self.graph_ = graph
| -6,594,347,806,976,311,000
|
Fit PC algorithm on dataset 'X'.
|
causal_networkx/discovery/pcalg.py
|
fit
|
adam2392/causal-networkx
|
python
|
def fit(self, X: pd.DataFrame) -> None:
(skel_graph, sep_set) = self.learn_skeleton(X)
graph = self._orient_edges(skel_graph, sep_set)
self.separating_sets_ = sep_set
self.graph_ = graph
|
def _orient_edges(self, skel_graph, sep_set):
'Orient edges in a skeleton graph to estimate the causal DAG, or CPDAG.\n\n Uses the separation sets to orient edges via conditional independence\n testing.\n\n Parameters\n ----------\n skel_graph : nx.Graph\n A skeleton graph. If ``None``, then will initialize PC using a\n complete graph. By default None.\n sep_set : _type_\n _description_\n '
dag = skel_graph.to_directed()
node_ids = skel_graph.nodes()
for (i, j) in combinations(node_ids, 2):
adj_i = set(dag.successors(i))
if (j in adj_i):
continue
adj_j = set(dag.successors(j))
if (i in adj_j):
continue
if (sep_set[i][j] is None):
continue
common_k = (adj_i & adj_j)
for k in common_k:
if (k not in sep_set[i][j]):
if dag.has_edge(k, i):
dag.remove_edge(k, i)
if dag.has_edge(k, j):
dag.remove_edge(k, j)
old_dag = dag.copy()
while True:
for (i, j) in combinations(node_ids, 2):
if _has_both_edges(dag, i, j):
for k in dag.predecessors(i):
if dag.has_edge(i, k):
continue
if _has_any_edge(dag, k, j):
continue
dag.remove_edge(j, i)
break
if _has_both_edges(dag, i, j):
succs_i = set()
for k in dag.successors(i):
if (not dag.has_edge(k, i)):
succs_i.add(k)
preds_j = set()
for k in dag.predecessors(j):
if (not dag.has_edge(j, k)):
preds_j.add(k)
if (len((succs_i & preds_j)) > 0):
dag.remove_edge(j, i)
if _has_both_edges(dag, i, j):
adj_i = set()
for k in dag.successors(i):
if dag.has_edge(k, i):
adj_i.add(k)
for (k, l) in combinations(adj_i, 2):
if _has_any_edge(dag, k, l):
continue
if (dag.has_edge(j, k) or (not dag.has_edge(k, j))):
continue
if (dag.has_edge(j, l) or (not dag.has_edge(l, j))):
continue
dag.remove_edge(j, i)
break
if nx.is_isomorphic(dag, old_dag):
break
old_dag = dag.copy()
return dag
| 7,184,094,386,730,885,000
|
Orient edges in a skeleton graph to estimate the causal DAG, or CPDAG.
Uses the separation sets to orient edges via conditional independence
testing.
Parameters
----------
skel_graph : nx.Graph
The learned skeleton graph whose undirected edges are to be oriented.
sep_set : Dict[str, Dict[str, Set]]
The separating sets found while learning the skeleton.
|
causal_networkx/discovery/pcalg.py
|
_orient_edges
|
adam2392/causal-networkx
|
python
|
def _orient_edges(self, skel_graph, sep_set):
'Orient edges in a skeleton graph to estimate the causal DAG, or CPDAG.\n\n Uses the separation sets to orient edges via conditional independence\n testing.\n\n Parameters\n ----------\n skel_graph : nx.Graph\n A skeleton graph. If ``None``, then will initialize PC using a\n complete graph. By default None.\n sep_set : _type_\n _description_\n '
dag = skel_graph.to_directed()
node_ids = skel_graph.nodes()
for (i, j) in combinations(node_ids, 2):
adj_i = set(dag.successors(i))
if (j in adj_i):
continue
adj_j = set(dag.successors(j))
if (i in adj_j):
continue
if (sep_set[i][j] is None):
continue
common_k = (adj_i & adj_j)
for k in common_k:
if (k not in sep_set[i][j]):
if dag.has_edge(k, i):
dag.remove_edge(k, i)
if dag.has_edge(k, j):
dag.remove_edge(k, j)
old_dag = dag.copy()
while True:
for (i, j) in combinations(node_ids, 2):
if _has_both_edges(dag, i, j):
for k in dag.predecessors(i):
if dag.has_edge(i, k):
continue
if _has_any_edge(dag, k, j):
continue
dag.remove_edge(j, i)
break
if _has_both_edges(dag, i, j):
succs_i = set()
for k in dag.successors(i):
if (not dag.has_edge(k, i)):
succs_i.add(k)
preds_j = set()
for k in dag.predecessors(j):
if (not dag.has_edge(j, k)):
preds_j.add(k)
if (len((succs_i & preds_j)) > 0):
dag.remove_edge(j, i)
if _has_both_edges(dag, i, j):
adj_i = set()
for k in dag.successors(i):
if dag.has_edge(k, i):
adj_i.add(k)
for (k, l) in combinations(adj_i, 2):
if _has_any_edge(dag, k, l):
continue
if (dag.has_edge(j, k) or (not dag.has_edge(k, j))):
continue
if (dag.has_edge(j, l) or (not dag.has_edge(l, j))):
continue
dag.remove_edge(j, i)
break
if nx.is_isomorphic(dag, old_dag):
break
old_dag = dag.copy()
return dag
|
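`_orient_edges` above relies on two edge-test helpers that sit outside this record; a plausible reconstruction, assuming the usual encoding of an undirected edge as two opposing directed edges:

def _has_both_edges(dag, i, j):
    # an undirected edge i - j is encoded as both i -> j and j -> i being present
    return dag.has_edge(i, j) and dag.has_edge(j, i)

def _has_any_edge(dag, i, j):
    # any adjacency between i and j, in either direction
    return dag.has_edge(i, j) or dag.has_edge(j, i)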
def brat_output(docgraph, layer=None, show_relations=True):
"\n converts a document graph with pointing chains into a string representation\n of a brat *.ann file.\n\n Parameters\n ----------\n docgraph : DiscourseDocumentGraph\n a document graph which might contain pointing chains (e.g. coreference links)\n layer : str or None\n the name of the layer that contains the pointing chains (e.g. 'mmax' or 'pocores').\n If unspecified, all pointing chains in the document will be considered\n\n Returns\n -------\n ret_str : unicode\n the content of a brat *.ann file\n "
if layer:
namespace = dg.layer2namespace(layer)
else:
namespace = docgraph.ns
ret_str = u''
pointing_chains = dg.get_pointing_chains(docgraph, layer=layer)
first_token2markables = defaultdict(list)
markable_dict = {}
markable_index = 1
for pointing_chain in pointing_chains:
for markable in sorted(pointing_chain, key=dg.util.natural_sort_key):
span_tokens = spanstring2tokens(docgraph, docgraph.node[markable][(namespace + ':span')])
span_text = dg.tokens2text(docgraph, span_tokens)
first_token2markables[span_tokens[0]].append(markable)
markable_dict[markable] = (markable_index, span_text, len(span_text))
markable_index += 1
onset = 0
for token_id in docgraph.tokens:
tok_len = len(docgraph.get_token(token_id))
if (token_id in first_token2markables):
for markable in first_token2markables[token_id]:
(mark_index, mark_text, mark_len) = markable_dict[markable]
ret_str += u'T{0}\tMarkable {1} {2}\t{3}\n'.format(mark_index, onset, (onset + mark_len), mark_text)
onset += (tok_len + 1)
if show_relations:
relation = 1
for pointing_chain in pointing_chains:
last_to_first_mention = sorted(pointing_chain, key=dg.util.natural_sort_key, reverse=True)
for i in xrange(0, (len(pointing_chain) - 1)):
chain_element = markable_dict[last_to_first_mention[i]][0]
prev_chain_element = markable_dict[last_to_first_mention[(i + 1)]][0]
ret_str += u'R{0}\tCoreference Arg1:T{1} Arg2:T{2}\n'.format(relation, chain_element, prev_chain_element)
relation += 1
return ret_str
| -8,608,024,915,250,498,000
|
converts a document graph with pointing chains into a string representation
of a brat *.ann file.
Parameters
----------
docgraph : DiscourseDocumentGraph
a document graph which might contain pointing chains (e.g. coreference links)
layer : str or None
the name of the layer that contains the pointing chains (e.g. 'mmax' or 'pocores').
If unspecified, all pointing chains in the document will be considered
Returns
-------
ret_str : unicode
the content of a brat *.ann file
|
src/discoursegraphs/readwrite/brat.py
|
brat_output
|
arne-cl/discoursegraphs
|
python
|
def brat_output(docgraph, layer=None, show_relations=True):
"\n converts a document graph with pointing chains into a string representation\n of a brat *.ann file.\n\n Parameters\n ----------\n docgraph : DiscourseDocumentGraph\n a document graph which might contain pointing chains (e.g. coreference links)\n layer : str or None\n the name of the layer that contains the pointing chains (e.g. 'mmax' or 'pocores').\n If unspecified, all pointing chains in the document will be considered\n\n Returns\n -------\n ret_str : unicode\n the content of a brat *.ann file\n "
if layer:
namespace = dg.layer2namespace(layer)
else:
namespace = docgraph.ns
ret_str = u''
pointing_chains = dg.get_pointing_chains(docgraph, layer=layer)
first_token2markables = defaultdict(list)
markable_dict = {}
markable_index = 1
for pointing_chain in pointing_chains:
for markable in sorted(pointing_chain, key=dg.util.natural_sort_key):
span_tokens = spanstring2tokens(docgraph, docgraph.node[markable][(namespace + ':span')])
span_text = dg.tokens2text(docgraph, span_tokens)
first_token2markables[span_tokens[0]].append(markable)
markable_dict[markable] = (markable_index, span_text, len(span_text))
markable_index += 1
onset = 0
for token_id in docgraph.tokens:
tok_len = len(docgraph.get_token(token_id))
if (token_id in first_token2markables):
for markable in first_token2markables[token_id]:
(mark_index, mark_text, mark_len) = markable_dict[markable]
ret_str += u'T{0}\tMarkable {1} {2}\t{3}\n'.format(mark_index, onset, (onset + mark_len), mark_text)
onset += (tok_len + 1)
if show_relations:
relation = 1
for pointing_chain in pointing_chains:
last_to_first_mention = sorted(pointing_chain, key=dg.util.natural_sort_key, reverse=True)
for i in xrange(0, (len(pointing_chain) - 1)):
chain_element = markable_dict[last_to_first_mention[i]][0]
prev_chain_element = markable_dict[last_to_first_mention[(i + 1)]][0]
ret_str += u'R{0}\tCoreference Arg1:T{1} Arg2:T{2}\n'.format(relation, chain_element, prev_chain_element)
relation += 1
return ret_str
|
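For orientation, `brat_output` above emits brat standoff annotations; a hypothetical two-mention coreference chain would serialize roughly as (offsets illustrative):

T1	Markable 0 5	Obama
T2	Markable 23 25	he
R1	Coreference Arg1:T2 Arg2:T1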
def create_visual_conf(docgraph, pointing_chains):
'\n creates a visual.conf file (as a string)\n for the given document graph.\n '
num_of_entities = len(pointing_chains)
mapsize = max(3, min(12, num_of_entities))
colormap = brewer2mpl.get_map(name='Paired', map_type='Qualitative', number=mapsize)
colors = (range(mapsize) * int(math.ceil((num_of_entities / float(mapsize)))))
endless_color_cycle = itertools.cycle(colors)
ret_str = u'[drawing]\n\n'
for chain in pointing_chains:
background_color = colormap.hex_colors[endless_color_cycle.next()]
for markable in chain:
span_tokens = spanstring2tokens(docgraph, docgraph.node[markable][(docgraph.ns + ':span')])
span_text = dg.tokens2text(docgraph, span_tokens)
ascii_markable = unidecode(span_text)
ret_str += u'{0}\tbgColor:{1}\n'.format(ascii_markable, background_color)
ret_str += '\n[labels]'
return ret_str
| 8,306,438,556,565,813,000
|
creates a visual.conf file (as a string)
for the given document graph.
|
src/discoursegraphs/readwrite/brat.py
|
create_visual_conf
|
arne-cl/discoursegraphs
|
python
|
def create_visual_conf(docgraph, pointing_chains):
'\n creates a visual.conf file (as a string)\n for the given document graph.\n '
num_of_entities = len(pointing_chains)
mapsize = max(3, min(12, num_of_entities))
colormap = brewer2mpl.get_map(name='Paired', map_type='Qualitative', number=mapsize)
colors = (range(mapsize) * int(math.ceil((num_of_entities / float(mapsize)))))
endless_color_cycle = itertools.cycle(colors)
ret_str = u'[drawing]\n\n'
for chain in pointing_chains:
background_color = colormap.hex_colors[endless_color_cycle.next()]
for markable in chain:
span_tokens = spanstring2tokens(docgraph, docgraph.node[markable][(docgraph.ns + ':span')])
span_text = dg.tokens2text(docgraph, span_tokens)
ascii_markable = unidecode(span_text)
ret_str += u'{0}\tbgColor:{1}\n'.format(ascii_markable, background_color)
ret_str += '\n[labels]'
return ret_str
|
def __init__(self, model: Optional[str]='bert-large-uncased', custom_model: PreTrainedModel=None, custom_tokenizer: PreTrainedTokenizer=None, hidden: Union[(List[int], int)]=(- 2), reduce_option: str='mean', sentence_handler: SentenceHandler=SentenceHandler(), random_state: int=12345, hidden_concat: bool=False, gpu_id: int=0):
'\n This is the parent Bert Summarizer model. New methods should implement this class.\n\n :param model: This parameter is associated with the inherit string parameters from the transformers library.\n :param custom_model: If you have a pre-trained model, you can add the model class here.\n :param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.\n :param hidden: This signifies which layer(s) of the BERT model you would like to use as embeddings.\n :param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.\n :param sentence_handler: The handler to process sentences. If want to use coreference, instantiate and pass.\n CoreferenceHandler instance\n :param random_state: The random state to reproduce summarizations.\n :param hidden_concat: Whether or not to concat multiple hidden layers.\n :param gpu_id: GPU device index if CUDA is available.\n '
model = BertEmbedding(model, custom_model, custom_tokenizer, gpu_id)
model_func = partial(model, hidden=hidden, reduce_option=reduce_option, hidden_concat=hidden_concat)
super().__init__(model_func, sentence_handler, random_state)
| -3,580,805,947,633,973,000
|
This is the parent Bert Summarizer model. New summarizer variants should inherit from this class.
:param model: This parameter is associated with the inherit string parameters from the transformers library.
:param custom_model: If you have a pre-trained model, you can add the model class here.
:param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.
:param hidden: This signifies which layer(s) of the BERT model you would like to use as embeddings.
:param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.
:param sentence_handler: The handler to process sentences. If want to use coreference, instantiate and pass.
CoreferenceHandler instance
:param random_state: The random state to reproduce summarizations.
:param hidden_concat: Whether or not to concat multiple hidden layers.
:param gpu_id: GPU device index if CUDA is available.
|
summarizer/bert.py
|
__init__
|
SelvinDatatonic/bert-extractive-summarizer
|
python
|
def __init__(self, model: Optional[str]='bert-large-uncased', custom_model: PreTrainedModel=None, custom_tokenizer: PreTrainedTokenizer=None, hidden: Union[(List[int], int)]=(- 2), reduce_option: str='mean', sentence_handler: SentenceHandler=SentenceHandler(), random_state: int=12345, hidden_concat: bool=False, gpu_id: int=0):
'\n This is the parent Bert Summarizer model. New methods should implement this class.\n\n :param model: This parameter is associated with the inherit string parameters from the transformers library.\n :param custom_model: If you have a pre-trained model, you can add the model class here.\n :param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.\n :param hidden: This signifies which layer(s) of the BERT model you would like to use as embeddings.\n :param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.\n :param sentence_handler: The handler to process sentences. If want to use coreference, instantiate and pass.\n CoreferenceHandler instance\n :param random_state: The random state to reproduce summarizations.\n :param hidden_concat: Whether or not to concat multiple hidden layers.\n :param gpu_id: GPU device index if CUDA is available.\n '
model = BertEmbedding(model, custom_model, custom_tokenizer, gpu_id)
model_func = partial(model, hidden=hidden, reduce_option=reduce_option, hidden_concat=hidden_concat)
super().__init__(model_func, sentence_handler, random_state)
|
def __init__(self, model: str='bert-large-uncased', custom_model: PreTrainedModel=None, custom_tokenizer: PreTrainedTokenizer=None, hidden: Union[(List[int], int)]=(- 2), reduce_option: str='mean', sentence_handler: SentenceHandler=SentenceHandler(), random_state: int=12345, hidden_concat: bool=False, gpu_id: int=0):
'\n This is the main Bert Summarizer class.\n\n :param model: This parameter is associated with the inherit string parameters from the transformers library.\n :param custom_model: If you have a pre-trained model, you can add the model class here.\n :param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.\n :param hidden: This signifies which layer of the BERT model you would like to use as embeddings.\n :param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.\n :param random_state: The random state to reproduce summarizations.\n :param hidden_concat: Whether or not to concat multiple hidden layers.\n :param gpu_id: GPU device index if CUDA is available. \n '
super(Summarizer, self).__init__(model, custom_model, custom_tokenizer, hidden, reduce_option, sentence_handler, random_state, hidden_concat, gpu_id)
| -517,203,160,611,884,800
|
This is the main Bert Summarizer class.
:param model: This parameter is associated with the inherit string parameters from the transformers library.
:param custom_model: If you have a pre-trained model, you can add the model class here.
:param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.
:param hidden: This signifies which layer of the BERT model you would like to use as embeddings.
:param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.
:param random_state: The random state to reproduce summarizations.
:param hidden_concat: Whether or not to concat multiple hidden layers.
:param gpu_id: GPU device index if CUDA is available.
|
summarizer/bert.py
|
__init__
|
SelvinDatatonic/bert-extractive-summarizer
|
python
|
def __init__(self, model: str='bert-large-uncased', custom_model: PreTrainedModel=None, custom_tokenizer: PreTrainedTokenizer=None, hidden: Union[(List[int], int)]=(- 2), reduce_option: str='mean', sentence_handler: SentenceHandler=SentenceHandler(), random_state: int=12345, hidden_concat: bool=False, gpu_id: int=0):
'\n This is the main Bert Summarizer class.\n\n :param model: This parameter is associated with the inherit string parameters from the transformers library.\n :param custom_model: If you have a pre-trained model, you can add the model class here.\n :param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.\n :param hidden: This signifies which layer of the BERT model you would like to use as embeddings.\n :param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.\n :param random_state: The random state to reproduce summarizations.\n :param hidden_concat: Whether or not to concat multiple hidden layers.\n :param gpu_id: GPU device index if CUDA is available. \n '
super(Summarizer, self).__init__(model, custom_model, custom_tokenizer, hidden, reduce_option, sentence_handler, random_state, hidden_concat, gpu_id)
|
def __init__(self, transformer_type: str='Bert', transformer_model_key: str='bert-base-uncased', transformer_tokenizer_key: str=None, hidden: Union[(List[int], int)]=(- 2), reduce_option: str='mean', sentence_handler: SentenceHandler=SentenceHandler(), random_state: int=12345, hidden_concat: bool=False, gpu_id: int=0):
'\n :param transformer_type: The Transformer type, such as Bert, GPT2, DistilBert, etc.\n :param transformer_model_key: The transformer model key. This is the directory for the model.\n :param transformer_tokenizer_key: The transformer tokenizer key. This is the tokenizer directory.\n :param hidden: The hidden output layers to use for the summarization.\n :param reduce_option: The reduce option, such as mean, max, min, median, etc.\n :param sentence_handler: The sentence handler class to process the raw text.\n :param random_state: The random state to use.\n :param hidden_concat: Deprecated hidden concat option.\n :param gpu_id: GPU device index if CUDA is available. \n '
try:
self.MODEL_DICT['Roberta'] = (RobertaModel, RobertaTokenizer)
self.MODEL_DICT['Albert'] = (AlbertModel, AlbertTokenizer)
self.MODEL_DICT['Camembert'] = (CamembertModel, CamembertTokenizer)
self.MODEL_DICT['Bart'] = (BartModel, BartTokenizer)
self.MODEL_DICT['Longformer'] = (LongformerModel, LongformerTokenizer)
self.MODEL_DICT['BigBird'] = (BigBirdModel, BigBirdTokenizer)
except Exception:
pass
(model_clz, tokenizer_clz) = self.MODEL_DICT[transformer_type]
model = model_clz.from_pretrained(transformer_model_key, output_hidden_states=True)
tokenizer = tokenizer_clz.from_pretrained((transformer_tokenizer_key if (transformer_tokenizer_key is not None) else transformer_model_key))
super().__init__(None, model, tokenizer, hidden, reduce_option, sentence_handler, random_state, hidden_concat, gpu_id)
| 8,566,804,421,610,568,000
|
:param transformer_type: The Transformer type, such as Bert, GPT2, DistilBert, etc.
:param transformer_model_key: The transformer model key. This is the directory for the model.
:param transformer_tokenizer_key: The transformer tokenizer key. This is the tokenizer directory.
:param hidden: The hidden output layers to use for the summarization.
:param reduce_option: The reduce option, such as mean, max, min, median, etc.
:param sentence_handler: The sentence handler class to process the raw text.
:param random_state: The random state to use.
:param hidden_concat: Deprecated hidden concat option.
:param gpu_id: GPU device index if CUDA is available.
|
summarizer/bert.py
|
__init__
|
SelvinDatatonic/bert-extractive-summarizer
|
python
|
def __init__(self, transformer_type: str='Bert', transformer_model_key: str='bert-base-uncased', transformer_tokenizer_key: str=None, hidden: Union[(List[int], int)]=(- 2), reduce_option: str='mean', sentence_handler: SentenceHandler=SentenceHandler(), random_state: int=12345, hidden_concat: bool=False, gpu_id: int=0):
'\n :param transformer_type: The Transformer type, such as Bert, GPT2, DistilBert, etc.\n :param transformer_model_key: The transformer model key. This is the directory for the model.\n :param transformer_tokenizer_key: The transformer tokenizer key. This is the tokenizer directory.\n :param hidden: The hidden output layers to use for the summarization.\n :param reduce_option: The reduce option, such as mean, max, min, median, etc.\n :param sentence_handler: The sentence handler class to process the raw text.\n :param random_state: The random state to use.\n :param hidden_concat: Deprecated hidden concat option.\n :param gpu_id: GPU device index if CUDA is available. \n '
try:
self.MODEL_DICT['Roberta'] = (RobertaModel, RobertaTokenizer)
self.MODEL_DICT['Albert'] = (AlbertModel, AlbertTokenizer)
self.MODEL_DICT['Camembert'] = (CamembertModel, CamembertTokenizer)
self.MODEL_DICT['Bart'] = (BartModel, BartTokenizer)
self.MODEL_DICT['Longformer'] = (LongformerModel, LongformerTokenizer)
self.MODEL_DICT['BigBird'] = (BigBirdModel, BigBirdTokenizer)
except Exception:
pass
(model_clz, tokenizer_clz) = self.MODEL_DICT[transformer_type]
model = model_clz.from_pretrained(transformer_model_key, output_hidden_states=True)
tokenizer = tokenizer_clz.from_pretrained((transformer_tokenizer_key if (transformer_tokenizer_key is not None) else transformer_model_key))
super().__init__(None, model, tokenizer, hidden, reduce_option, sentence_handler, random_state, hidden_concat, gpu_id)
|
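A hedged usage sketch for the transformer-generic summarizer above; the import path and callable interface follow the upstream bert-extractive-summarizer README and are assumptions for this clone.

# Assumed usage (import path and __call__ API taken from the upstream README):
from summarizer import TransformerSummarizer

model = TransformerSummarizer(transformer_type='Roberta', transformer_model_key='roberta-base')
summary = model(body_text, min_length=60)   # body_text is an illustrative input string
print(summary)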
def fit_predict(self, graph, weights):
"Fits model to a given graph and weights list\n\n Sets :code:`self.model_` to the state of graphtool's Stochastic Block Model the after fitting.\n\n Attributes\n ----------\n graph: graphtool.Graph\n the graph to fit the model to\n weights: graphtool.EdgePropertyMap<double>\n the property map: edge -> weight (double) to fit the model to, if weighted variant\n is selected\n\n Returns\n -------\n numpy.ndarray\n partition of labels, each sublist contains label indices\n related to label positions in :code:`y`\n "
if self.weight_model:
self.model_ = self._model_fit_function()(graph, deg_corr=self.use_degree_correlation, overlap=self.allow_overlap, state_args=dict(recs=[weights], rec_types=[self.weight_model]))
else:
self.model_ = self._model_fit_function()(graph, deg_corr=self.use_degree_correlation, overlap=self.allow_overlap)
return self._detect_communities()
| -5,015,516,649,301,911,000
|
Fits model to a given graph and weights list
Sets :code:`self.model_` to the state of graphtool's Stochastic Block Model after fitting.
Attributes
----------
graph: graphtool.Graph
the graph to fit the model to
weights: graphtool.EdgePropertyMap<double>
the property map: edge -> weight (double) to fit the model to, if weighted variant
is selected
Returns
-------
numpy.ndarray
partition of labels, each sublist contains label indices
related to label positions in :code:`y`
|
yyskmultilearn/cluster/graphtool.py
|
fit_predict
|
yuan776/scikit-multilearn
|
python
|
def fit_predict(self, graph, weights):
"Fits model to a given graph and weights list\n\n Sets :code:`self.model_` to the state of graphtool's Stochastic Block Model the after fitting.\n\n Attributes\n ----------\n graph: graphtool.Graph\n the graph to fit the model to\n weights: graphtool.EdgePropertyMap<double>\n the property map: edge -> weight (double) to fit the model to, if weighted variant\n is selected\n\n Returns\n -------\n numpy.ndarray\n partition of labels, each sublist contains label indices\n related to label positions in :code:`y`\n "
if self.weight_model:
self.model_ = self._model_fit_function()(graph, deg_corr=self.use_degree_correlation, overlap=self.allow_overlap, state_args=dict(recs=[weights], rec_types=[self.weight_model]))
else:
self.model_ = self._model_fit_function()(graph, deg_corr=self.use_degree_correlation, overlap=self.allow_overlap)
return self._detect_communities()
|
def fit_predict(self, X, y):
"Performs clustering on y and returns list of label lists\n\n Builds a label graph using the provided graph builder's `transform` method\n on `y` and then detects communities using the selected `method`.\n\n Sets :code:`self.weights_` and :code:`self.graph_`.\n\n Parameters\n ----------\n X : None\n currently unused, left for scikit compatibility\n y : scipy.sparse\n label space of shape :code:`(n_samples, n_labels)`\n\n Returns\n -------\n arrray of arrays of label indexes (numpy.ndarray)\n label space division, each sublist represents labels that are in that community\n "
self._build_graph_instance(y)
clusters = self.model.fit_predict(self.graph_, weights=self.weights_)
return np.array([community for community in clusters if (len(community) > 0)])
| 6,311,946,468,977,824,000
|
Performs clustering on y and returns list of label lists
Builds a label graph using the provided graph builder's `transform` method
on `y` and then detects communities using the selected `method`.
Sets :code:`self.weights_` and :code:`self.graph_`.
Parameters
----------
X : None
currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
array of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
|
yyskmultilearn/cluster/graphtool.py
|
fit_predict
|
yuan776/scikit-multilearn
|
python
|
def fit_predict(self, X, y):
"Performs clustering on y and returns list of label lists\n\n Builds a label graph using the provided graph builder's `transform` method\n on `y` and then detects communities using the selected `method`.\n\n Sets :code:`self.weights_` and :code:`self.graph_`.\n\n Parameters\n ----------\n X : None\n currently unused, left for scikit compatibility\n y : scipy.sparse\n label space of shape :code:`(n_samples, n_labels)`\n\n Returns\n -------\n arrray of arrays of label indexes (numpy.ndarray)\n label space division, each sublist represents labels that are in that community\n "
self._build_graph_instance(y)
clusters = self.model.fit_predict(self.graph_, weights=self.weights_)
return np.array([community for community in clusters if (len(community) > 0)])
|
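A sketch of how the two `fit_predict` records compose; import paths mirror upstream scikit-multilearn and are assumptions for this fork (graph-tool must be installed).

# Hedged composition sketch: a graph builder turns y into a label graph,
# then the stochastic block model partitions it into communities.
from skmultilearn.cluster import (GraphToolLabelGraphClusterer,
                                  LabelCooccurrenceGraphBuilder,
                                  StochasticBlockModel)

builder = LabelCooccurrenceGraphBuilder(weighted=True, include_self_edges=False)
model = StochasticBlockModel(nested=False, use_degree_correlation=True,
                             allow_overlap=False, weight_model='real-normal')
clusterer = GraphToolLabelGraphClusterer(graph_builder=builder, model=model)
partition = clusterer.fit_predict(None, y)   # X is unused per the docstring; y is a sparse label matrix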
def __init__(self, x=0, y=0):
'\n Método de inicialização da classe. Deve inicializar os parâmetros x, y, caracter e status\n\n :param x: Posição horizontal inicial do ator\n :param y: Posição vertical inicial do ator\n '
self.y = y
self.x = x
self.status = ATIVO
| 5,349,596,299,584,515,000
|
Class initialization method. It must initialize the x, y, character and status parameters
:param x: Initial horizontal position of the actor
:param y: Initial vertical position of the actor
|
atores.py
|
__init__
|
NTMaia/pythonbirds
|
python
|
def __init__(self, x=0, y=0):
'\n Método de inicialização da classe. Deve inicializar os parâmetros x, y, caracter e status\n\n :param x: Posição horizontal inicial do ator\n :param y: Posição vertical inicial do ator\n '
self.y = y
self.x = x
self.status = ATIVO
|
def calcular_posicao(self, tempo):
'\n Método que calcula a posição do ator em determinado tempo.\n Deve-se imaginar que o tempo começa em 0 e avança de 0,01 segundos\n\n :param tempo: o tempo do jogo\n :return: posição x, y do ator\n '
return (self.x, self.y)
| -765,579,988,735,161,700
|
Method that calculates the actor's position at a given time.
Time should be imagined as starting at 0 and advancing in steps of 0.01 seconds
:param tempo: the game time
:return: the actor's x, y position
|
atores.py
|
calcular_posicao
|
NTMaia/pythonbirds
|
python
|
def calcular_posicao(self, tempo):
'\n Método que calcula a posição do ator em determinado tempo.\n Deve-se imaginar que o tempo começa em 0 e avança de 0,01 segundos\n\n :param tempo: o tempo do jogo\n :return: posição x, y do ator\n '
return (self.x, self.y)
|
def colidir(self, outro_ator, intervalo=1):
'\n Método que executa lógica de colisão entre dois atores.\n Só deve haver colisão se os dois atores tiverem seus status ativos.\n Para colisão, é considerado um quadrado, com lado igual ao parâmetro intervalo, em volta do ponto onde se\n encontra o ator. Se os atores estiverem dentro desse mesmo quadrado, seus status devem ser alterados para\n destruido, seus caracteres para destruido também.\n\n :param outro_ator: Ator a ser considerado na colisão\n :param intervalo: Intervalo a ser considerado\n :return:\n '
if ((self.status == ATIVO) and (outro_ator.status == ATIVO)):
delta_x = abs((self.x - outro_ator.x))
delta_y = abs((self.y - outro_ator.y))
if ((delta_x <= intervalo) and (delta_y <= intervalo)):
self.status = outro_ator.status = DESTRUIDO
| 8,285,704,329,622,607,000
|
Method that executes the collision logic between two actors.
A collision should only happen if both actors have active status.
For the collision check, a square with side equal to the intervalo parameter is considered around the
point where the actor is located. If the actors are inside that same square, their statuses must be
changed to destroyed, and their characters to destroyed as well.
:param outro_ator: Actor to be considered in the collision
:param intervalo: Interval to be considered
:return:
|
atores.py
|
colidir
|
NTMaia/pythonbirds
|
python
|
def colidir(self, outro_ator, intervalo=1):
'\n Método que executa lógica de colisão entre dois atores.\n Só deve haver colisão se os dois atores tiverem seus status ativos.\n Para colisão, é considerado um quadrado, com lado igual ao parâmetro intervalo, em volta do ponto onde se\n encontra o ator. Se os atores estiverem dentro desse mesmo quadrado, seus status devem ser alterados para\n destruido, seus caracteres para destruido também.\n\n :param outro_ator: Ator a ser considerado na colisão\n :param intervalo: Intervalo a ser considerado\n :return:\n '
if ((self.status == ATIVO) and (outro_ator.status == ATIVO)):
delta_x = abs((self.x - outro_ator.x))
delta_y = abs((self.y - outro_ator.y))
if ((delta_x <= intervalo) and (delta_y <= intervalo)):
self.status = outro_ator.status = DESTRUIDO
|
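A small worked check of the square-collision rule above, using the names from these records:

a, b = Ator(0, 0), Ator(1, 1)
a.colidir(b)     # |dx| = |dy| = 1 <= intervalo=1, so both become DESTRUIDO
c, d = Ator(0, 0), Ator(3, 0)
c.colidir(d)     # |dx| = 3 > intervalo=1, so both stay ATIVO
assert a.status == b.status == DESTRUIDO
assert c.status == d.status == ATIVO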
def __init__(self, x=0, y=0):
'\n Método de inicialização de pássaro.\n\n Deve chamar a inicialização de ator. Além disso, deve armazenar a posição inicial e incializar o tempo de\n lançamento e angulo de lançamento\n\n :param x:\n :param y:\n '
super().__init__(x, y)
self._x_inicial = x
self._y_inicial = y
self._tempo_de_lancamento = None
self._angulo_de_lancamento = None
| 1,180,524,789,762,413,600
|
Bird initialization method.
It must call the actor initialization. In addition, it must store the initial position and initialize the
launch time and launch angle
:param x:
:param y:
|
atores.py
|
__init__
|
NTMaia/pythonbirds
|
python
|
def __init__(self, x=0, y=0):
'\n Método de inicialização de pássaro.\n\n Deve chamar a inicialização de ator. Além disso, deve armazenar a posição inicial e incializar o tempo de\n lançamento e angulo de lançamento\n\n :param x:\n :param y:\n '
super().__init__(x, y)
self._x_inicial = x
self._y_inicial = y
self._tempo_de_lancamento = None
self._angulo_de_lancamento = None
|
def foi_lancado(self):
'\n Método que retorna verdaeira se o pássaro já foi lançado e falso caso contrário\n\n :return: booleano\n '
return (not (self._tempo_de_lancamento is None))
| 1,224,626,318,226,593,300
|
Method that returns True if the bird has already been launched and False otherwise
:return: boolean
|
atores.py
|
foi_lancado
|
NTMaia/pythonbirds
|
python
|
def foi_lancado(self):
'\n Método que retorna verdaeira se o pássaro já foi lançado e falso caso contrário\n\n :return: booleano\n '
return (not (self._tempo_de_lancamento is None))
|
def colidir_com_chao(self):
'\n Método que executa lógica de colisão com o chão. Toda vez que y for menor ou igual a 0,\n o status dos Passaro deve ser alterado para destruido, bem como o seu caracter\n\n '
pass
| 3,603,915,454,034,998,300
|
Method that executes the collision logic with the ground. Whenever y is less than or equal to 0,
the bird's status must be changed to destroyed, as well as its character
|
atores.py
|
colidir_com_chao
|
NTMaia/pythonbirds
|
python
|
def colidir_com_chao(self):
'\n Método que executa lógica de colisão com o chão. Toda vez que y for menor ou igual a 0,\n o status dos Passaro deve ser alterado para destruido, bem como o seu caracter\n\n '
pass
|
def calcular_posicao(self, tempo):
'\n Método que cálcula a posição do passaro de acordo com o tempo.\n\n Antes do lançamento o pássaro deve retornar o valor de sua posição inicial\n\n Depois do lançamento o pássaro deve calcular de acordo com sua posição inicial, velocidade escalar,\n ângulo de lancamento, gravidade (constante GRAVIDADE) e o tempo do jogo.\n\n Após a colisão, ou seja, ter seus status destruido, o pássaro deve apenas retornar a última posição calculada.\n\n :param tempo: tempo de jogo a ser calculada a posição\n :return: posição x, y\n '
if self.foi_lancado():
delta_t = (tempo - self._tempo_de_lancamento)
self._calcular_posicao_vertical(delta_t)
return super().calcular_posicao(tempo)
| -5,593,341,986,273,874,000
|
Method that calculates the bird's position according to time.
Before launch, the bird must return the value of its initial position
After launch, the bird must calculate it from its initial position, scalar speed,
launch angle, gravity (the GRAVIDADE constant) and the game time.
After a collision, that is, once its status is destroyed, the bird must simply return the last calculated position.
:param tempo: game time at which to calculate the position
:return: x, y position
|
atores.py
|
calcular_posicao
|
NTMaia/pythonbirds
|
python
|
def calcular_posicao(self, tempo):
'\n Método que cálcula a posição do passaro de acordo com o tempo.\n\n Antes do lançamento o pássaro deve retornar o valor de sua posição inicial\n\n Depois do lançamento o pássaro deve calcular de acordo com sua posição inicial, velocidade escalar,\n ângulo de lancamento, gravidade (constante GRAVIDADE) e o tempo do jogo.\n\n Após a colisão, ou seja, ter seus status destruido, o pássaro deve apenas retornar a última posição calculada.\n\n :param tempo: tempo de jogo a ser calculada a posição\n :return: posição x, y\n '
if self.foi_lancado():
delta_t = (tempo - self._tempo_de_lancamento)
self._calcular_posicao_vertical(delta_t)
return super().calcular_posicao(tempo)
|
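The docstring above describes standard projectile kinematics; a hedged sketch of the position update it implies (the scalar speed `v` and gravity `g` are parameters here because the record does not show the module constants):

import math

def posicao_projetil(x0, y0, v, angulo_graus, dt, g):
    # x(t) = x0 + v*cos(theta)*t ;  y(t) = y0 + v*sin(theta)*t - g*t**2/2
    theta = math.radians(angulo_graus)
    x = x0 + v * math.cos(theta) * dt
    y = y0 + v * math.sin(theta) * dt - (g * dt ** 2) / 2
    return x, y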
def lancar(self, angulo, tempo_de_lancamento):
'\n Lógica que lança o pássaro. Deve armazenar o ângulo e o tempo de lançamento para posteriores cálculo.\n O ângulo é passado em graus e deve ser transformado em radianos\n\n :param angulo:\n :param tempo_de_lancamento:\n :return:\n '
self._angulo_de_lancamento = angulo
self._tempo_de_lancamento = tempo_de_lancamento
| 3,750,786,522,094,327,000
|
Logic that launches the bird. It must store the angle and the launch time for later calculations.
The angle is given in degrees and must be converted to radians
:param angulo:
:param tempo_de_lancamento:
:return:
|
atores.py
|
lancar
|
NTMaia/pythonbirds
|
python
|
def lancar(self, angulo, tempo_de_lancamento):
'\n Lógica que lança o pássaro. Deve armazenar o ângulo e o tempo de lançamento para posteriores cálculo.\n O ângulo é passado em graus e deve ser transformado em radianos\n\n :param angulo:\n :param tempo_de_lancamento:\n :return:\n '
self._angulo_de_lancamento = angulo
self._tempo_de_lancamento = tempo_de_lancamento
|
def _fetch_repos_file(url, filename, job):
'Use curl to fetch a repos file and display the contents.'
job.run(['curl', '-skL', url, '-o', filename])
log(('@{bf}==>@| Contents of `%s`:' % filename))
with open(filename, 'r') as f:
print(f.read())
| -366,778,116,241,786,200
|
Use curl to fetch a repos file and display the contents.
|
ros2_batch_job/__main__.py
|
_fetch_repos_file
|
jlblancoc/ci
|
python
|
def _fetch_repos_file(url, filename, job):
job.run(['curl', '-skL', url, '-o', filename])
log(('@{bf}==>@| Contents of `%s`:' % filename))
with open(filename, 'r') as f:
print(f.read())
|
@contextmanager
def context_session(self) -> Iterator[SqlSession]:
'Override base method to include exception handling.'
try:
(yield from self.get_db())
except sa.exc.StatementError as e:
if isinstance(e.orig, psycopg2.errors.UniqueViolation):
raise errors.ConflictError('resource already exists') from e
elif isinstance(e.orig, psycopg2.errors.ForeignKeyViolation):
raise errors.ForeignKeyError('collection does not exist') from e
logger.error(e, exc_info=True)
raise errors.DatabaseError('unhandled database error')
| -4,033,125,691,219,364,000
|
Override base method to include exception handling.
|
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/session.py
|
context_session
|
AsgerPetersen/stac-fastapi
|
python
|
@contextmanager
def context_session(self) -> Iterator[SqlSession]:
try:
(yield from self.get_db())
except sa.exc.StatementError as e:
if isinstance(e.orig, psycopg2.errors.UniqueViolation):
raise errors.ConflictError('resource already exists') from e
elif isinstance(e.orig, psycopg2.errors.ForeignKeyViolation):
raise errors.ForeignKeyError('collection does not exist') from e
logger.error(e, exc_info=True)
raise errors.DatabaseError('unhandled database error')
|
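A usage sketch for the session record above; the caller shape follows the @contextmanager contract, while the ORM operations are illustrative only:

# Hypothetical caller: a UniqueViolation raised inside the block surfaces
# as errors.ConflictError instead of a raw psycopg2 error.
with session.context_session() as db:
    db.add(new_item)   # `new_item` is an illustrative ORM object
    db.commit()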
@classmethod
def create_from_env(cls):
'Create from environment.'
return cls(reader_conn_string=os.environ['READER_CONN_STRING'], writer_conn_string=os.environ['WRITER_CONN_STRING'])
| 5,541,047,790,610,243,000
|
Create from environment.
|
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/session.py
|
create_from_env
|
AsgerPetersen/stac-fastapi
|
python
|
@classmethod
def create_from_env(cls):
return cls(reader_conn_string=os.environ['READER_CONN_STRING'], writer_conn_string=os.environ['WRITER_CONN_STRING'])
|
@classmethod
def create_from_settings(cls, settings: SqlalchemySettings) -> 'Session':
'Create a Session object from settings.'
return cls(reader_conn_string=settings.reader_connection_string, writer_conn_string=settings.writer_connection_string)
| 591,955,616,614,499,500
|
Create a Session object from settings.
|
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/session.py
|
create_from_settings
|
AsgerPetersen/stac-fastapi
|
python
|
@classmethod
def create_from_settings(cls, settings: SqlalchemySettings) -> 'Session':
return cls(reader_conn_string=settings.reader_connection_string, writer_conn_string=settings.writer_connection_string)
|
def __attrs_post_init__(self):
'Post init handler.'
self.reader: FastAPISessionMaker = FastAPISessionMaker(self.reader_conn_string)
self.writer: FastAPISessionMaker = FastAPISessionMaker(self.writer_conn_string)
| 9,184,957,250,236,342,000
|
Post init handler.
|
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/session.py
|
__attrs_post_init__
|
AsgerPetersen/stac-fastapi
|
python
|
def __attrs_post_init__(self):
self.reader: FastAPISessionMaker = FastAPISessionMaker(self.reader_conn_string)
self.writer: FastAPISessionMaker = FastAPISessionMaker(self.writer_conn_string)
|
def MycobacteriumSp15544247(directed: bool=False, preprocess: bool=True, load_nodes: bool=True, verbose: int=2, cache: bool=True, cache_path: str='graphs/string', version: str='links.v11.5', **additional_graph_kwargs: Dict) -> Graph:
'Return new instance of the Mycobacterium sp. 1554424.7 graph.\n\n The graph is automatically retrieved from the STRING repository.\t\n\n Parameters\n -------------------\n directed: bool = False\n Wether to load the graph as directed or undirected.\n By default false.\n preprocess: bool = True\n Whether to preprocess the graph to be loaded in \n optimal time and memory.\n load_nodes: bool = True,\n Whether to load the nodes vocabulary or treat the nodes\n simply as a numeric range.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache: bool = True\n Whether to use cache, i.e. download files only once\n and preprocess them only once.\n cache_path: str = "graphs"\n Where to store the downloaded graphs.\n version: str = "links.v11.5"\n The version of the graph to retrieve.\t\t\n\tThe available versions are:\n\t\t\t- homology.v11.5\n\t\t\t- physical.links.v11.5\n\t\t\t- links.v11.5\n additional_graph_kwargs: Dict\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Mycobacterium sp. 1554424.7 graph.\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t```bib\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t```\n '
return AutomaticallyRetrievedGraph(graph_name='MycobacteriumSp15544247', repository='string', version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
| 7,044,234,832,566,808,000
|
Return new instance of the Mycobacterium sp. 1554424.7 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of Mycobacterium sp. 1554424.7 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
|
bindings/python/ensmallen/datasets/string/mycobacteriumsp15544247.py
|
MycobacteriumSp15544247
|
AnacletoLAB/ensmallen
|
python
|
def MycobacteriumSp15544247(directed: bool=False, preprocess: bool=True, load_nodes: bool=True, verbose: int=2, cache: bool=True, cache_path: str='graphs/string', version: str='links.v11.5', **additional_graph_kwargs: Dict) -> Graph:
'Return new instance of the Mycobacterium sp. 1554424.7 graph.\n\n The graph is automatically retrieved from the STRING repository.\t\n\n Parameters\n -------------------\n directed: bool = False\n Wether to load the graph as directed or undirected.\n By default false.\n preprocess: bool = True\n Whether to preprocess the graph to be loaded in \n optimal time and memory.\n load_nodes: bool = True,\n Whether to load the nodes vocabulary or treat the nodes\n simply as a numeric range.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache: bool = True\n Whether to use cache, i.e. download files only once\n and preprocess them only once.\n cache_path: str = "graphs"\n Where to store the downloaded graphs.\n version: str = "links.v11.5"\n The version of the graph to retrieve.\t\t\n\tThe available versions are:\n\t\t\t- homology.v11.5\n\t\t\t- physical.links.v11.5\n\t\t\t- links.v11.5\n additional_graph_kwargs: Dict\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Mycobacterium sp. 1554424.7 graph.\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t```bib\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t```\n '
return AutomaticallyRetrievedGraph(graph_name='MycobacteriumSp15544247', repository='string', version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
|
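A minimal retrieval sketch for the STRING loader above; the import path follows the record's `path` field:

from ensmallen.datasets.string import MycobacteriumSp15544247

graph = MycobacteriumSp15544247(directed=False, version='links.v11.5')
print(graph)   # the returned Graph object reports its nodes and edges when printed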
@format_response
@handle_exceptions
async def get_tasks(self, request):
'\n ---\n description: get all tasks associated with the specified step.\n tags:\n - Tasks\n parameters:\n - name: "flow_id"\n in: "path"\n description: "flow_id"\n required: true\n type: "string"\n - name: "run_number"\n in: "path"\n description: "run_number"\n required: true\n type: "integer"\n - name: "step_name"\n in: "path"\n description: "step_name"\n required: true\n type: "string"\n produces:\n - text/plain\n responses:\n "200":\n description: successful operation. Return tasks\n "405":\n description: invalid HTTP Method\n '
flow_name = request.match_info.get('flow_id')
run_number = request.match_info.get('run_number')
step_name = request.match_info.get('step_name')
return (await self._async_table.get_tasks(flow_name, run_number, step_name))
| 8,151,153,275,299,854,000
|
---
description: get all tasks associated with the specified step.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
produces:
- text/plain
responses:
"200":
description: successful operation. Return tasks
"405":
description: invalid HTTP Method
|
metadata_service/api/task.py
|
get_tasks
|
ferras/metaflow-service-clone
|
python
|
@format_response
@handle_exceptions
async def get_tasks(self, request):
'\n ---\n description: get all tasks associated with the specified step.\n tags:\n - Tasks\n parameters:\n - name: "flow_id"\n in: "path"\n description: "flow_id"\n required: true\n type: "string"\n - name: "run_number"\n in: "path"\n description: "run_number"\n required: true\n type: "integer"\n - name: "step_name"\n in: "path"\n description: "step_name"\n required: true\n type: "string"\n produces:\n - text/plain\n responses:\n "200":\n description: successful operation. Return tasks\n "405":\n description: invalid HTTP Method\n '
flow_name = request.match_info.get('flow_id')
run_number = request.match_info.get('run_number')
step_name = request.match_info.get('step_name')
return (await self._async_table.get_tasks(flow_name, run_number, step_name))
|
@format_response
@handle_exceptions
async def get_task(self, request):
'\n ---\n description: get all artifacts associated with the specified task.\n tags:\n - Tasks\n parameters:\n - name: "flow_id"\n in: "path"\n description: "flow_id"\n required: true\n type: "string"\n - name: "run_number"\n in: "path"\n description: "run_number"\n required: true\n type: "integer"\n - name: "step_name"\n in: "path"\n description: "step_name"\n required: true\n type: "string"\n - name: "task_id"\n in: "path"\n description: "task_id"\n required: true\n type: "integer"\n produces:\n - text/plain\n responses:\n "200":\n description: successful operation. Return task\n "405":\n description: invalid HTTP Method\n '
flow_name = request.match_info.get('flow_id')
run_number = request.match_info.get('run_number')
step_name = request.match_info.get('step_name')
task_id = request.match_info.get('task_id')
return (await self._async_table.get_task(flow_name, run_number, step_name, task_id))
| -9,094,517,948,369,713,000
|
---
description: get all artifacts associated with the specified task.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
- name: "task_id"
in: "path"
description: "task_id"
required: true
type: "integer"
produces:
- text/plain
responses:
"200":
description: successful operation. Return task
"405":
description: invalid HTTP Method
|
metadata_service/api/task.py
|
get_task
|
ferras/metaflow-service-clone
|
python
|
@format_response
@handle_exceptions
async def get_task(self, request):
'\n ---\n description: get all artifacts associated with the specified task.\n tags:\n - Tasks\n parameters:\n - name: "flow_id"\n in: "path"\n description: "flow_id"\n required: true\n type: "string"\n - name: "run_number"\n in: "path"\n description: "run_number"\n required: true\n type: "integer"\n - name: "step_name"\n in: "path"\n description: "step_name"\n required: true\n type: "string"\n - name: "task_id"\n in: "path"\n description: "task_id"\n required: true\n type: "integer"\n produces:\n - text/plain\n responses:\n "200":\n description: successful operation. Return task\n "405":\n description: invalid HTTP Method\n '
flow_name = request.match_info.get('flow_id')
run_number = request.match_info.get('run_number')
step_name = request.match_info.get('step_name')
task_id = request.match_info.get('task_id')
return (await self._async_table.get_task(flow_name, run_number, step_name, task_id))
|
@format_response
@handle_exceptions
async def create_task(self, request):
'\n ---\n description: This end-point allow to test that service is up.\n tags:\n - Tasks\n parameters:\n - name: "flow_id"\n in: "path"\n description: "flow_id"\n required: true\n type: "string"\n - name: "run_number"\n in: "path"\n description: "run_number"\n required: true\n type: "integer"\n - name: "step_name"\n in: "path"\n description: "step_name"\n required: true\n type: "string"\n - name: "body"\n in: "body"\n description: "body"\n required: true\n schema:\n type: object\n properties:\n user_name:\n type: string\n tags:\n type: object\n system_tags:\n type: object\n produces:\n - \'text/plain\'\n responses:\n "202":\n description: successful operation. Return newly registered task\n "405":\n description: invalid HTTP Method\n '
flow_id = request.match_info.get('flow_id')
run_number = request.match_info.get('run_number')
step_name = request.match_info.get('step_name')
body = (await read_body(request.content))
user = body.get('user_name')
tags = body.get('tags')
system_tags = body.get('system_tags')
task = TaskRow(flow_id=flow_id, run_number=run_number, step_name=step_name, user_name=user, tags=tags, system_tags=system_tags)
return (await self._async_table.add_task(task))
| -7,596,690,592,353,255,000
|
---
description: This end-point allows testing that the service is up.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
- name: "body"
in: "body"
description: "body"
required: true
schema:
type: object
properties:
user_name:
type: string
tags:
type: object
system_tags:
type: object
produces:
- 'text/plain'
responses:
"202":
description: successful operation. Return newly registered task
"405":
description: invalid HTTP Method
|
metadata_service/api/task.py
|
create_task
|
ferras/metaflow-service-clone
|
python
|
@format_response
@handle_exceptions
async def create_task(self, request):
'\n ---\n description: This end-point allow to test that service is up.\n tags:\n - Tasks\n parameters:\n - name: "flow_id"\n in: "path"\n description: "flow_id"\n required: true\n type: "string"\n - name: "run_number"\n in: "path"\n description: "run_number"\n required: true\n type: "integer"\n - name: "step_name"\n in: "path"\n description: "step_name"\n required: true\n type: "string"\n - name: "body"\n in: "body"\n description: "body"\n required: true\n schema:\n type: object\n properties:\n user_name:\n type: string\n tags:\n type: object\n system_tags:\n type: object\n produces:\n - \'text/plain\'\n responses:\n "202":\n description: successful operation. Return newly registered task\n "405":\n description: invalid HTTP Method\n '
flow_id = request.match_info.get('flow_id')
run_number = request.match_info.get('run_number')
step_name = request.match_info.get('step_name')
body = (await read_body(request.content))
user = body.get('user_name')
tags = body.get('tags')
system_tags = body.get('system_tags')
task = TaskRow(flow_id=flow_id, run_number=run_number, step_name=step_name, user_name=user, tags=tags, system_tags=system_tags)
return (await self._async_table.add_task(task))
|
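The three handlers above form a small REST surface; a hypothetical client sequence (the route shapes are inferred from the path parameters and are not verified against the service's router):

import requests

base = 'http://localhost:8080/flows/MyFlow/runs/42/steps/start'
requests.post(f'{base}/task', json={'user_name': 'alice', 'tags': {}, 'system_tags': {}})
print(requests.get(f'{base}/tasks').json())   # list tasks for the step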
def csv_to_generator(csv_file_path):
'\n Parse your CSV file into generator\n '
for row in DictReader(open(csv_file_path, 'r')):
point = Point('financial-analysis').tag('type', 'vix-daily').field('open', float(row['VIX Open'])).field('high', float(row['VIX High'])).field('low', float(row['VIX Low'])).field('close', float(row['VIX Close'])).time(row['Date'])
(yield point)
| 8,230,420,822,704,317,000
|
Parse your CSV file into a generator
|
examples/asynchronous_batching.py
|
csv_to_generator
|
bonitoo-io/influxdb-client-python
|
python
|
def csv_to_generator(csv_file_path):
'\n \n '
for row in DictReader(open(csv_file_path, 'r')):
point = Point('financial-analysis').tag('type', 'vix-daily').field('open', float(row['VIX Open'])).field('high', float(row['VIX High'])).field('low', float(row['VIX Low'])).field('close', float(row['VIX Close'])).time(row['Date'])
(yield point)
|
async def async_write(batch):
'\n Prepare async task\n '
(await write_api.write(bucket='my-bucket', record=batch))
return batch
| 3,711,358,592,541,441,000
|
Prepare async task
|
examples/asynchronous_batching.py
|
async_write
|
bonitoo-io/influxdb-client-python
|
python
|
async def async_write(batch):
'\n \n '
(await write_api.write(bucket='my-bucket', record=batch))
return batch
|
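async_write only wraps a single write call, so the interesting part is how batches are fanned out concurrently. A minimal sketch follows, assuming write_api is an async-capable write API (for example one obtained from InfluxDBClientAsync in recent influxdb-client releases) and that a 500-point batch size is acceptable; the batched helper is hypothetical, not part of the library.

```python
# Hedged concurrency sketch around async_write. The batch size, source
# generator, and the `batched` helper are assumptions; async_write comes
# from the record above and must close over an async write API.
import asyncio
from itertools import islice


def batched(iterable, size):
    """Yield successive lists of up to `size` items from `iterable`."""
    iterator = iter(iterable)
    while True:
        chunk = list(islice(iterator, size))
        if not chunk:
            return
        yield chunk


async def main():
    # Fan all batches out at once and wait for every write to finish.
    batches = batched(csv_to_generator("vix-daily-history.csv"), 500)
    written = await asyncio.gather(*(async_write(batch) for batch in batches))
    print(f"wrote {len(written)} batches")


asyncio.run(main())
```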
def __get_language_extensions(self):
    '\n :returns: A dictionary of the considered language extensions\n '
return {'C++': self.cpp_extensions, 'C': self.c_extensions, 'Rust': self.rust_extensions, 'Ruby': self.ruby_extensions, 'Java': self.java_extensions, 'Go': self.go_extensions, 'PHP': self.php_extensions, 'JavaScript': self.js_extensions, 'Objective-C': self.objective_c_extensions, 'Swift': self.swift_extensions, 'C#': self.c_sharp_extensions, 'Python': self.python_extensions}
| -3,165,527,756,211,458,000
|
:returns: A dictionary of the considered language extensions
|
gitScrabber/scrabTasks/file/languageDetector.py
|
__get_language_extensions
|
Eyenseo/gitScrabber
|
python
|
def __get_language_extensions(self):
'\n \n '
return {'C++': self.cpp_extensions, 'C': self.c_extensions, 'Rust': self.rust_extensions, 'Ruby': self.ruby_extensions, 'Java': self.java_extensions, 'Go': self.go_extensions, 'PHP': self.php_extensions, 'JavaScript': self.js_extensions, 'Objective-C': self.objective_c_extensions, 'Swift': self.swift_extensions, 'C#': self.c_sharp_extensions, 'Python': self.python_extensions}
|
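A mapping from language to extension list is usually consumed by inverting the lookup: take a filename's extension and scan for the language that claims it. The sketch below uses illustrative extension lists, since the record does not show the detector's own self.*_extensions values, and it assumes the stored extensions carry no leading dot.

```python
# Hypothetical lookup against an extension map shaped like the return
# value of __get_language_extensions. The sample lists are placeholders.
import os


def guess_language(filename, language_extensions):
    """Return the first language whose extension list matches, else None."""
    extension = os.path.splitext(filename)[1].lstrip(".").lower()
    for language, extensions in language_extensions.items():
        if extension in extensions:
            return language
    return None


language_extensions = {
    "Python": ["py"],
    "Rust": ["rs"],
    "C++": ["cpp", "hpp", "cc"],
}
print(guess_language("src/main.cc", language_extensions))  # -> C++
```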
def __get_files_per_language(self):
    '\n :returns: A default dictionary of the considered languages, their\n extensions and the number of files that have that extension\n (default=0)\n '
return {'C++': {extension: 0 for extension in self.cpp_extensions}, 'C': {extension: 0 for extension in self.c_extensions}, 'Rust': {extension: 0 for extension in self.rust_extensions}, 'Ruby': {extension: 0 for extension in self.ruby_extensions}, 'Java': {extension: 0 for extension in self.java_extensions}, 'Go': {extension: 0 for extension in self.go_extensions}, 'PHP': {extension: 0 for extension in self.php_extensions}, 'JavaScript': {extension: 0 for extension in self.js_extensions}, 'Objective-C': {extension: 0 for extension in self.objective_c_extensions}, 'Swift': {extension: 0 for extension in self.swift_extensions}, 'C#': {extension: 0 for extension in self.c_sharp_extensions}, 'Python': {extension: 0 for extension in self.python_extensions}}
| -689,377,611,099,756,500
|
:returns: A default dictionary of the considered languages, their
extensions and the number of files that have that extension
(default=0)
|
gitScrabber/scrabTasks/file/languageDetector.py
|
__get_files_per_language
|
Eyenseo/gitScrabber
|
python
|
def __get_files_per_language(self):
    '\n :returns: A default dictionary of the considered languages, their\n extensions and the number of files that have that extension\n (default=0)\n '
return {'C++': {extension: 0 for extension in self.cpp_extensions}, 'C': {extension: 0 for extension in self.c_extensions}, 'Rust': {extension: 0 for extension in self.rust_extensions}, 'Ruby': {extension: 0 for extension in self.ruby_extensions}, 'Java': {extension: 0 for extension in self.java_extensions}, 'Go': {extension: 0 for extension in self.go_extensions}, 'PHP': {extension: 0 for extension in self.php_extensions}, 'JavaScript': {extension: 0 for extension in self.js_extensions}, 'Objective-C': {extension: 0 for extension in self.objective_c_extensions}, 'Swift': {extension: 0 for extension in self.swift_extensions}, 'C#': {extension: 0 for extension in self.c_sharp_extensions}, 'Python': {extension: 0 for extension in self.python_extensions}}
|
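The per-language counter dictionaries returned by __get_files_per_language are presumably filled by walking a repository tree and bumping each matching extension. A sketch under stated assumptions follows: the directory walk, the counter shape, and the handling of extensions shared between languages (every language that lists the extension gets the increment) are guesses, not the detector's documented behaviour.

```python
# Hedged sketch of filling a counts structure shaped like the return
# value of __get_files_per_language. Extension spellings and the walk
# root are placeholders.
import os


def count_files(root, files_per_language):
    for _dirpath, _dirnames, filenames in os.walk(root):
        for filename in filenames:
            extension = os.path.splitext(filename)[1].lstrip(".").lower()
            # An extension such as "h" may belong to several languages;
            # this sketch credits every language that lists it.
            for counts in files_per_language.values():
                if extension in counts:
                    counts[extension] += 1
    return files_per_language


files_per_language = {"Python": {"py": 0}, "C": {"c": 0, "h": 0}}
print(count_files(".", files_per_language))
```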