body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
bbf23fd82b262dd642541e44bad086270706f2576ba107ae7f2487a228d3a544
def init(mpstate): 'Initialize module' return ProxyModule(mpstate)
Initialize module
sitl_server/sitl-proxy/sitl_proxy/proxy.py
init
GaloisInc/planning-synthesis
1
python
def init(mpstate): return ProxyModule(mpstate)
def init(mpstate): return ProxyModule(mpstate)<|docstring|>Initialize module<|endoftext|>
7329a121045f91a28a7a249db1102b00b9e8d26efe11c1edd566f048fa31475a
def test_simple_schema(): 'Test with simple schema.' assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?'}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}])
Test with simple schema.
tests/addons/test_ui_schema.py
test_simple_schema
gdrapp/supervisor
24
python
def test_simple_schema(): assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?'}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}])
def test_simple_schema(): assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?'}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}])<|docstring|>Test with simple schema.<|endoftext|>
9d7698ec1d71f8f509e01e68fcde01bd692802a6accf01ef1096fe303ea2f7e2
def test_group_schema(): 'Test with group schema.' assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?', 'extended': {'name': 'str', 'data': ['str'], 'path': 'str?'}}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}, {'multiple': False, 'name': 'extended', 'optional': True, 'schema': [{'name': 'name', 'required': True, 'type': 'string'}, {'multiple': True, 'name': 'data', 'required': True, 'type': 'string'}, {'name': 'path', 'optional': True, 'type': 'string'}], 'type': 'schema'}])
Test with group schema.
tests/addons/test_ui_schema.py
test_group_schema
gdrapp/supervisor
24
python
def test_group_schema(): assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?', 'extended': {'name': 'str', 'data': ['str'], 'path': 'str?'}}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}, {'multiple': False, 'name': 'extended', 'optional': True, 'schema': [{'name': 'name', 'required': True, 'type': 'string'}, {'multiple': True, 'name': 'data', 'required': True, 'type': 'string'}, {'name': 'path', 'optional': True, 'type': 'string'}], 'type': 'schema'}])
def test_group_schema(): assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?', 'extended': {'name': 'str', 'data': ['str'], 'path': 'str?'}}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}, {'multiple': False, 'name': 'extended', 'optional': True, 'schema': [{'name': 'name', 'required': True, 'type': 'string'}, {'multiple': True, 'name': 'data', 'required': True, 'type': 'string'}, {'name': 'path', 'optional': True, 'type': 'string'}], 'type': 'schema'}])<|docstring|>Test with group schema.<|endoftext|>
c80d1647918260ebef6e9cc94b3d646d4304343d11c97abac0593511aa821935
def test_group_list(): 'Test with group schema.' assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?', 'extended': [{'name': 'str', 'data': ['str?'], 'path': 'str?'}]}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}, {'multiple': True, 'name': 'extended', 'optional': True, 'schema': [{'name': 'name', 'required': True, 'type': 'string'}, {'multiple': True, 'name': 'data', 'optional': True, 'type': 'string'}, {'name': 'path', 'optional': True, 'type': 'string'}], 'type': 'schema'}])
Test with group schema.
tests/addons/test_ui_schema.py
test_group_list
gdrapp/supervisor
24
python
def test_group_list(): assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?', 'extended': [{'name': 'str', 'data': ['str?'], 'path': 'str?'}]}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}, {'multiple': True, 'name': 'extended', 'optional': True, 'schema': [{'name': 'name', 'required': True, 'type': 'string'}, {'multiple': True, 'name': 'data', 'optional': True, 'type': 'string'}, {'name': 'path', 'optional': True, 'type': 'string'}], 'type': 'schema'}])
def test_group_list(): assert (schema_ui_options({'name': 'str', 'password': 'password', 'fires': 'bool', 'alias': 'str?', 'extended': [{'name': 'str', 'data': ['str?'], 'path': 'str?'}]}) == [{'name': 'name', 'required': True, 'type': 'string'}, {'format': 'password', 'name': 'password', 'required': True, 'type': 'string'}, {'name': 'fires', 'required': True, 'type': 'boolean'}, {'name': 'alias', 'optional': True, 'type': 'string'}, {'multiple': True, 'name': 'extended', 'optional': True, 'schema': [{'name': 'name', 'required': True, 'type': 'string'}, {'multiple': True, 'name': 'data', 'optional': True, 'type': 'string'}, {'name': 'path', 'optional': True, 'type': 'string'}], 'type': 'schema'}])<|docstring|>Test with group schema.<|endoftext|>
cf5c796ad4e66d7335565b08de8d4f988a38ec0cefc192995a34c0c906b678f0
def cols_unique(data: pd.DataFrame, cols: list) -> np.ndarray: 'Find unique values across multiple columns (of same type)\n\n Alex M Trueman, 2019-07-12\n\n This function looks at the values in all the listed columns and\n creates a sorted array of their unique values. `NaNs` are not\n included in this list.\n ' uvals = np.concatenate(tuple((data[i].unique() for i in cols)), axis=0) uvals = uvals[(~ pd.isnull(uvals))] uvals = np.unique(uvals) return uvals
Find unique values across multiple columns (of same type) Alex M Trueman, 2019-07-12 This function looks at the values in all the listed columns and creates a sorted array of their unique values. `NaNs` are not included in this list.
myutils/cols_unique.py
cols_unique
truemoid/myutils
0
python
def cols_unique(data: pd.DataFrame, cols: list) -> np.ndarray: 'Find unique values across multiple columns (of same type)\n\n Alex M Trueman, 2019-07-12\n\n This function looks at the values in all the listed columns and\n creates a sorted array of their unique values. `NaNs` are not\n included in this list.\n ' uvals = np.concatenate(tuple((data[i].unique() for i in cols)), axis=0) uvals = uvals[(~ pd.isnull(uvals))] uvals = np.unique(uvals) return uvals
def cols_unique(data: pd.DataFrame, cols: list) -> np.ndarray: 'Find unique values across multiple columns (of same type)\n\n Alex M Trueman, 2019-07-12\n\n This function looks at the values in all the listed columns and\n creates a sorted array of their unique values. `NaNs` are not\n included in this list.\n ' uvals = np.concatenate(tuple((data[i].unique() for i in cols)), axis=0) uvals = uvals[(~ pd.isnull(uvals))] uvals = np.unique(uvals) return uvals<|docstring|>Find unique values across multiple columns (of same type) Alex M Trueman, 2019-07-12 This function looks at the values in all the listed columns and creates a sorted array of their unique values. `NaNs` are not included in this list.<|endoftext|>
6e7aa6e51da34b7218f8e251c0fed2618b4dbba4bc863de537e062033b67dd0e
def coilpack_sense_forward(x, smap, om, interpob, interp_mats=None): 'Coil-packing SENSE-NUFFT operation.\n\n This function applies "coil-packing" SENSE-NUFFT operation, which is just a\n normal SENSE-NUFFT operation with a reshape command that puts the batch\n dimension into the coil dimension prior to the NUFFT. With the\n implementation in the package, the code will then broadcast NUFFT\n operations across the combined dimension, which is extremely fast.\n\n This is the fastest way to do NUFFT operations on a slice stack of\n multi-coil data, where the slices are stored in the batch dimension.\n\n Args:\n x (tensor): The input images of size (nbatch, ncoil, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n om (tensor): The k-space trajectory in units of radians/voxel of size\n (1, ndims, klength).\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2,\n klength).\n ' ncoil = smap.shape[1] y = complex_mult(x, smap, dim=2) new_sz = ((1, (- 1), 2) + tuple(smap.shape[3:])) y = y.view(*new_sz) y = KbNufftFunction.apply(y, om, interpob, interp_mats) new_sz = ((- 1), ncoil, 2, y.shape[(- 1)]) y = y.view(*new_sz) return y
Coil-packing SENSE-NUFFT operation. This function applies "coil-packing" SENSE-NUFFT operation, which is just a normal SENSE-NUFFT operation with a reshape command that puts the batch dimension into the coil dimension prior to the NUFFT. With the implementation in the package, the code will then broadcast NUFFT operations across the combined dimension, which is extremely fast. This is the fastest way to do NUFFT operations on a slice stack of multi-coil data, where the slices are stored in the batch dimension. Args: x (tensor): The input images of size (nbatch, ncoil, 2) + im_size. smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. om (tensor): The k-space trajectory in units of radians/voxel of size (1, ndims, klength). interpob (dictionary): A NUFFT interpolation object. interp_mats (dictionary, default=None): A dictionary of sparse interpolation matrices. If not None, the NUFFT operation will use the matrices for interpolation. Returns: tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2, klength).
torchkbnufft/mri/sensenufft_functions.py
coilpack_sense_forward
zaccharieramzi/torchkbnufft
1
python
def coilpack_sense_forward(x, smap, om, interpob, interp_mats=None): 'Coil-packing SENSE-NUFFT operation.\n\n This function applies "coil-packing" SENSE-NUFFT operation, which is just a\n normal SENSE-NUFFT operation with a reshape command that puts the batch\n dimension into the coil dimension prior to the NUFFT. With the\n implementation in the package, the code will then broadcast NUFFT\n operations across the combined dimension, which is extremely fast.\n\n This is the fastest way to do NUFFT operations on a slice stack of\n multi-coil data, where the slices are stored in the batch dimension.\n\n Args:\n x (tensor): The input images of size (nbatch, ncoil, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n om (tensor): The k-space trajectory in units of radians/voxel of size\n (1, ndims, klength).\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2,\n klength).\n ' ncoil = smap.shape[1] y = complex_mult(x, smap, dim=2) new_sz = ((1, (- 1), 2) + tuple(smap.shape[3:])) y = y.view(*new_sz) y = KbNufftFunction.apply(y, om, interpob, interp_mats) new_sz = ((- 1), ncoil, 2, y.shape[(- 1)]) y = y.view(*new_sz) return y
def coilpack_sense_forward(x, smap, om, interpob, interp_mats=None): 'Coil-packing SENSE-NUFFT operation.\n\n This function applies "coil-packing" SENSE-NUFFT operation, which is just a\n normal SENSE-NUFFT operation with a reshape command that puts the batch\n dimension into the coil dimension prior to the NUFFT. With the\n implementation in the package, the code will then broadcast NUFFT\n operations across the combined dimension, which is extremely fast.\n\n This is the fastest way to do NUFFT operations on a slice stack of\n multi-coil data, where the slices are stored in the batch dimension.\n\n Args:\n x (tensor): The input images of size (nbatch, ncoil, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n om (tensor): The k-space trajectory in units of radians/voxel of size\n (1, ndims, klength).\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2,\n klength).\n ' ncoil = smap.shape[1] y = complex_mult(x, smap, dim=2) new_sz = ((1, (- 1), 2) + tuple(smap.shape[3:])) y = y.view(*new_sz) y = KbNufftFunction.apply(y, om, interpob, interp_mats) new_sz = ((- 1), ncoil, 2, y.shape[(- 1)]) y = y.view(*new_sz) return y<|docstring|>Coil-packing SENSE-NUFFT operation. This function applies "coil-packing" SENSE-NUFFT operation, which is just a normal SENSE-NUFFT operation with a reshape command that puts the batch dimension into the coil dimension prior to the NUFFT. With the implementation in the package, the code will then broadcast NUFFT operations across the combined dimension, which is extremely fast. This is the fastest way to do NUFFT operations on a slice stack of multi-coil data, where the slices are stored in the batch dimension. 
Args: x (tensor): The input images of size (nbatch, ncoil, 2) + im_size. smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. om (tensor): The k-space trajectory in units of radians/voxel of size (1, ndims, klength). interpob (dictionary): A NUFFT interpolation object. interp_mats (dictionary, default=None): A dictionary of sparse interpolation matrices. If not None, the NUFFT operation will use the matrices for interpolation. Returns: tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2, klength).<|endoftext|>
a5b528a7760a84b56e39bf4d094673b9089b45350158972e4b75fa2a2e711971
def coilpack_sense_backward(y, smap, om, interpob, interp_mats=None): 'Coil-packing SENSE-NUFFT adjoint operation.\n\n This function applies "coil-packing" SENSE-NUFFT adjoint operation, which is\n a normal SENSE-NUFFT adjoint operation with a reshape command that puts the\n batch dimension into the coil dimension prior to the NUFFT. With the\n implementation in the package, the code will then broadcast NUFFT operations\n across the combined dimension, which is extremely fast.\n\n This is the fastest way to do NUFFT operations on a slice stack of\n multi-coil data, where the slices are stored in the batch dimension.\n\n Args:\n y (tensor): The input images of size (nbatch, ncoil, 2, klength).\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n om (tensor): The k-space trajectory in units of radians/voxel of size\n (1, ndims, klength).\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) +\n im_size.\n ' ncoil = smap.shape[1] new_sz = (1, (- 1), 2, y.shape[(- 1)]) y = y.view(*new_sz) x = AdjKbNufftFunction.apply(y, om, interpob, interp_mats) new_sz = (((- 1), ncoil, 2) + tuple(smap.shape[3:])) x = x.view(*new_sz) x = torch.sum(conj_complex_mult(x, smap, dim=2), dim=1, keepdim=True) return x
Coil-packing SENSE-NUFFT adjoint operation. This function applies "coil-packing" SENSE-NUFFT adjoint operation, which is a normal SENSE-NUFFT adjoint operation with a reshape command that puts the batch dimension into the coil dimension prior to the NUFFT. With the implementation in the package, the code will then broadcast NUFFT operations across the combined dimension, which is extremely fast. This is the fastest way to do NUFFT operations on a slice stack of multi-coil data, where the slices are stored in the batch dimension. Args: y (tensor): The input images of size (nbatch, ncoil, 2, klength). smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. om (tensor): The k-space trajectory in units of radians/voxel of size (1, ndims, klength). interpob (dictionary): A NUFFT interpolation object. interp_mats (dictionary, default=None): A dictionary of sparse interpolation matrices. If not None, the NUFFT operation will use the matrices for interpolation. Returns: tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) + im_size.
torchkbnufft/mri/sensenufft_functions.py
coilpack_sense_backward
zaccharieramzi/torchkbnufft
1
python
def coilpack_sense_backward(y, smap, om, interpob, interp_mats=None): 'Coil-packing SENSE-NUFFT adjoint operation.\n\n This function applies "coil-packing" SENSE-NUFFT adjoint operation, which is\n a normal SENSE-NUFFT adjoint operation with a reshape command that puts the\n batch dimension into the coil dimension prior to the NUFFT. With the\n implementation in the package, the code will then broadcast NUFFT operations\n across the combined dimension, which is extremely fast.\n\n This is the fastest way to do NUFFT operations on a slice stack of\n multi-coil data, where the slices are stored in the batch dimension.\n\n Args:\n y (tensor): The input images of size (nbatch, ncoil, 2, klength).\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n om (tensor): The k-space trajectory in units of radians/voxel of size\n (1, ndims, klength).\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) +\n im_size.\n ' ncoil = smap.shape[1] new_sz = (1, (- 1), 2, y.shape[(- 1)]) y = y.view(*new_sz) x = AdjKbNufftFunction.apply(y, om, interpob, interp_mats) new_sz = (((- 1), ncoil, 2) + tuple(smap.shape[3:])) x = x.view(*new_sz) x = torch.sum(conj_complex_mult(x, smap, dim=2), dim=1, keepdim=True) return x
def coilpack_sense_backward(y, smap, om, interpob, interp_mats=None): 'Coil-packing SENSE-NUFFT adjoint operation.\n\n This function applies "coil-packing" SENSE-NUFFT adjoint operation, which is\n a normal SENSE-NUFFT adjoint operation with a reshape command that puts the\n batch dimension into the coil dimension prior to the NUFFT. With the\n implementation in the package, the code will then broadcast NUFFT operations\n across the combined dimension, which is extremely fast.\n\n This is the fastest way to do NUFFT operations on a slice stack of\n multi-coil data, where the slices are stored in the batch dimension.\n\n Args:\n y (tensor): The input images of size (nbatch, ncoil, 2, klength).\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n om (tensor): The k-space trajectory in units of radians/voxel of size\n (1, ndims, klength).\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) +\n im_size.\n ' ncoil = smap.shape[1] new_sz = (1, (- 1), 2, y.shape[(- 1)]) y = y.view(*new_sz) x = AdjKbNufftFunction.apply(y, om, interpob, interp_mats) new_sz = (((- 1), ncoil, 2) + tuple(smap.shape[3:])) x = x.view(*new_sz) x = torch.sum(conj_complex_mult(x, smap, dim=2), dim=1, keepdim=True) return x<|docstring|>Coil-packing SENSE-NUFFT adjoint operation. This function applies "coil-packing" SENSE-NUFFT adjoint operation, which is a normal SENSE-NUFFT adjoint operation with a reshape command that puts the batch dimension into the coil dimension prior to the NUFFT. With the implementation in the package, the code will then broadcast NUFFT operations across the combined dimension, which is extremely fast. 
This is the fastest way to do NUFFT operations on a slice stack of multi-coil data, where the slices are stored in the batch dimension. Args: y (tensor): The input images of size (nbatch, ncoil, 2, klength). smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. om (tensor): The k-space trajectory in units of radians/voxel of size (1, ndims, klength). interpob (dictionary): A NUFFT interpolation object. interp_mats (dictionary, default=None): A dictionary of sparse interpolation matrices. If not None, the NUFFT operation will use the matrices for interpolation. Returns: tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) + im_size.<|endoftext|>
04874da7a651f3181cb26fef2915d3446f21ade7db45ec5123a41dfc58e7465f
def sense_forward(x, smap, om, interpob, interp_mats=None): 'SENSE-NUFFT operation.\n\n Args:\n x (tensor): The input images of size (nbatch, ncoil, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2,\n klength).\n ' if isinstance(smap, torch.Tensor): dtype = smap.dtype device = smap.device y = torch.zeros(smap.shape, dtype=dtype, device=device) else: y = ([None] * len(smap)) for (i, im) in enumerate(x): y[i] = complex_mult(im, smap[i], dim=1) y = KbNufftFunction.apply(y, om, interpob, interp_mats) return y
SENSE-NUFFT operation. Args: x (tensor): The input images of size (nbatch, ncoil, 2) + im_size. smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. interpob (dictionary): A NUFFT interpolation object. interp_mats (dictionary, default=None): A dictionary of sparse interpolation matrices. If not None, the NUFFT operation will use the matrices for interpolation. Returns: tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2, klength).
torchkbnufft/mri/sensenufft_functions.py
sense_forward
zaccharieramzi/torchkbnufft
1
python
def sense_forward(x, smap, om, interpob, interp_mats=None): 'SENSE-NUFFT operation.\n\n Args:\n x (tensor): The input images of size (nbatch, ncoil, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2,\n klength).\n ' if isinstance(smap, torch.Tensor): dtype = smap.dtype device = smap.device y = torch.zeros(smap.shape, dtype=dtype, device=device) else: y = ([None] * len(smap)) for (i, im) in enumerate(x): y[i] = complex_mult(im, smap[i], dim=1) y = KbNufftFunction.apply(y, om, interpob, interp_mats) return y
def sense_forward(x, smap, om, interpob, interp_mats=None): 'SENSE-NUFFT operation.\n\n Args:\n x (tensor): The input images of size (nbatch, ncoil, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2,\n klength).\n ' if isinstance(smap, torch.Tensor): dtype = smap.dtype device = smap.device y = torch.zeros(smap.shape, dtype=dtype, device=device) else: y = ([None] * len(smap)) for (i, im) in enumerate(x): y[i] = complex_mult(im, smap[i], dim=1) y = KbNufftFunction.apply(y, om, interpob, interp_mats) return y<|docstring|>SENSE-NUFFT operation. Args: x (tensor): The input images of size (nbatch, ncoil, 2) + im_size. smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. interpob (dictionary): A NUFFT interpolation object. interp_mats (dictionary, default=None): A dictionary of sparse interpolation matrices. If not None, the NUFFT operation will use the matrices for interpolation. Returns: tensor: Output off-grid k-space data of dimensions (nbatch, ncoil, 2, klength).<|endoftext|>
11ae05e6ad2290a38e3cd857feefbda7ece8fcf63988b93931e0380c48338de3
def sense_backward(y, smap, om, interpob, interp_mats=None): 'SENSE-NUFFT adjoint operation.\n\n Args:\n y (tensor): The input images of size (nbatch, ncoil, 2, klength).\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) +\n im_size.\n ' x = AdjKbNufftFunction.apply(y, om, interpob, interp_mats) x = list(x) for i in range(len(x)): x[i] = torch.sum(conj_complex_mult(x[i], smap[i], dim=1), dim=0, keepdim=True) if isinstance(smap, torch.Tensor): x = torch.stack(x) return x
SENSE-NUFFT adjoint operation. Args: y (tensor): The input images of size (nbatch, ncoil, 2, klength). smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. interpob (dictionary): A NUFFT interpolation object. interp_mats (dictionary, default=None): A dictionary of sparse interpolation matrices. If not None, the NUFFT operation will use the matrices for interpolation. Returns: tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) + im_size.
torchkbnufft/mri/sensenufft_functions.py
sense_backward
zaccharieramzi/torchkbnufft
1
python
def sense_backward(y, smap, om, interpob, interp_mats=None): 'SENSE-NUFFT adjoint operation.\n\n Args:\n y (tensor): The input images of size (nbatch, ncoil, 2, klength).\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) +\n im_size.\n ' x = AdjKbNufftFunction.apply(y, om, interpob, interp_mats) x = list(x) for i in range(len(x)): x[i] = torch.sum(conj_complex_mult(x[i], smap[i], dim=1), dim=0, keepdim=True) if isinstance(smap, torch.Tensor): x = torch.stack(x) return x
def sense_backward(y, smap, om, interpob, interp_mats=None): 'SENSE-NUFFT adjoint operation.\n\n Args:\n y (tensor): The input images of size (nbatch, ncoil, 2, klength).\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n interpob (dictionary): A NUFFT interpolation object.\n interp_mats (dictionary, default=None): A dictionary of sparse\n interpolation matrices. If not None, the NUFFT operation will use\n the matrices for interpolation.\n\n Returns:\n tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) +\n im_size.\n ' x = AdjKbNufftFunction.apply(y, om, interpob, interp_mats) x = list(x) for i in range(len(x)): x[i] = torch.sum(conj_complex_mult(x[i], smap[i], dim=1), dim=0, keepdim=True) if isinstance(smap, torch.Tensor): x = torch.stack(x) return x<|docstring|>SENSE-NUFFT adjoint operation. Args: y (tensor): The input images of size (nbatch, ncoil, 2, klength). smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. interpob (dictionary): A NUFFT interpolation object. interp_mats (dictionary, default=None): A dictionary of sparse interpolation matrices. If not None, the NUFFT operation will use the matrices for interpolation. Returns: tensor: The images after adjoint NUFFT of size (nbatch, ncoil, 2) + im_size.<|endoftext|>
1179954a042be9af032b7ce0eb1f9b66b88780b8a353e3f9cd07ff870f15f836
def sense_toeplitz(x, smap, kern, norm=None): "Forward/Adjoint SENSE-NUFFT with Toeplitz embedding.\n\n This function applies both a forward and adjoint SENSE-NUFFT with Toeplitz\n embedding for the NUFFT operations, thus avoiding any gridding or\n interpolation and using only FFTs (very fast).\n\n Args:\n x (tensor): The input images of size (nbatch, 1, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n kern (tensor): Embedded Toeplitz NUFFT kernel of size\n (nbatch, ncoil, 2) + im_size*2.\n norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz\n NUFFT filter.\n\n Returns:\n tensor: The images after forward and adjoint NUFFT of size\n (nbatch, 1, 2) + im_size.\n " x = list(x) for i in range(len(x)): x[i] = _sense_toep_filt(x[i], smap[i], kern[i], norm) x = torch.stack(x) return x
Forward/Adjoint SENSE-NUFFT with Toeplitz embedding. This function applies both a forward and adjoint SENSE-NUFFT with Toeplitz embedding for the NUFFT operations, thus avoiding any gridding or interpolation and using only FFTs (very fast). Args: x (tensor): The input images of size (nbatch, 1, 2) + im_size. smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. kern (tensor): Embedded Toeplitz NUFFT kernel of size (nbatch, ncoil, 2) + im_size*2. norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz NUFFT filter. Returns: tensor: The images after forward and adjoint NUFFT of size (nbatch, 1, 2) + im_size.
torchkbnufft/mri/sensenufft_functions.py
sense_toeplitz
zaccharieramzi/torchkbnufft
1
python
def sense_toeplitz(x, smap, kern, norm=None): "Forward/Adjoint SENSE-NUFFT with Toeplitz embedding.\n\n This function applies both a forward and adjoint SENSE-NUFFT with Toeplitz\n embedding for the NUFFT operations, thus avoiding any gridding or\n interpolation and using only FFTs (very fast).\n\n Args:\n x (tensor): The input images of size (nbatch, 1, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n kern (tensor): Embedded Toeplitz NUFFT kernel of size\n (nbatch, ncoil, 2) + im_size*2.\n norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz\n NUFFT filter.\n\n Returns:\n tensor: The images after forward and adjoint NUFFT of size\n (nbatch, 1, 2) + im_size.\n " x = list(x) for i in range(len(x)): x[i] = _sense_toep_filt(x[i], smap[i], kern[i], norm) x = torch.stack(x) return x
def sense_toeplitz(x, smap, kern, norm=None): "Forward/Adjoint SENSE-NUFFT with Toeplitz embedding.\n\n This function applies both a forward and adjoint SENSE-NUFFT with Toeplitz\n embedding for the NUFFT operations, thus avoiding any gridding or\n interpolation and using only FFTs (very fast).\n\n Args:\n x (tensor): The input images of size (nbatch, 1, 2) + im_size.\n smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) +\n im_size.\n kern (tensor): Embedded Toeplitz NUFFT kernel of size\n (nbatch, ncoil, 2) + im_size*2.\n norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz\n NUFFT filter.\n\n Returns:\n tensor: The images after forward and adjoint NUFFT of size\n (nbatch, 1, 2) + im_size.\n " x = list(x) for i in range(len(x)): x[i] = _sense_toep_filt(x[i], smap[i], kern[i], norm) x = torch.stack(x) return x<|docstring|>Forward/Adjoint SENSE-NUFFT with Toeplitz embedding. This function applies both a forward and adjoint SENSE-NUFFT with Toeplitz embedding for the NUFFT operations, thus avoiding any gridding or interpolation and using only FFTs (very fast). Args: x (tensor): The input images of size (nbatch, 1, 2) + im_size. smap (tensor): The sensitivity maps of size (nbatch, ncoil, 2) + im_size. kern (tensor): Embedded Toeplitz NUFFT kernel of size (nbatch, ncoil, 2) + im_size*2. norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz NUFFT filter. Returns: tensor: The images after forward and adjoint NUFFT of size (nbatch, 1, 2) + im_size.<|endoftext|>
3d8fa3451c5dda13b5a35ea354444869fc80ccba238ff0d3a6bd3a27e0ee998d
def _sense_toep_filt(x, smap, kern, norm): "Subroutine for sense_toeplitz().\n\n Args:\n x (tensor): The input images of size (1, 2) + im_size.\n smap (tensor): The sensitivity maps of size (ncoil, 2) +\n im_size.\n kern (tensor): Embedded Toeplitz NUFFT kernel of size\n (ncoil, 2) + im_size*2.\n norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz\n NUFFT filter.\n\n Returns:\n tensor: The images after forward and adjoint NUFFT of size\n (1, 2) + im_size.\n " x = complex_mult(x, smap, dim=1) x = fft_filter(x.unsqueeze(0), kern.unsqueeze(0), norm=norm).squeeze(0) x = torch.sum(conj_complex_mult(x, smap, dim=1), dim=0, keepdim=True) return x
Subroutine for sense_toeplitz(). Args: x (tensor): The input images of size (1, 2) + im_size. smap (tensor): The sensitivity maps of size (ncoil, 2) + im_size. kern (tensor): Embedded Toeplitz NUFFT kernel of size (ncoil, 2) + im_size*2. norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz NUFFT filter. Returns: tensor: The images after forward and adjoint NUFFT of size (1, 2) + im_size.
torchkbnufft/mri/sensenufft_functions.py
_sense_toep_filt
zaccharieramzi/torchkbnufft
1
python
def _sense_toep_filt(x, smap, kern, norm): "Subroutine for sense_toeplitz().\n\n Args:\n x (tensor): The input images of size (1, 2) + im_size.\n smap (tensor): The sensitivity maps of size (ncoil, 2) +\n im_size.\n kern (tensor): Embedded Toeplitz NUFFT kernel of size\n (ncoil, 2) + im_size*2.\n norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz\n NUFFT filter.\n\n Returns:\n tensor: The images after forward and adjoint NUFFT of size\n (1, 2) + im_size.\n " x = complex_mult(x, smap, dim=1) x = fft_filter(x.unsqueeze(0), kern.unsqueeze(0), norm=norm).squeeze(0) x = torch.sum(conj_complex_mult(x, smap, dim=1), dim=0, keepdim=True) return x
def _sense_toep_filt(x, smap, kern, norm): "Subroutine for sense_toeplitz().\n\n Args:\n x (tensor): The input images of size (1, 2) + im_size.\n smap (tensor): The sensitivity maps of size (ncoil, 2) +\n im_size.\n kern (tensor): Embedded Toeplitz NUFFT kernel of size\n (ncoil, 2) + im_size*2.\n norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz\n NUFFT filter.\n\n Returns:\n tensor: The images after forward and adjoint NUFFT of size\n (1, 2) + im_size.\n " x = complex_mult(x, smap, dim=1) x = fft_filter(x.unsqueeze(0), kern.unsqueeze(0), norm=norm).squeeze(0) x = torch.sum(conj_complex_mult(x, smap, dim=1), dim=0, keepdim=True) return x<|docstring|>Subroutine for sense_toeplitz(). Args: x (tensor): The input images of size (1, 2) + im_size. smap (tensor): The sensitivity maps of size (ncoil, 2) + im_size. kern (tensor): Embedded Toeplitz NUFFT kernel of size (ncoil, 2) + im_size*2. norm (str, default=None): If 'ortho', use orthogonal FFTs for Toeplitz NUFFT filter. Returns: tensor: The images after forward and adjoint NUFFT of size (1, 2) + im_size.<|endoftext|>
58e63ebaa02cf9690c03112ab439066a8f7948142c867c9c467599492b14db30
def orthogonal(scale=1.0, column_axis=(- 1)): '\n Construct an initializer for uniformly distributed orthogonal matrices.\n \n If the shape is not square, the matrices will have orthonormal rows or columns\n depending on which side is smaller.\n ' def init(key, shape, dtype=np.float32): if (len(shape) < 2): raise ValueError('orthogonal initializer requires at least a 2D shape') (n_rows, n_cols) = ((onp.prod(shape) / shape[column_axis]), shape[column_axis]) matrix_shape = ((n_cols, n_rows) if (n_rows < n_cols) else (n_rows, n_cols)) A = random.normal(key, matrix_shape, dtype) (Q, R) = np.linalg.qr(A) Q *= np.sign(np.diag(R)) if (n_rows < n_cols): Q = Q.T Q = np.reshape(Q, (np.delete(shape, column_axis) + (shape[column_axis],))) Q = np.moveaxis(Q, (- 1), column_axis) return (scale * Q) return init
Construct an initializer for uniformly distributed orthogonal matrices. If the shape is not square, the matrices will have orthonormal rows or columns depending on which side is smaller.
jax/nn/initializers.py
orthogonal
aeftimia/jax
1
python
def orthogonal(scale=1.0, column_axis=(- 1)): '\n Construct an initializer for uniformly distributed orthogonal matrices.\n \n If the shape is not square, the matrices will have orthonormal rows or columns\n depending on which side is smaller.\n ' def init(key, shape, dtype=np.float32): if (len(shape) < 2): raise ValueError('orthogonal initializer requires at least a 2D shape') (n_rows, n_cols) = ((onp.prod(shape) / shape[column_axis]), shape[column_axis]) matrix_shape = ((n_cols, n_rows) if (n_rows < n_cols) else (n_rows, n_cols)) A = random.normal(key, matrix_shape, dtype) (Q, R) = np.linalg.qr(A) Q *= np.sign(np.diag(R)) if (n_rows < n_cols): Q = Q.T Q = np.reshape(Q, (np.delete(shape, column_axis) + (shape[column_axis],))) Q = np.moveaxis(Q, (- 1), column_axis) return (scale * Q) return init
def orthogonal(scale=1.0, column_axis=(- 1)): '\n Construct an initializer for uniformly distributed orthogonal matrices.\n \n If the shape is not square, the matrices will have orthonormal rows or columns\n depending on which side is smaller.\n ' def init(key, shape, dtype=np.float32): if (len(shape) < 2): raise ValueError('orthogonal initializer requires at least a 2D shape') (n_rows, n_cols) = ((onp.prod(shape) / shape[column_axis]), shape[column_axis]) matrix_shape = ((n_cols, n_rows) if (n_rows < n_cols) else (n_rows, n_cols)) A = random.normal(key, matrix_shape, dtype) (Q, R) = np.linalg.qr(A) Q *= np.sign(np.diag(R)) if (n_rows < n_cols): Q = Q.T Q = np.reshape(Q, (np.delete(shape, column_axis) + (shape[column_axis],))) Q = np.moveaxis(Q, (- 1), column_axis) return (scale * Q) return init<|docstring|>Construct an initializer for uniformly distributed orthogonal matrices. If the shape is not square, the matrices will have orthonormal rows or columns depending on which side is smaller.<|endoftext|>
e50f0de493b93bf1cd4b958dfb928e016b181ef3c09420fd51533cdc1044ac6f
def branch(self, depth): '\n break node into 4 nodes.\n ' if (depth > 0): self.data = None self.quads = [None, None, None, None] self.x_mid = ((self.x_min + self.x_max) / 2) self.y_mid = ((self.y_min + self.y_max) / 2) self.quads[0] = QuadTreeNode((depth - 1), self.x_min, self.x_mid, self.y_min, self.y_mid) self.quads[1] = QuadTreeNode((depth - 1), self.x_mid, self.x_max, self.y_min, self.y_mid) self.quads[2] = QuadTreeNode((depth - 1), self.x_min, self.x_mid, self.y_mid, self.y_max) self.quads[3] = QuadTreeNode((depth - 1), self.x_mid, self.x_max, self.y_mid, self.y_max)
break node into 4 nodes.
HiPRGen/network_renderer.py
branch
danielbarter/HiPRGen
6
python
def branch(self, depth): '\n \n ' if (depth > 0): self.data = None self.quads = [None, None, None, None] self.x_mid = ((self.x_min + self.x_max) / 2) self.y_mid = ((self.y_min + self.y_max) / 2) self.quads[0] = QuadTreeNode((depth - 1), self.x_min, self.x_mid, self.y_min, self.y_mid) self.quads[1] = QuadTreeNode((depth - 1), self.x_mid, self.x_max, self.y_min, self.y_mid) self.quads[2] = QuadTreeNode((depth - 1), self.x_min, self.x_mid, self.y_mid, self.y_max) self.quads[3] = QuadTreeNode((depth - 1), self.x_mid, self.x_max, self.y_mid, self.y_max)
def branch(self, depth): '\n \n ' if (depth > 0): self.data = None self.quads = [None, None, None, None] self.x_mid = ((self.x_min + self.x_max) / 2) self.y_mid = ((self.y_min + self.y_max) / 2) self.quads[0] = QuadTreeNode((depth - 1), self.x_min, self.x_mid, self.y_min, self.y_mid) self.quads[1] = QuadTreeNode((depth - 1), self.x_mid, self.x_max, self.y_min, self.y_mid) self.quads[2] = QuadTreeNode((depth - 1), self.x_min, self.x_mid, self.y_mid, self.y_max) self.quads[3] = QuadTreeNode((depth - 1), self.x_mid, self.x_max, self.y_mid, self.y_max)<|docstring|>break node into 4 nodes.<|endoftext|>
b6103dcd27485044feece21307d41c757414ef413dcd4cab9acf773b59e99550
def find_neighborhood(self, x, y): "\n find all nodes adjacent to our point.\n doesn't return the node actually containing our point.\n " node = self.find_node(x, y) x_diff = (node.x_max - node.x_min) y_diff = (node.y_max - node.y_min) maybe_adjacent_nodes = [self.find_node((x + x_diff), y), self.find_node((x - x_diff), y), self.find_node(x, (y + y_diff)), self.find_node(x, (y - y_diff)), self.find_node((x + x_diff), (y + y_diff)), self.find_node((x - x_diff), (y + y_diff)), self.find_node((x + x_diff), (y - y_diff)), self.find_node((x - x_diff), (y - y_diff))] adjacent_nodes = [n for n in maybe_adjacent_nodes if (n is not None)] return adjacent_nodes
find all nodes adjacent to our point. doesn't return the node actually containing our point.
HiPRGen/network_renderer.py
find_neighborhood
danielbarter/HiPRGen
6
python
def find_neighborhood(self, x, y): "\n find all nodes adjacent to our point.\n doesn't return the node actually containing our point.\n " node = self.find_node(x, y) x_diff = (node.x_max - node.x_min) y_diff = (node.y_max - node.y_min) maybe_adjacent_nodes = [self.find_node((x + x_diff), y), self.find_node((x - x_diff), y), self.find_node(x, (y + y_diff)), self.find_node(x, (y - y_diff)), self.find_node((x + x_diff), (y + y_diff)), self.find_node((x - x_diff), (y + y_diff)), self.find_node((x + x_diff), (y - y_diff)), self.find_node((x - x_diff), (y - y_diff))] adjacent_nodes = [n for n in maybe_adjacent_nodes if (n is not None)] return adjacent_nodes
def find_neighborhood(self, x, y): "\n find all nodes adjacent to our point.\n doesn't return the node actually containing our point.\n " node = self.find_node(x, y) x_diff = (node.x_max - node.x_min) y_diff = (node.y_max - node.y_min) maybe_adjacent_nodes = [self.find_node((x + x_diff), y), self.find_node((x - x_diff), y), self.find_node(x, (y + y_diff)), self.find_node(x, (y - y_diff)), self.find_node((x + x_diff), (y + y_diff)), self.find_node((x - x_diff), (y + y_diff)), self.find_node((x + x_diff), (y - y_diff)), self.find_node((x - x_diff), (y - y_diff))] adjacent_nodes = [n for n in maybe_adjacent_nodes if (n is not None)] return adjacent_nodes<|docstring|>find all nodes adjacent to our point. doesn't return the node actually containing our point.<|endoftext|>
ebc9c5575ba714ef30f37cef3102c5163f24d8afc2a01c29f5c103fd4b564f63
def find_node(self, x, y): '\n find the terminal node so that\n x_min <= x < x_max\n y_min <= y < y_max\n return None if there is no node.\n Note: this gives the wrong answer if called from a terminal node.\n ' if (self.quads is not None): for quad in self.quads: if ((quad.x_min <= x < quad.x_max) and (quad.y_min <= y < quad.y_max)): return quad.find_node(x, y) return None else: return self
find the terminal node so that x_min <= x < x_max y_min <= y < y_max return None if there is no node. Note: this gives the wrong answer if called from a terminal node.
HiPRGen/network_renderer.py
find_node
danielbarter/HiPRGen
6
python
def find_node(self, x, y): '\n find the terminal node so that\n x_min <= x < x_max\n y_min <= y < y_max\n return None if there is no node.\n Note: this gives the wrong answer if called from a terminal node.\n ' if (self.quads is not None): for quad in self.quads: if ((quad.x_min <= x < quad.x_max) and (quad.y_min <= y < quad.y_max)): return quad.find_node(x, y) return None else: return self
def find_node(self, x, y): '\n find the terminal node so that\n x_min <= x < x_max\n y_min <= y < y_max\n return None if there is no node.\n Note: this gives the wrong answer if called from a terminal node.\n ' if (self.quads is not None): for quad in self.quads: if ((quad.x_min <= x < quad.x_max) and (quad.y_min <= y < quad.y_max)): return quad.find_node(x, y) return None else: return self<|docstring|>find the terminal node so that x_min <= x < x_max y_min <= y < y_max return None if there is no node. Note: this gives the wrong answer if called from a terminal node.<|endoftext|>
cc74344a83a217836009e53079c5c50d8ab7c504de7c5efed7b73e5770f24fbb
def format_mapping_file(headers, mapping_data, comments=None): " returns a large formatted string representing the entire mapping file\n\n each input is a list, and all data should be strings, not e.g. ints\n * headers defines column labels, and SampleID should not include a '#'\n * mapping_data is a list of lists, each sublist is a row in the mapping file\n each mapping_data sublist must be the same length as headers - use ''\n for absent data\n * if included, commments will be inserted above the header line\n comments should not include a # - that will be appended in this formatter\n " result = [] result.append(('#' + '\t'.join(headers))) if (comments != None): for comment in comments: result.append(('#' + comment)) for mapping_line in mapping_data: if (not (len(mapping_line) == len(headers))): raise RuntimeError(('error formatting mapping file, does each ' + 'sample have the same length of data as the headers?')) result.append('\t'.join(mapping_line)) str_result = '\n'.join(result) return str_result
returns a large formatted string representing the entire mapping file each input is a list, and all data should be strings, not e.g. ints * headers defines column labels, and SampleID should not include a '#' * mapping_data is a list of lists, each sublist is a row in the mapping file each mapping_data sublist must be the same length as headers - use '' for absent data * if included, commments will be inserted above the header line comments should not include a # - that will be appended in this formatter
emperor/qiime_backports/format.py
format_mapping_file
wdwvt1/emperor
0
python
def format_mapping_file(headers, mapping_data, comments=None): " returns a large formatted string representing the entire mapping file\n\n each input is a list, and all data should be strings, not e.g. ints\n * headers defines column labels, and SampleID should not include a '#'\n * mapping_data is a list of lists, each sublist is a row in the mapping file\n each mapping_data sublist must be the same length as headers - use \n for absent data\n * if included, commments will be inserted above the header line\n comments should not include a # - that will be appended in this formatter\n " result = [] result.append(('#' + '\t'.join(headers))) if (comments != None): for comment in comments: result.append(('#' + comment)) for mapping_line in mapping_data: if (not (len(mapping_line) == len(headers))): raise RuntimeError(('error formatting mapping file, does each ' + 'sample have the same length of data as the headers?')) result.append('\t'.join(mapping_line)) str_result = '\n'.join(result) return str_result
def format_mapping_file(headers, mapping_data, comments=None): " returns a large formatted string representing the entire mapping file\n\n each input is a list, and all data should be strings, not e.g. ints\n * headers defines column labels, and SampleID should not include a '#'\n * mapping_data is a list of lists, each sublist is a row in the mapping file\n each mapping_data sublist must be the same length as headers - use \n for absent data\n * if included, commments will be inserted above the header line\n comments should not include a # - that will be appended in this formatter\n " result = [] result.append(('#' + '\t'.join(headers))) if (comments != None): for comment in comments: result.append(('#' + comment)) for mapping_line in mapping_data: if (not (len(mapping_line) == len(headers))): raise RuntimeError(('error formatting mapping file, does each ' + 'sample have the same length of data as the headers?')) result.append('\t'.join(mapping_line)) str_result = '\n'.join(result) return str_result<|docstring|>returns a large formatted string representing the entire mapping file each input is a list, and all data should be strings, not e.g. ints * headers defines column labels, and SampleID should not include a '#' * mapping_data is a list of lists, each sublist is a row in the mapping file each mapping_data sublist must be the same length as headers - use '' for absent data * if included, commments will be inserted above the header line comments should not include a # - that will be appended in this formatter<|endoftext|>
85d1d1daeac913c374f358c3c8b50bcb3011bb586c7cadc184521750a50a1120
def __init__(self, state_size, action_size, env, random_seed): 'Initialize an Agent object.\n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n random_seed (int): random seed\n env: the initialized environment\n ' self.state_size = state_size self.action_size = action_size self.seed = random.seed(random_seed) self.epsilon = EPSILON self.env = env self.brain_name = self.env.brain_names[0] env_info = self.env.reset(train_mode=True)[self.brain_name] self.num_agents = len(env_info.agents) self.actor_local = Actor(state_size, action_size, random_seed).to(device) self.actor_target = Actor(state_size, action_size, random_seed).to(device) self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR) self.critic_local = Critic(state_size, action_size, random_seed).to(device) self.critic_target = Critic(state_size, action_size, random_seed).to(device) self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY) self.noise = OUNoise(action_size, random_seed) self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
Initialize an Agent object. Params ====== state_size (int): dimension of each state action_size (int): dimension of each action random_seed (int): random seed env: the initialized environment
p2_continuous-control/ddpg_agent.py
__init__
ymazari/deep-reinforcement-learning
0
python
def __init__(self, state_size, action_size, env, random_seed): 'Initialize an Agent object.\n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n random_seed (int): random seed\n env: the initialized environment\n ' self.state_size = state_size self.action_size = action_size self.seed = random.seed(random_seed) self.epsilon = EPSILON self.env = env self.brain_name = self.env.brain_names[0] env_info = self.env.reset(train_mode=True)[self.brain_name] self.num_agents = len(env_info.agents) self.actor_local = Actor(state_size, action_size, random_seed).to(device) self.actor_target = Actor(state_size, action_size, random_seed).to(device) self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR) self.critic_local = Critic(state_size, action_size, random_seed).to(device) self.critic_target = Critic(state_size, action_size, random_seed).to(device) self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY) self.noise = OUNoise(action_size, random_seed) self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
def __init__(self, state_size, action_size, env, random_seed): 'Initialize an Agent object.\n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n random_seed (int): random seed\n env: the initialized environment\n ' self.state_size = state_size self.action_size = action_size self.seed = random.seed(random_seed) self.epsilon = EPSILON self.env = env self.brain_name = self.env.brain_names[0] env_info = self.env.reset(train_mode=True)[self.brain_name] self.num_agents = len(env_info.agents) self.actor_local = Actor(state_size, action_size, random_seed).to(device) self.actor_target = Actor(state_size, action_size, random_seed).to(device) self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR) self.critic_local = Critic(state_size, action_size, random_seed).to(device) self.critic_target = Critic(state_size, action_size, random_seed).to(device) self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY) self.noise = OUNoise(action_size, random_seed) self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)<|docstring|>Initialize an Agent object. Params ====== state_size (int): dimension of each state action_size (int): dimension of each action random_seed (int): random seed env: the initialized environment<|endoftext|>
79739c6cee4e71b856d8c391add193591a40fd999dddf6524bc2b8075b22a040
def train(self, n_episodes=1000, max_t=1100, print_every=50): '\n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timestep per episode\n print_every: (int): interval to printing results\n ' if os.path.exists('checkpoint_actor.pth'): self.actor_local.load_state_dict(torch.load('checkpoint_actor.pth')) if os.path.exists('checkpoint_critic.pth'): self.critic_local.load_state_dict(torch.load('checkpoint_critic.pth')) scores_window = deque(maxlen=100) moving_avgs = [] mean_scores = [] max_score = (- np.inf) for i_episode in tqdm(range(1, (n_episodes + 1))): env_info = self.env.reset(train_mode=True)[self.brain_name] states = env_info.vector_observations scores = np.zeros(self.num_agents) self.reset() for t in range(max_t): actions = self.act(states, add_noise=True) env_info = self.env.step(actions)[self.brain_name] next_states = env_info.vector_observations rewards = env_info.rewards dones = env_info.local_done for (state, action, reward, next_state, done) in zip(states, actions, rewards, next_states, dones): self.step(state, action, reward, next_state, done, t) scores += env_info.rewards states = next_states if np.any(dones): break mean_scores.append(np.mean(scores)) scores_window.append(mean_scores[(- 1)]) moving_avgs.append(np.mean(scores_window)) if ((i_episode % print_every) == 0): print('\rEpisode {}\tAverage Score: {:.1f}\tMoving Avgs Score: {:.1f}'.format(i_episode, mean_scores[(- 1)], moving_avgs[(- 1)])) if (mean_scores[(- 1)] > max_score): torch.save(self.actor_local.state_dict(), 'checkpoint_actor.pth') torch.save(self.critic_local.state_dict(), 'checkpoint_critic.pth') max_score = mean_scores[(- 1)] print('\rEpisode {}\tMax Score: {:.1f}....checkpoint....'.format(i_episode, max_score)) if ((i_episode >= 100) and (moving_avgs[(- 1)] >= 30.0)): torch.save(self.actor_local.state_dict(), 'solved_actor.pth') torch.save(self.critic_local.state_dict(), 'solved_critic.pth') print('\n Solved in Episode 
{}\tAverage Score: {:.1f}\tMoving Avgs Score: {:.1f}'.format(i_episode, mean_scores[(- 1)], moving_avgs[(- 1)])) break return (mean_scores, moving_avgs)
Params ====== n_episodes (int): maximum number of training episodes max_t (int): maximum number of timestep per episode print_every: (int): interval to printing results
p2_continuous-control/ddpg_agent.py
train
ymazari/deep-reinforcement-learning
0
python
def train(self, n_episodes=1000, max_t=1100, print_every=50): '\n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timestep per episode\n print_every: (int): interval to printing results\n ' if os.path.exists('checkpoint_actor.pth'): self.actor_local.load_state_dict(torch.load('checkpoint_actor.pth')) if os.path.exists('checkpoint_critic.pth'): self.critic_local.load_state_dict(torch.load('checkpoint_critic.pth')) scores_window = deque(maxlen=100) moving_avgs = [] mean_scores = [] max_score = (- np.inf) for i_episode in tqdm(range(1, (n_episodes + 1))): env_info = self.env.reset(train_mode=True)[self.brain_name] states = env_info.vector_observations scores = np.zeros(self.num_agents) self.reset() for t in range(max_t): actions = self.act(states, add_noise=True) env_info = self.env.step(actions)[self.brain_name] next_states = env_info.vector_observations rewards = env_info.rewards dones = env_info.local_done for (state, action, reward, next_state, done) in zip(states, actions, rewards, next_states, dones): self.step(state, action, reward, next_state, done, t) scores += env_info.rewards states = next_states if np.any(dones): break mean_scores.append(np.mean(scores)) scores_window.append(mean_scores[(- 1)]) moving_avgs.append(np.mean(scores_window)) if ((i_episode % print_every) == 0): print('\rEpisode {}\tAverage Score: {:.1f}\tMoving Avgs Score: {:.1f}'.format(i_episode, mean_scores[(- 1)], moving_avgs[(- 1)])) if (mean_scores[(- 1)] > max_score): torch.save(self.actor_local.state_dict(), 'checkpoint_actor.pth') torch.save(self.critic_local.state_dict(), 'checkpoint_critic.pth') max_score = mean_scores[(- 1)] print('\rEpisode {}\tMax Score: {:.1f}....checkpoint....'.format(i_episode, max_score)) if ((i_episode >= 100) and (moving_avgs[(- 1)] >= 30.0)): torch.save(self.actor_local.state_dict(), 'solved_actor.pth') torch.save(self.critic_local.state_dict(), 'solved_critic.pth') print('\n Solved in Episode 
{}\tAverage Score: {:.1f}\tMoving Avgs Score: {:.1f}'.format(i_episode, mean_scores[(- 1)], moving_avgs[(- 1)])) break return (mean_scores, moving_avgs)
def train(self, n_episodes=1000, max_t=1100, print_every=50): '\n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timestep per episode\n print_every: (int): interval to printing results\n ' if os.path.exists('checkpoint_actor.pth'): self.actor_local.load_state_dict(torch.load('checkpoint_actor.pth')) if os.path.exists('checkpoint_critic.pth'): self.critic_local.load_state_dict(torch.load('checkpoint_critic.pth')) scores_window = deque(maxlen=100) moving_avgs = [] mean_scores = [] max_score = (- np.inf) for i_episode in tqdm(range(1, (n_episodes + 1))): env_info = self.env.reset(train_mode=True)[self.brain_name] states = env_info.vector_observations scores = np.zeros(self.num_agents) self.reset() for t in range(max_t): actions = self.act(states, add_noise=True) env_info = self.env.step(actions)[self.brain_name] next_states = env_info.vector_observations rewards = env_info.rewards dones = env_info.local_done for (state, action, reward, next_state, done) in zip(states, actions, rewards, next_states, dones): self.step(state, action, reward, next_state, done, t) scores += env_info.rewards states = next_states if np.any(dones): break mean_scores.append(np.mean(scores)) scores_window.append(mean_scores[(- 1)]) moving_avgs.append(np.mean(scores_window)) if ((i_episode % print_every) == 0): print('\rEpisode {}\tAverage Score: {:.1f}\tMoving Avgs Score: {:.1f}'.format(i_episode, mean_scores[(- 1)], moving_avgs[(- 1)])) if (mean_scores[(- 1)] > max_score): torch.save(self.actor_local.state_dict(), 'checkpoint_actor.pth') torch.save(self.critic_local.state_dict(), 'checkpoint_critic.pth') max_score = mean_scores[(- 1)] print('\rEpisode {}\tMax Score: {:.1f}....checkpoint....'.format(i_episode, max_score)) if ((i_episode >= 100) and (moving_avgs[(- 1)] >= 30.0)): torch.save(self.actor_local.state_dict(), 'solved_actor.pth') torch.save(self.critic_local.state_dict(), 'solved_critic.pth') print('\n Solved in Episode 
{}\tAverage Score: {:.1f}\tMoving Avgs Score: {:.1f}'.format(i_episode, mean_scores[(- 1)], moving_avgs[(- 1)])) break return (mean_scores, moving_avgs)<|docstring|>Params ====== n_episodes (int): maximum number of training episodes max_t (int): maximum number of timestep per episode print_every: (int): interval to printing results<|endoftext|>
6daff41409bc0677c4ba58c37b132ac15bd50b531af5950e34848177c8290f27
def step(self, state, action, reward, next_state, done, timestep): 'Save experience in replay memory, and use random sample from buffer to learn.' self.memory.add(state, action, reward, next_state, done) if ((len(self.memory) > BATCH_SIZE) and ((timestep % LEARN_INTERVAL) == 0)): for _ in range(LEARN_TIMES): experiences = self.memory.sample() self.learn(experiences, GAMMA)
Save experience in replay memory, and use random sample from buffer to learn.
p2_continuous-control/ddpg_agent.py
step
ymazari/deep-reinforcement-learning
0
python
def step(self, state, action, reward, next_state, done, timestep): self.memory.add(state, action, reward, next_state, done) if ((len(self.memory) > BATCH_SIZE) and ((timestep % LEARN_INTERVAL) == 0)): for _ in range(LEARN_TIMES): experiences = self.memory.sample() self.learn(experiences, GAMMA)
def step(self, state, action, reward, next_state, done, timestep): self.memory.add(state, action, reward, next_state, done) if ((len(self.memory) > BATCH_SIZE) and ((timestep % LEARN_INTERVAL) == 0)): for _ in range(LEARN_TIMES): experiences = self.memory.sample() self.learn(experiences, GAMMA)<|docstring|>Save experience in replay memory, and use random sample from buffer to learn.<|endoftext|>
20e360c1339a85801487f987d13dffa376260125aeb3c7a9098a9a2e2100844c
def act(self, state, add_noise=True): 'Returns actions for given state as per current policy.' state = torch.from_numpy(state).float().to(device) self.actor_local.eval() with torch.no_grad(): action = self.actor_local(state).cpu().data.numpy() self.actor_local.train() if add_noise: action += (self.epsilon * self.noise.sample()) return np.clip(action, (- 1), 1)
Returns actions for given state as per current policy.
p2_continuous-control/ddpg_agent.py
act
ymazari/deep-reinforcement-learning
0
python
def act(self, state, add_noise=True): state = torch.from_numpy(state).float().to(device) self.actor_local.eval() with torch.no_grad(): action = self.actor_local(state).cpu().data.numpy() self.actor_local.train() if add_noise: action += (self.epsilon * self.noise.sample()) return np.clip(action, (- 1), 1)
def act(self, state, add_noise=True): state = torch.from_numpy(state).float().to(device) self.actor_local.eval() with torch.no_grad(): action = self.actor_local(state).cpu().data.numpy() self.actor_local.train() if add_noise: action += (self.epsilon * self.noise.sample()) return np.clip(action, (- 1), 1)<|docstring|>Returns actions for given state as per current policy.<|endoftext|>
d79ec9c0d810c2e4738e1ab0f52c6b2db49010a326d63451c4cca3366cdda0b3
def learn(self, experiences, gamma): "Update policy and value parameters using given batch of experience tuples.\n Q_targets = r + γ * critic_target(next_state, actor_target(next_state))\n where:\n actor_target(state) -> action\n critic_target(state, action) -> Q-value\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n " (states, actions, rewards, next_states, dones) = experiences actions_next = self.actor_target(next_states) Q_targets_next = self.critic_target(next_states, actions_next) Q_targets = (rewards + ((gamma * Q_targets_next) * (1 - dones))) Q_expected = self.critic_local(states, actions) critic_loss = F.mse_loss(Q_expected, Q_targets) self.critic_optimizer.zero_grad() critic_loss.backward() self.critic_optimizer.step() actions_pred = self.actor_local(states) actor_loss = (- self.critic_local(states, actions_pred).mean()) self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() self.soft_update(self.critic_local, self.critic_target, TAU) self.soft_update(self.actor_local, self.actor_target, TAU) self.epsilon -= EPSILON_DECAY self.noise.reset()
Update policy and value parameters using given batch of experience tuples. Q_targets = r + γ * critic_target(next_state, actor_target(next_state)) where: actor_target(state) -> action critic_target(state, action) -> Q-value Params ====== experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples gamma (float): discount factor
p2_continuous-control/ddpg_agent.py
learn
ymazari/deep-reinforcement-learning
0
python
def learn(self, experiences, gamma): "Update policy and value parameters using given batch of experience tuples.\n Q_targets = r + γ * critic_target(next_state, actor_target(next_state))\n where:\n actor_target(state) -> action\n critic_target(state, action) -> Q-value\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n " (states, actions, rewards, next_states, dones) = experiences actions_next = self.actor_target(next_states) Q_targets_next = self.critic_target(next_states, actions_next) Q_targets = (rewards + ((gamma * Q_targets_next) * (1 - dones))) Q_expected = self.critic_local(states, actions) critic_loss = F.mse_loss(Q_expected, Q_targets) self.critic_optimizer.zero_grad() critic_loss.backward() self.critic_optimizer.step() actions_pred = self.actor_local(states) actor_loss = (- self.critic_local(states, actions_pred).mean()) self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() self.soft_update(self.critic_local, self.critic_target, TAU) self.soft_update(self.actor_local, self.actor_target, TAU) self.epsilon -= EPSILON_DECAY self.noise.reset()
def learn(self, experiences, gamma): "Update policy and value parameters using given batch of experience tuples.\n Q_targets = r + γ * critic_target(next_state, actor_target(next_state))\n where:\n actor_target(state) -> action\n critic_target(state, action) -> Q-value\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n " (states, actions, rewards, next_states, dones) = experiences actions_next = self.actor_target(next_states) Q_targets_next = self.critic_target(next_states, actions_next) Q_targets = (rewards + ((gamma * Q_targets_next) * (1 - dones))) Q_expected = self.critic_local(states, actions) critic_loss = F.mse_loss(Q_expected, Q_targets) self.critic_optimizer.zero_grad() critic_loss.backward() self.critic_optimizer.step() actions_pred = self.actor_local(states) actor_loss = (- self.critic_local(states, actions_pred).mean()) self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() self.soft_update(self.critic_local, self.critic_target, TAU) self.soft_update(self.actor_local, self.actor_target, TAU) self.epsilon -= EPSILON_DECAY self.noise.reset()<|docstring|>Update policy and value parameters using given batch of experience tuples. Q_targets = r + γ * critic_target(next_state, actor_target(next_state)) where: actor_target(state) -> action critic_target(state, action) -> Q-value Params ====== experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples gamma (float): discount factor<|endoftext|>
5747b77c587eabf6a732eda99b1c742e763b77132295f5323abd13724f4e601e
def soft_update(self, local_model, target_model, tau): 'Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n Params\n ======\n local_model: PyTorch model (weights will be copied from)\n target_model: PyTorch model (weights will be copied to)\n tau (float): interpolation parameter\n ' for (target_param, local_param) in zip(target_model.parameters(), local_model.parameters()): target_param.data.copy_(((tau * local_param.data) + ((1.0 - tau) * target_param.data)))
Soft update model parameters. θ_target = τ*θ_local + (1 - τ)*θ_target Params ====== local_model: PyTorch model (weights will be copied from) target_model: PyTorch model (weights will be copied to) tau (float): interpolation parameter
p2_continuous-control/ddpg_agent.py
soft_update
ymazari/deep-reinforcement-learning
0
python
def soft_update(self, local_model, target_model, tau): 'Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n Params\n ======\n local_model: PyTorch model (weights will be copied from)\n target_model: PyTorch model (weights will be copied to)\n tau (float): interpolation parameter\n ' for (target_param, local_param) in zip(target_model.parameters(), local_model.parameters()): target_param.data.copy_(((tau * local_param.data) + ((1.0 - tau) * target_param.data)))
def soft_update(self, local_model, target_model, tau): 'Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n Params\n ======\n local_model: PyTorch model (weights will be copied from)\n target_model: PyTorch model (weights will be copied to)\n tau (float): interpolation parameter\n ' for (target_param, local_param) in zip(target_model.parameters(), local_model.parameters()): target_param.data.copy_(((tau * local_param.data) + ((1.0 - tau) * target_param.data)))<|docstring|>Soft update model parameters. θ_target = τ*θ_local + (1 - τ)*θ_target Params ====== local_model: PyTorch model (weights will be copied from) target_model: PyTorch model (weights will be copied to) tau (float): interpolation parameter<|endoftext|>
eff4b73af6ce7fe78638b0040a78dcf4338a164f243fca9a318a11c22351637c
def __init__(self, size, seed, mu=0.0, theta=0.15, sigma=0.2): 'Initialize parameters and noise process.\n Params\n ======\n mu: long-running mean\n theta: the speed of mean reversion\n sigma: the volatility parameter\n ' self.mu = (mu * np.ones(size)) self.theta = theta self.sigma = sigma self.seed = random.seed(seed) self.reset()
Initialize parameters and noise process. Params ====== mu: long-running mean theta: the speed of mean reversion sigma: the volatility parameter
p2_continuous-control/ddpg_agent.py
__init__
ymazari/deep-reinforcement-learning
0
python
def __init__(self, size, seed, mu=0.0, theta=0.15, sigma=0.2): 'Initialize parameters and noise process.\n Params\n ======\n mu: long-running mean\n theta: the speed of mean reversion\n sigma: the volatility parameter\n ' self.mu = (mu * np.ones(size)) self.theta = theta self.sigma = sigma self.seed = random.seed(seed) self.reset()
def __init__(self, size, seed, mu=0.0, theta=0.15, sigma=0.2): 'Initialize parameters and noise process.\n Params\n ======\n mu: long-running mean\n theta: the speed of mean reversion\n sigma: the volatility parameter\n ' self.mu = (mu * np.ones(size)) self.theta = theta self.sigma = sigma self.seed = random.seed(seed) self.reset()<|docstring|>Initialize parameters and noise process. Params ====== mu: long-running mean theta: the speed of mean reversion sigma: the volatility parameter<|endoftext|>
c9c41c7b63c896a153ff8c1033cd1fc462e9a63c65ff8e0ef98953f511a1a738
def reset(self): 'Reset the internal state (= noise) to mean (mu).' self.state = copy.copy(self.mu)
Reset the internal state (= noise) to mean (mu).
p2_continuous-control/ddpg_agent.py
reset
ymazari/deep-reinforcement-learning
0
python
def reset(self): self.state = copy.copy(self.mu)
def reset(self): self.state = copy.copy(self.mu)<|docstring|>Reset the internal state (= noise) to mean (mu).<|endoftext|>
e67ef452e391ae14b17f0e76b7d2404d892bdba8812ae3abc4ac6a2c1f5d0dbc
def sample(self): 'Update internal state and return it as a noise sample.' x = self.state dx = ((self.theta * (self.mu - x)) + (self.sigma * np.array([random.random() for i in range(len(x))]))) self.state = (x + dx) return self.state
Update internal state and return it as a noise sample.
p2_continuous-control/ddpg_agent.py
sample
ymazari/deep-reinforcement-learning
0
python
def sample(self): x = self.state dx = ((self.theta * (self.mu - x)) + (self.sigma * np.array([random.random() for i in range(len(x))]))) self.state = (x + dx) return self.state
def sample(self): x = self.state dx = ((self.theta * (self.mu - x)) + (self.sigma * np.array([random.random() for i in range(len(x))]))) self.state = (x + dx) return self.state<|docstring|>Update internal state and return it as a noise sample.<|endoftext|>
91f13b0112d7d8323dbb3caf5d5c8e622005dcac88739899be028a7903ecc305
def __init__(self, action_size, buffer_size, batch_size, seed): 'Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n ' self.action_size = action_size self.memory = deque(maxlen=buffer_size) self.batch_size = batch_size self.experience = namedtuple('Experience', field_names=['state', 'action', 'reward', 'next_state', 'done']) self.seed = random.seed(seed)
Initialize a ReplayBuffer object. Params ====== buffer_size (int): maximum size of buffer batch_size (int): size of each training batch
p2_continuous-control/ddpg_agent.py
__init__
ymazari/deep-reinforcement-learning
0
python
def __init__(self, action_size, buffer_size, batch_size, seed): 'Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n ' self.action_size = action_size self.memory = deque(maxlen=buffer_size) self.batch_size = batch_size self.experience = namedtuple('Experience', field_names=['state', 'action', 'reward', 'next_state', 'done']) self.seed = random.seed(seed)
def __init__(self, action_size, buffer_size, batch_size, seed): 'Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n ' self.action_size = action_size self.memory = deque(maxlen=buffer_size) self.batch_size = batch_size self.experience = namedtuple('Experience', field_names=['state', 'action', 'reward', 'next_state', 'done']) self.seed = random.seed(seed)<|docstring|>Initialize a ReplayBuffer object. Params ====== buffer_size (int): maximum size of buffer batch_size (int): size of each training batch<|endoftext|>
d9473476ea1080906ebc0e5b194fff368244743d162430b13296141597fa20dd
def add(self, state, action, reward, next_state, done): 'Add a new experience to memory.' e = self.experience(state, action, reward, next_state, done) self.memory.append(e)
Add a new experience to memory.
p2_continuous-control/ddpg_agent.py
add
ymazari/deep-reinforcement-learning
0
python
def add(self, state, action, reward, next_state, done): e = self.experience(state, action, reward, next_state, done) self.memory.append(e)
def add(self, state, action, reward, next_state, done): e = self.experience(state, action, reward, next_state, done) self.memory.append(e)<|docstring|>Add a new experience to memory.<|endoftext|>
e473fb4125f0cedb3d4f06f30557e394619e4c08a502f7b48bb5a2de65e9c6d6
def sample(self): 'Randomly sample a batch of experiences from memory.' experiences = random.sample(self.memory, k=self.batch_size) states = torch.from_numpy(np.vstack([e.state for e in experiences if (e is not None)])).float().to(device) actions = torch.from_numpy(np.vstack([e.action for e in experiences if (e is not None)])).float().to(device) rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if (e is not None)])).float().to(device) next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if (e is not None)])).float().to(device) dones = torch.from_numpy(np.vstack([e.done for e in experiences if (e is not None)]).astype(np.uint8)).float().to(device) return (states, actions, rewards, next_states, dones)
Randomly sample a batch of experiences from memory.
p2_continuous-control/ddpg_agent.py
sample
ymazari/deep-reinforcement-learning
0
python
def sample(self): experiences = random.sample(self.memory, k=self.batch_size) states = torch.from_numpy(np.vstack([e.state for e in experiences if (e is not None)])).float().to(device) actions = torch.from_numpy(np.vstack([e.action for e in experiences if (e is not None)])).float().to(device) rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if (e is not None)])).float().to(device) next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if (e is not None)])).float().to(device) dones = torch.from_numpy(np.vstack([e.done for e in experiences if (e is not None)]).astype(np.uint8)).float().to(device) return (states, actions, rewards, next_states, dones)
def sample(self): experiences = random.sample(self.memory, k=self.batch_size) states = torch.from_numpy(np.vstack([e.state for e in experiences if (e is not None)])).float().to(device) actions = torch.from_numpy(np.vstack([e.action for e in experiences if (e is not None)])).float().to(device) rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if (e is not None)])).float().to(device) next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if (e is not None)])).float().to(device) dones = torch.from_numpy(np.vstack([e.done for e in experiences if (e is not None)]).astype(np.uint8)).float().to(device) return (states, actions, rewards, next_states, dones)<|docstring|>Randomly sample a batch of experiences from memory.<|endoftext|>
2e723b55a5a5522c27f72f6fbf337d97333694e8a4668e069ff240d6281324dd
def __len__(self): 'Return the current size of internal memory.' return len(self.memory)
Return the current size of internal memory.
p2_continuous-control/ddpg_agent.py
__len__
ymazari/deep-reinforcement-learning
0
python
def __len__(self): return len(self.memory)
def __len__(self): return len(self.memory)<|docstring|>Return the current size of internal memory.<|endoftext|>
796d05dbc599930ed69f675d176346ee0ab44edc337e02bcf227bd57e61ce16a
def add_rules(self, rules): 'Add new rules to the Rules object' self.set_rules(rules, overwrite=False, use_conf=self.use_conf)
Add new rules to the Rules object
glance/api/policy.py
add_rules
venusource/glance
0
python
def add_rules(self, rules): self.set_rules(rules, overwrite=False, use_conf=self.use_conf)
def add_rules(self, rules): self.set_rules(rules, overwrite=False, use_conf=self.use_conf)<|docstring|>Add new rules to the Rules object<|endoftext|>
6fec769633ee0c05510d315c6807778f4309820009b463625187c6c70ed043b3
def enforce(self, context, action, target): 'Verifies that the action is valid on the target in this context.\n\n :param context: Glance request context\n :param action: String representing the action to be checked\n :param target: Dictionary representing the object of the action.\n :raises: `glance.common.exception.Forbidden`\n :returns: A non-False value if access is allowed.\n ' credentials = {'roles': context.roles, 'user': context.user, 'tenant': context.tenant} return super(Enforcer, self).enforce(action, target, credentials, do_raise=True, exc=exception.Forbidden, action=action)
Verifies that the action is valid on the target in this context. :param context: Glance request context :param action: String representing the action to be checked :param target: Dictionary representing the object of the action. :raises: `glance.common.exception.Forbidden` :returns: A non-False value if access is allowed.
glance/api/policy.py
enforce
venusource/glance
0
python
def enforce(self, context, action, target): 'Verifies that the action is valid on the target in this context.\n\n :param context: Glance request context\n :param action: String representing the action to be checked\n :param target: Dictionary representing the object of the action.\n :raises: `glance.common.exception.Forbidden`\n :returns: A non-False value if access is allowed.\n ' credentials = {'roles': context.roles, 'user': context.user, 'tenant': context.tenant} return super(Enforcer, self).enforce(action, target, credentials, do_raise=True, exc=exception.Forbidden, action=action)
def enforce(self, context, action, target): 'Verifies that the action is valid on the target in this context.\n\n :param context: Glance request context\n :param action: String representing the action to be checked\n :param target: Dictionary representing the object of the action.\n :raises: `glance.common.exception.Forbidden`\n :returns: A non-False value if access is allowed.\n ' credentials = {'roles': context.roles, 'user': context.user, 'tenant': context.tenant} return super(Enforcer, self).enforce(action, target, credentials, do_raise=True, exc=exception.Forbidden, action=action)<|docstring|>Verifies that the action is valid on the target in this context. :param context: Glance request context :param action: String representing the action to be checked :param target: Dictionary representing the object of the action. :raises: `glance.common.exception.Forbidden` :returns: A non-False value if access is allowed.<|endoftext|>
aa032bf93a0f0abf8dd9564aa962a782e448c3078dda5dd9446e5f3e3066aade
def check(self, context, action, target): 'Verifies that the action is valid on the target in this context.\n\n :param context: Glance request context\n :param action: String representing the action to be checked\n :param target: Dictionary representing the object of the action.\n :returns: A non-False value if access is allowed.\n ' credentials = {'roles': context.roles, 'user': context.user, 'tenant': context.tenant} return super(Enforcer, self).enforce(action, target, credentials)
Verifies that the action is valid on the target in this context. :param context: Glance request context :param action: String representing the action to be checked :param target: Dictionary representing the object of the action. :returns: A non-False value if access is allowed.
glance/api/policy.py
check
venusource/glance
0
python
def check(self, context, action, target): 'Verifies that the action is valid on the target in this context.\n\n :param context: Glance request context\n :param action: String representing the action to be checked\n :param target: Dictionary representing the object of the action.\n :returns: A non-False value if access is allowed.\n ' credentials = {'roles': context.roles, 'user': context.user, 'tenant': context.tenant} return super(Enforcer, self).enforce(action, target, credentials)
def check(self, context, action, target): 'Verifies that the action is valid on the target in this context.\n\n :param context: Glance request context\n :param action: String representing the action to be checked\n :param target: Dictionary representing the object of the action.\n :returns: A non-False value if access is allowed.\n ' credentials = {'roles': context.roles, 'user': context.user, 'tenant': context.tenant} return super(Enforcer, self).enforce(action, target, credentials)<|docstring|>Verifies that the action is valid on the target in this context. :param context: Glance request context :param action: String representing the action to be checked :param target: Dictionary representing the object of the action. :returns: A non-False value if access is allowed.<|endoftext|>
77ccccb224201c03d362c15d61d4977cf2c7fe61523c75f7d8e4ad6872fc2101
def check_is_admin(self, context): "Check if the given context is associated with an admin role,\n as defined via the 'context_is_admin' RBAC rule.\n\n :param context: Glance request context\n :returns: A non-False value if context role is admin.\n " return self.check(context, 'context_is_admin', context.to_dict())
Check if the given context is associated with an admin role, as defined via the 'context_is_admin' RBAC rule. :param context: Glance request context :returns: A non-False value if context role is admin.
glance/api/policy.py
check_is_admin
venusource/glance
0
python
def check_is_admin(self, context): "Check if the given context is associated with an admin role,\n as defined via the 'context_is_admin' RBAC rule.\n\n :param context: Glance request context\n :returns: A non-False value if context role is admin.\n " return self.check(context, 'context_is_admin', context.to_dict())
def check_is_admin(self, context): "Check if the given context is associated with an admin role,\n as defined via the 'context_is_admin' RBAC rule.\n\n :param context: Glance request context\n :returns: A non-False value if context role is admin.\n " return self.check(context, 'context_is_admin', context.to_dict())<|docstring|>Check if the given context is associated with an admin role, as defined via the 'context_is_admin' RBAC rule. :param context: Glance request context :returns: A non-False value if context role is admin.<|endoftext|>
be0ee036403f0a082ff6723b3b8eb1d87ffd15b7b59afc567606281eb4f262b0
def __init__(self, image): '\n Initialize the object\n\n :param image: Image object\n ' self.image = image
Initialize the object :param image: Image object
glance/api/policy.py
__init__
venusource/glance
0
python
def __init__(self, image): '\n Initialize the object\n\n :param image: Image object\n ' self.image = image
def __init__(self, image): '\n Initialize the object\n\n :param image: Image object\n ' self.image = image<|docstring|>Initialize the object :param image: Image object<|endoftext|>
d18747b039caa6710928b777c8d1c770d37c2587e4223a405662472dda917f18
def __getitem__(self, key): "\n Returns the value of 'key' from the image if image has that attribute\n else tries to retrieve value from the extra_properties of image.\n\n :param key: value to retrieve\n " if (key == 'id'): key = 'image_id' if hasattr(self.image, key): return getattr(self.image, key) else: return self.image.extra_properties[key]
Returns the value of 'key' from the image if image has that attribute else tries to retrieve value from the extra_properties of image. :param key: value to retrieve
glance/api/policy.py
__getitem__
venusource/glance
0
python
def __getitem__(self, key): "\n Returns the value of 'key' from the image if image has that attribute\n else tries to retrieve value from the extra_properties of image.\n\n :param key: value to retrieve\n " if (key == 'id'): key = 'image_id' if hasattr(self.image, key): return getattr(self.image, key) else: return self.image.extra_properties[key]
def __getitem__(self, key): "\n Returns the value of 'key' from the image if image has that attribute\n else tries to retrieve value from the extra_properties of image.\n\n :param key: value to retrieve\n " if (key == 'id'): key = 'image_id' if hasattr(self.image, key): return getattr(self.image, key) else: return self.image.extra_properties[key]<|docstring|>Returns the value of 'key' from the image if image has that attribute else tries to retrieve value from the extra_properties of image. :param key: value to retrieve<|endoftext|>
40ab76a819657d095a7fba7ca6d66ef41916c021ec49a846ec2257d7b55eaef7
async def get(self): '\n /api/v1/get_roles - Endpoint used to get list of roles. Used by weep and newt.\n ---\n get:\n description: Presents json-encoded list of eligible roles for the user.\n responses:\n 200:\n description: Present user with list of eligible roles.\n 403:\n description: User has failed authn/authz.\n ' self.user: str = self.requester['email'] include_all_roles = self.get_arguments('all') console_only = True if (include_all_roles == ['true']): console_only = False log_data = {'function': 'GetRolesHandler.get', 'user': self.user, 'console_only': console_only, 'message': 'Writing all eligible user roles', 'user-agent': self.request.headers.get('User-Agent'), 'request_id': self.request_uuid} log.debug(log_data) stats.count('GetRolesHandler.get', tags={'user': self.user}) (await self.authorization_flow(user=self.user, console_only=console_only)) self.write(json.dumps(sorted(self.eligible_roles))) self.set_header('Content-Type', 'application/json') (await self.finish())
/api/v1/get_roles - Endpoint used to get list of roles. Used by weep and newt. --- get: description: Presents json-encoded list of eligible roles for the user. responses: 200: description: Present user with list of eligible roles. 403: description: User has failed authn/authz.
consoleme/handlers/v1/roles.py
get
atr0phy/consoleme
2,835
python
async def get(self): '\n /api/v1/get_roles - Endpoint used to get list of roles. Used by weep and newt.\n ---\n get:\n description: Presents json-encoded list of eligible roles for the user.\n responses:\n 200:\n description: Present user with list of eligible roles.\n 403:\n description: User has failed authn/authz.\n ' self.user: str = self.requester['email'] include_all_roles = self.get_arguments('all') console_only = True if (include_all_roles == ['true']): console_only = False log_data = {'function': 'GetRolesHandler.get', 'user': self.user, 'console_only': console_only, 'message': 'Writing all eligible user roles', 'user-agent': self.request.headers.get('User-Agent'), 'request_id': self.request_uuid} log.debug(log_data) stats.count('GetRolesHandler.get', tags={'user': self.user}) (await self.authorization_flow(user=self.user, console_only=console_only)) self.write(json.dumps(sorted(self.eligible_roles))) self.set_header('Content-Type', 'application/json') (await self.finish())
async def get(self): '\n /api/v1/get_roles - Endpoint used to get list of roles. Used by weep and newt.\n ---\n get:\n description: Presents json-encoded list of eligible roles for the user.\n responses:\n 200:\n description: Present user with list of eligible roles.\n 403:\n description: User has failed authn/authz.\n ' self.user: str = self.requester['email'] include_all_roles = self.get_arguments('all') console_only = True if (include_all_roles == ['true']): console_only = False log_data = {'function': 'GetRolesHandler.get', 'user': self.user, 'console_only': console_only, 'message': 'Writing all eligible user roles', 'user-agent': self.request.headers.get('User-Agent'), 'request_id': self.request_uuid} log.debug(log_data) stats.count('GetRolesHandler.get', tags={'user': self.user}) (await self.authorization_flow(user=self.user, console_only=console_only)) self.write(json.dumps(sorted(self.eligible_roles))) self.set_header('Content-Type', 'application/json') (await self.finish())<|docstring|>/api/v1/get_roles - Endpoint used to get list of roles. Used by weep and newt. --- get: description: Presents json-encoded list of eligible roles for the user. responses: 200: description: Present user with list of eligible roles. 403: description: User has failed authn/authz.<|endoftext|>
aff4a2973745bc6e6286c402c19dc901b183157ba6e329c8d08cba2e46f35a1a
async def mqtt_record(server: str, output: str=None) -> None: 'Record MQTT messages' mqttc = mqtt.Client() mqttc.connect(server, 1883, 5) for topic in TOPICS: mqttc.subscribe(topic) if (output is not None): output_file = open(output, 'wt') else: output_file = sys.stdout def on_message(mqttc, obj, message): record = {'time': time.time(), 'qos': message.qos, 'retain': message.retain, 'topic': message.topic, 'msg_b64': base64.urlsafe_b64encode(message.payload).decode()} print(json.dumps(record), file=output_file) mqttc.on_message = on_message (await mqttc.loop_forever())
Record MQTT messages
mqtt_recorder.py
mqtt_record
Synesthesias/mqtt-recorder
0
python
async def mqtt_record(server: str, output: str=None) -> None: mqttc = mqtt.Client() mqttc.connect(server, 1883, 5) for topic in TOPICS: mqttc.subscribe(topic) if (output is not None): output_file = open(output, 'wt') else: output_file = sys.stdout def on_message(mqttc, obj, message): record = {'time': time.time(), 'qos': message.qos, 'retain': message.retain, 'topic': message.topic, 'msg_b64': base64.urlsafe_b64encode(message.payload).decode()} print(json.dumps(record), file=output_file) mqttc.on_message = on_message (await mqttc.loop_forever())
async def mqtt_record(server: str, output: str=None) -> None: mqttc = mqtt.Client() mqttc.connect(server, 1883, 5) for topic in TOPICS: mqttc.subscribe(topic) if (output is not None): output_file = open(output, 'wt') else: output_file = sys.stdout def on_message(mqttc, obj, message): record = {'time': time.time(), 'qos': message.qos, 'retain': message.retain, 'topic': message.topic, 'msg_b64': base64.urlsafe_b64encode(message.payload).decode()} print(json.dumps(record), file=output_file) mqttc.on_message = on_message (await mqttc.loop_forever())<|docstring|>Record MQTT messages<|endoftext|>
e3fbaaf030c575a38318bccc5dd8314f816024fc0df19348fe171d07bc11c28a
async def mqtt_replay(server: str, input: str=None, delay: int=0, realtime: bool=False, scale: float=1) -> None: 'Replay MQTT messages' mqttc = mqtt.Client() mqttc.connect(server, 1883, 5) if (input is not None): input_file = open(input, 'rt') else: input_file = sys.stdin if (delay > 0): static_delay_s = (delay / 1000) else: static_delay_s = 0 last_timestamp = None for line in input_file: record = json.loads(line) logger.info('%s', record) if ('msg_b64' in record): msg = base64.urlsafe_b64decode(record['msg_b64'].encode()) elif ('msg' in record): msg = record['msg'].encode() else: logger.warning('Missing message attribute: %s', record) next logger.info('Publish: %s', record) mqttc.publish(record['topic'], msg, retain=record.get('retain'), qos=0) delay_s = static_delay_s if (realtime or (scale != 1)): delay_s += (((record['time'] - last_timestamp) if last_timestamp else 0) * scale) last_timestamp = record['time'] if (delay_s > 0): logger.debug('Sleeping %.3f seconds', delay_s) (await asyncio.sleep(delay_s))
Replay MQTT messages
mqtt_recorder.py
mqtt_replay
Synesthesias/mqtt-recorder
0
python
async def mqtt_replay(server: str, input: str=None, delay: int=0, realtime: bool=False, scale: float=1) -> None: mqttc = mqtt.Client() mqttc.connect(server, 1883, 5) if (input is not None): input_file = open(input, 'rt') else: input_file = sys.stdin if (delay > 0): static_delay_s = (delay / 1000) else: static_delay_s = 0 last_timestamp = None for line in input_file: record = json.loads(line) logger.info('%s', record) if ('msg_b64' in record): msg = base64.urlsafe_b64decode(record['msg_b64'].encode()) elif ('msg' in record): msg = record['msg'].encode() else: logger.warning('Missing message attribute: %s', record) next logger.info('Publish: %s', record) mqttc.publish(record['topic'], msg, retain=record.get('retain'), qos=0) delay_s = static_delay_s if (realtime or (scale != 1)): delay_s += (((record['time'] - last_timestamp) if last_timestamp else 0) * scale) last_timestamp = record['time'] if (delay_s > 0): logger.debug('Sleeping %.3f seconds', delay_s) (await asyncio.sleep(delay_s))
async def mqtt_replay(server: str, input: str=None, delay: int=0, realtime: bool=False, scale: float=1) -> None: mqttc = mqtt.Client() mqttc.connect(server, 1883, 5) if (input is not None): input_file = open(input, 'rt') else: input_file = sys.stdin if (delay > 0): static_delay_s = (delay / 1000) else: static_delay_s = 0 last_timestamp = None for line in input_file: record = json.loads(line) logger.info('%s', record) if ('msg_b64' in record): msg = base64.urlsafe_b64decode(record['msg_b64'].encode()) elif ('msg' in record): msg = record['msg'].encode() else: logger.warning('Missing message attribute: %s', record) next logger.info('Publish: %s', record) mqttc.publish(record['topic'], msg, retain=record.get('retain'), qos=0) delay_s = static_delay_s if (realtime or (scale != 1)): delay_s += (((record['time'] - last_timestamp) if last_timestamp else 0) * scale) last_timestamp = record['time'] if (delay_s > 0): logger.debug('Sleeping %.3f seconds', delay_s) (await asyncio.sleep(delay_s))<|docstring|>Replay MQTT messages<|endoftext|>
5fabf163c8aac509336d63ba898f1b844b302f1379999317bcea972e1308d8c4
def main():
    """Command-line entry point: parse arguments and run record or replay."""
    parser = argparse.ArgumentParser(description='MQTT recorder')
    parser.add_argument('--server', dest='server', metavar='server',
                        help='MQTT broker', default='mqtt://127.0.0.1/')
    parser.add_argument('--mode', dest='mode', metavar='mode',
                        choices=['record', 'replay'],
                        help='Mode of operation (record/replay)',
                        default='record')
    parser.add_argument('--input', dest='input', metavar='filename',
                        help='Input file')
    parser.add_argument('--output', dest='output', metavar='filename',
                        help='Output file')
    parser.add_argument('--realtime', dest='realtime', action='store_true',
                        help='Enable realtime replay')
    parser.add_argument('--speed', dest='speed', type=float, default=1,
                        metavar='factor',
                        help='Realtime speed factor for replay (10=10x)')
    parser.add_argument('--delay', dest='delay', type=int, default=0,
                        metavar='milliseconds',
                        help='Delay between replayed events')
    parser.add_argument('--debug', dest='debug', action='store_true',
                        help='Enable debugging')
    args = parser.parse_args()

    # Configure verbosity before the coroutines below start logging.
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    if args.mode == 'replay':
        coroutine = mqtt_replay(server=args.server, input=args.input,
                                delay=args.delay, realtime=args.realtime,
                                scale=1 / args.speed)
    else:
        coroutine = mqtt_record(server=args.server, output=args.output)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(coroutine)
Main function
mqtt_recorder.py
main
Synesthesias/mqtt-recorder
0
python
def main(): ' ' parser = argparse.ArgumentParser(description='MQTT recorder') parser.add_argument('--server', dest='server', metavar='server', help='MQTT broker', default='mqtt://127.0.0.1/') parser.add_argument('--mode', dest='mode', metavar='mode', choices=['record', 'replay'], help='Mode of operation (record/replay)', default='record') parser.add_argument('--input', dest='input', metavar='filename', help='Input file') parser.add_argument('--output', dest='output', metavar='filename', help='Output file') parser.add_argument('--realtime', dest='realtime', action='store_true', help='Enable realtime replay') parser.add_argument('--speed', dest='speed', type=float, default=1, metavar='factor', help='Realtime speed factor for replay (10=10x)') parser.add_argument('--delay', dest='delay', type=int, default=0, metavar='milliseconds', help='Delay between replayed events') parser.add_argument('--debug', dest='debug', action='store_true', help='Enable debugging') args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) if (args.mode == 'replay'): process = mqtt_replay(server=args.server, input=args.input, delay=args.delay, realtime=args.realtime, scale=(1 / args.speed)) else: process = mqtt_record(server=args.server, output=args.output) loop = asyncio.get_event_loop() loop.run_until_complete(process)
def main(): ' ' parser = argparse.ArgumentParser(description='MQTT recorder') parser.add_argument('--server', dest='server', metavar='server', help='MQTT broker', default='mqtt://127.0.0.1/') parser.add_argument('--mode', dest='mode', metavar='mode', choices=['record', 'replay'], help='Mode of operation (record/replay)', default='record') parser.add_argument('--input', dest='input', metavar='filename', help='Input file') parser.add_argument('--output', dest='output', metavar='filename', help='Output file') parser.add_argument('--realtime', dest='realtime', action='store_true', help='Enable realtime replay') parser.add_argument('--speed', dest='speed', type=float, default=1, metavar='factor', help='Realtime speed factor for replay (10=10x)') parser.add_argument('--delay', dest='delay', type=int, default=0, metavar='milliseconds', help='Delay between replayed events') parser.add_argument('--debug', dest='debug', action='store_true', help='Enable debugging') args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) if (args.mode == 'replay'): process = mqtt_replay(server=args.server, input=args.input, delay=args.delay, realtime=args.realtime, scale=(1 / args.speed)) else: process = mqtt_record(server=args.server, output=args.output) loop = asyncio.get_event_loop() loop.run_until_complete(process)<|docstring|>Main function<|endoftext|>
8f0fb410167fe734071320465258eda5d53836bb699ced55153af40b804fc01d
def hes_nb_glm_mean_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int):
    """Compute one entry of the mean-model block of the hessian for a gene.

    Sums the following over cells:

        h_ij = -x^{m_i} * x^{m_j} * mu * (x / disp) / (1 + mu / disp)^2

    Only element-wise operations are used, and design-matrix columns are
    never simplified: they are 0/1 only for discrete groups, but continuous
    when time, space, pseudotime or spline-basis covariates are supplied.

    :param x: observations for the gene, shape (cells,)
    :param mu: estimated mean per cell, shape (cells,)
    :param disp: estimated dispersion per cell, shape (cells,)
    :param design_loc: design matrix of the location model, (cells, p_loc)
    :param design_scale: design matrix of the scale model, (cells, p_scale);
        unused here but kept for a uniform block interface
    :param i: row index of the hessian entry
    :param j: column index of the hessian entry
    :return: float, entry (i, j) of the mean-model hessian block
    """
    col_i = np.asarray(design_loc[:, i])
    col_j = np.asarray(design_loc[:, j])
    shrink = np.square(1 + (mu / disp))
    per_cell = -(col_i * col_j) * mu * x / disp / shrink
    return np.sum(per_cell)
Compute entry of hessian in mean model block for a given gene. Sum the following across cells: $h_{ij} = -x^{m_i}*x^{m_j}*mu*(x/disp)/(1+mu(disp))^2$ Make sure that only element wise operations happen here! Do not simplify design matrix elements: they are only 0 or 1 for discrete groups but continuous if space, time, pseudotime or spline basis covariates are supplied! :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :param i: int Index of first dimension in fisher information matrix which is to be computed. :param j: int Index of second dimension in fisher information matrix which is to be computed :return: float Entry of fisher information matrix in mean model block at position (i,j)
diffxpy/models/hessian.py
hes_nb_glm_mean_block
gokceneraslan/diffxpy
0
python
def hes_nb_glm_mean_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int): ' Compute entry of hessian in mean model block for a given gene.\n\n Sum the following across cells:\n $h_{ij} = -x^{m_i}*x^{m_j}*mu*(x/disp)/(1+mu(disp))^2$\n Make sure that only element wise operations happen here!\n Do not simplify design matrix elements: they are only 0 or 1 for discrete\n groups but continuous if space, time, pseudotime or spline basis covariates\n are supplied!\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n :param i: int\n Index of first dimension in fisher information matrix which is to be computed.\n :param j: int\n Index of second dimension in fisher information matrix which is to be computed\n :return: float\n Entry of fisher information matrix in mean model block at position (i,j)\n ' h_ij = ((((((- np.asarray(design_loc[(:, i)])) * np.asarray(design_loc[(:, j)])) * mu) * x) / disp) / np.square((1 + (mu / disp)))) return np.sum(h_ij)
def hes_nb_glm_mean_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int): ' Compute entry of hessian in mean model block for a given gene.\n\n Sum the following across cells:\n $h_{ij} = -x^{m_i}*x^{m_j}*mu*(x/disp)/(1+mu(disp))^2$\n Make sure that only element wise operations happen here!\n Do not simplify design matrix elements: they are only 0 or 1 for discrete\n groups but continuous if space, time, pseudotime or spline basis covariates\n are supplied!\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n :param i: int\n Index of first dimension in fisher information matrix which is to be computed.\n :param j: int\n Index of second dimension in fisher information matrix which is to be computed\n :return: float\n Entry of fisher information matrix in mean model block at position (i,j)\n ' h_ij = ((((((- np.asarray(design_loc[(:, i)])) * np.asarray(design_loc[(:, j)])) * mu) * x) / disp) / np.square((1 + (mu / disp)))) return np.sum(h_ij)<|docstring|>Compute entry of hessian in mean model block for a given gene. Sum the following across cells: $h_{ij} = -x^{m_i}*x^{m_j}*mu*(x/disp)/(1+mu(disp))^2$ Make sure that only element wise operations happen here! Do not simplify design matrix elements: they are only 0 or 1 for discrete groups but continuous if space, time, pseudotime or spline basis covariates are supplied! :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. 
:param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :param i: int Index of first dimension in fisher information matrix which is to be computed. :param j: int Index of second dimension in fisher information matrix which is to be computed :return: float Entry of fisher information matrix in mean model block at position (i,j)<|endoftext|>
3e7f87b3a487181cae1f302f48d14fe64fc07c4d63bdcf38300ff6eaa345ae12
def hes_nb_glm_disp_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int):
    """Compute one entry of the dispersion-model block of the hessian.

    Sums the following over cells:

        h_ij = disp * x^{m_i} * x^{m_j} * [psi_0(disp + x)
                                           + psi_0(disp)
                                           - mu / (disp + mu)^2 * (disp + x)
                                           + (mu - disp) / (disp + mu)
                                           + log(disp) + 1 - log(disp + mu)]
               + disp * psi_1(disp + x)
               + disp * psi_1(disp)

    Only element-wise operations are used, and design-matrix columns are
    never simplified: they are 0/1 only for discrete groups, but continuous
    when time, space, pseudotime or spline-basis covariates are supplied.

    :param x: observations for the gene, shape (cells,)
    :param mu: estimated mean per cell, shape (cells,)
    :param disp: estimated dispersion per cell, shape (cells,)
    :param design_loc: design matrix of the location model, (cells, p_loc)
    :param design_scale: design matrix of the scale model, (cells, p_scale)
    :param i: row index of the hessian entry
    :param j: column index of the hessian entry
    :return: float, entry (i, j) of the dispersion-model hessian block
    """
    # BUG FIX: the original applied disp * x_i * x_j to the FIRST polygamma
    # term only; the documented formula scales the whole bracket.  Build the
    # bracket explicitly, then scale it once.
    bracket = (polygamma(n=0, x=(disp + x))
               + polygamma(n=0, x=disp)
               - ((mu / np.square(disp + mu)) * (disp + x))
               + ((mu - disp) / (disp + mu))
               + np.log(disp)
               + 1
               - np.log(disp + mu))
    # NOTE(review): the design columns here come from design_loc, while the
    # caller (hes_nb_glm_bygene) passes indices ranging over the SCALE model
    # — presumably design_scale was intended; confirm against the derivation.
    h_ij = ((disp * np.asarray(design_loc[:, i]) * np.asarray(design_loc[:, j])) * bracket
            + (disp * polygamma(n=1, x=(disp + x)))
            + (disp * polygamma(n=1, x=disp)))
    return np.sum(h_ij)
Compute entry of hessian in dispersion model block for a given gene. Sum the following across cells: $$ h_{ij} = disp * x^{m_i} * x^{m_j} * [psi_0(disp+x) + psi_0(disp) - mu/(disp+mu)^2 * (disp+x) +(mu-disp) / (disp+mu) + log(disp) + 1 - log(disp+mu)] + disp * psi_1(disp+x) + disp * psi_1(disp) $$ Make sure that only element wise operations happen here! Do not simplify design matrix elements: they are only 0 or 1 for discrete groups but continuous if space, time, pseudotime or spline basis covariates are supplied! :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :param i: int Index of first dimension in fisher information matrix which is to be computed. :param j: int Index of second dimension in fisher information matrix which is to be computed :return: float Entry of fisher information matrix in dispersion model block at position (i,j)
diffxpy/models/hessian.py
hes_nb_glm_disp_block
gokceneraslan/diffxpy
0
python
def hes_nb_glm_disp_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int): ' Compute entry of hessian in dispersion model block for a given gene.\n\n Sum the following across cells:\n $$\n h_{ij} =\n disp * x^{m_i} * x^{m_j} * [psi_0(disp+x)\n + psi_0(disp)\n - mu/(disp+mu)^2 * (disp+x)\n +(mu-disp) / (disp+mu)\n + log(disp)\n + 1 - log(disp+mu)]\n + disp * psi_1(disp+x)\n + disp * psi_1(disp)\n $$\n \n Make sure that only element wise operations happen here!\n Do not simplify design matrix elements: they are only 0 or 1 for discrete\n groups but continuous if space, time, pseudotime or spline basis covariates\n are supplied!\n \n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n :param i: int\n Index of first dimension in fisher information matrix which is to be computed.\n :param j: int\n Index of second dimension in fisher information matrix which is to be computed\n\n :return: float\n Entry of fisher information matrix in dispersion model block at position (i,j)\n ' h_ij = (((((((((((disp * np.asarray(design_loc[(:, i)])) * np.asarray(design_loc[(:, j)])) * polygamma(n=0, x=(disp + x))) + polygamma(n=0, x=disp)) - ((mu / np.square((disp + mu))) * (disp + x))) + ((mu - disp) / (disp + mu))) + np.log(disp)) + 1) - np.log((disp + mu))) + (disp * polygamma(n=1, x=(disp + x)))) + (disp * polygamma(n=1, x=disp))) return np.sum(h_ij)
def hes_nb_glm_disp_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int): ' Compute entry of hessian in dispersion model block for a given gene.\n\n Sum the following across cells:\n $$\n h_{ij} =\n disp * x^{m_i} * x^{m_j} * [psi_0(disp+x)\n + psi_0(disp)\n - mu/(disp+mu)^2 * (disp+x)\n +(mu-disp) / (disp+mu)\n + log(disp)\n + 1 - log(disp+mu)]\n + disp * psi_1(disp+x)\n + disp * psi_1(disp)\n $$\n \n Make sure that only element wise operations happen here!\n Do not simplify design matrix elements: they are only 0 or 1 for discrete\n groups but continuous if space, time, pseudotime or spline basis covariates\n are supplied!\n \n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n :param i: int\n Index of first dimension in fisher information matrix which is to be computed.\n :param j: int\n Index of second dimension in fisher information matrix which is to be computed\n\n :return: float\n Entry of fisher information matrix in dispersion model block at position (i,j)\n ' h_ij = (((((((((((disp * np.asarray(design_loc[(:, i)])) * np.asarray(design_loc[(:, j)])) * polygamma(n=0, x=(disp + x))) + polygamma(n=0, x=disp)) - ((mu / np.square((disp + mu))) * (disp + x))) + ((mu - disp) / (disp + mu))) + np.log(disp)) + 1) - np.log((disp + mu))) + (disp * polygamma(n=1, x=(disp + x)))) + (disp * polygamma(n=1, x=disp))) return np.sum(h_ij)<|docstring|>Compute entry of hessian in dispersion model block for a given gene. 
Sum the following across cells: $$ h_{ij} = disp * x^{m_i} * x^{m_j} * [psi_0(disp+x) + psi_0(disp) - mu/(disp+mu)^2 * (disp+x) +(mu-disp) / (disp+mu) + log(disp) + 1 - log(disp+mu)] + disp * psi_1(disp+x) + disp * psi_1(disp) $$ Make sure that only element wise operations happen here! Do not simplify design matrix elements: they are only 0 or 1 for discrete groups but continuous if space, time, pseudotime or spline basis covariates are supplied! :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :param i: int Index of first dimension in fisher information matrix which is to be computed. :param j: int Index of second dimension in fisher information matrix which is to be computed :return: float Entry of fisher information matrix in dispersion model block at position (i,j)<|endoftext|>
8c5115fe01d17214282627969446b6834062f22f767e73c6d879e67f0f5894a3
def hes_nb_glm_meandisp_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int):
    """Compute one entry of the mean-dispersion cross block of the hessian.

    Sums the following over cells:

        h_ij = disp * x^{m_i} * x^{m_j} * (x - mu) / (1 + mu / disp)^2

    NOTE(review): the original docstring wrote a ``mu`` prefactor (and asked
    "need to multiply by -1 here?"), while the code uses ``disp`` — the code
    behavior is preserved here; confirm against the analytic derivation.

    Only element-wise operations are used, and design-matrix columns are
    never simplified: they are 0/1 only for discrete groups, but continuous
    when time, space, pseudotime or spline-basis covariates are supplied.

    :param x: observations for the gene, shape (cells,)
    :param mu: estimated mean per cell, shape (cells,)
    :param disp: estimated dispersion per cell, shape (cells,)
    :param design_loc: design matrix of the location model, (cells, p_loc)
    :param design_scale: design matrix of the scale model, (cells, p_scale);
        unused here but kept for a uniform block interface
    :param i: row index of the hessian entry
    :param j: column index of the hessian entry
    :return: float, entry (i, j) of the mean-dispersion hessian block
    """
    coef_i = np.asarray(design_loc[:, i])
    coef_j = np.asarray(design_loc[:, j])
    damping = np.square(1 + (mu / disp))
    per_cell = disp * coef_i * coef_j * (x - mu) / damping
    return np.sum(per_cell)
Compute entry of hessian in mean-dispersion model block for a given gene. Sum the following across cells: Need to multiply by -1 here?????? $h_{ij} = mu*x^{m_i}*x^{m_j}*(x-mu)/(1+mu/disp)^2$ Make sure that only element wise operations happen here! Do not simplify design matrix elements: they are only 0 or 1 for discrete groups but continuous if space, time, pseudotime or spline basis covariates are supplied! :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :param i: int Index of first dimension in fisher information matrix which is to be computed. :param j: int Index of second dimension in fisher information matrix which is to be computed :return: float Entry of fisher information matrix in mean-dispersion model block at position (i,j)
diffxpy/models/hessian.py
hes_nb_glm_meandisp_block
gokceneraslan/diffxpy
0
python
def hes_nb_glm_meandisp_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int): ' Compute entry of hessian in mean-dispersion model block for a given gene.\n\n Sum the following across cells:\n Need to multiply by -1 here??????\n $h_{ij} = mu*x^{m_i}*x^{m_j}*(x-mu)/(1+mu/disp)^2$\n \n Make sure that only element wise operations happen here!\n Do not simplify design matrix elements: they are only 0 or 1 for discrete\n groups but continuous if space, time, pseudotime or spline basis covariates\n are supplied!\n \n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n :param i: int\n Index of first dimension in fisher information matrix which is to be computed.\n :param j: int\n Index of second dimension in fisher information matrix which is to be computed\n\n :return: float\n Entry of fisher information matrix in mean-dispersion model block at position (i,j)\n ' h_ij = ((((disp * np.asarray(design_loc[(:, i)])) * np.asarray(design_loc[(:, j)])) * (x - mu)) / np.square((1 + (mu / disp)))) return np.sum(h_ij)
def hes_nb_glm_meandisp_block(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray, i: int, j: int): ' Compute entry of hessian in mean-dispersion model block for a given gene.\n\n Sum the following across cells:\n Need to multiply by -1 here??????\n $h_{ij} = mu*x^{m_i}*x^{m_j}*(x-mu)/(1+mu/disp)^2$\n \n Make sure that only element wise operations happen here!\n Do not simplify design matrix elements: they are only 0 or 1 for discrete\n groups but continuous if space, time, pseudotime or spline basis covariates\n are supplied!\n \n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n :param i: int\n Index of first dimension in fisher information matrix which is to be computed.\n :param j: int\n Index of second dimension in fisher information matrix which is to be computed\n\n :return: float\n Entry of fisher information matrix in mean-dispersion model block at position (i,j)\n ' h_ij = ((((disp * np.asarray(design_loc[(:, i)])) * np.asarray(design_loc[(:, j)])) * (x - mu)) / np.square((1 + (mu / disp)))) return np.sum(h_ij)<|docstring|>Compute entry of hessian in mean-dispersion model block for a given gene. Sum the following across cells: Need to multiply by -1 here?????? $h_{ij} = mu*x^{m_i}*x^{m_j}*(x-mu)/(1+mu/disp)^2$ Make sure that only element wise operations happen here! Do not simplify design matrix elements: they are only 0 or 1 for discrete groups but continuous if space, time, pseudotime or spline basis covariates are supplied! :param x: np.ndarray (cells,) Observations for a given gene. 
:param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :param i: int Index of first dimension in fisher information matrix which is to be computed. :param j: int Index of second dimension in fisher information matrix which is to be computed :return: float Entry of fisher information matrix in mean-dispersion model block at position (i,j)<|endoftext|>
2ac12269a16e92125a6e3369c7a7fa38b902b3f1bdb1115cc4e8268c73b0c13c
def hes_nb_glm_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray):
    """Assemble the full hessian for a single gene from its three blocks.

    The matrix layout is [mean block | cross block; cross block^T | dispersion
    block], with the mean-model parameters first.

    :param x: observations for the gene, shape (cells,)
    :param mu: estimated mean per cell, shape (cells,)
    :param disp: estimated dispersion per cell, shape (cells,)
    :param design_loc: design matrix of the location model, (cells, p_loc)
    :param design_scale: design matrix of the scale model, (cells, p_scale)
    :return: np.ndarray of shape (p_loc + p_scale, p_loc + p_scale)
    """
    p_loc = design_loc.shape[1]
    p_scale = design_scale.shape[1]
    p_total = p_loc + p_scale
    hessian = np.zeros([p_total, p_total])

    # Mean-model block (upper-left); fill the upper triangle and mirror.
    for a in range(p_loc):
        for b in range(a, p_loc):
            entry = hes_nb_glm_mean_block(x=x, mu=mu, disp=disp,
                                          design_loc=design_loc,
                                          design_scale=design_scale,
                                          i=a, j=b)
            hessian[a, b] = entry
            hessian[b, a] = entry

    # Dispersion-model block (lower-right); symmetric as well.
    for a in range(p_scale):
        for b in range(a, p_scale):
            entry = hes_nb_glm_disp_block(x=x, mu=mu, disp=disp,
                                          design_loc=design_loc,
                                          design_scale=design_scale,
                                          i=a, j=b)
            hessian[p_loc + a, p_loc + b] = entry
            hessian[p_loc + b, p_loc + a] = entry

    # Mean-dispersion cross block; mirrored across the diagonal.
    for a in range(p_loc):
        for b in range(p_scale):
            entry = hes_nb_glm_meandisp_block(x=x, mu=mu, disp=disp,
                                              design_loc=design_loc,
                                              design_scale=design_scale,
                                              i=a, j=b)
            hessian[a, p_loc + b] = entry
            hessian[p_loc + b, a] = entry

    return hessian
Compute hessian for a given gene. :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model) Fisher information matrix.
diffxpy/models/hessian.py
hes_nb_glm_bygene
gokceneraslan/diffxpy
0
python
def hes_nb_glm_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray): ' Compute hessian for a given gene.\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n \n :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model)\n Fisher information matrix.\n ' n_par_loc = design_loc.shape[1] n_par_scale = design_scale.shape[1] n_par = (n_par_loc + n_par_scale) hes = np.zeros([n_par, n_par]) for i in np.arange(0, n_par_loc): for j in np.arange(i, n_par_loc): hes[(i, j)] = hes_nb_glm_mean_block(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale, i=i, j=j) hes[(j, i)] = hes[(i, j)] for i in np.arange(0, n_par_scale): for j in np.arange(i, n_par_scale): hes[((n_par_loc + i), (n_par_loc + j))] = hes_nb_glm_disp_block(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale, i=i, j=j) hes[((n_par_loc + j), (n_par_loc + i))] = hes[((n_par_loc + i), (n_par_loc + j))] for i in np.arange(0, n_par_loc): for j in np.arange(0, n_par_scale): hes[(i, (n_par_loc + j))] = hes_nb_glm_meandisp_block(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale, i=i, j=j) hes[((n_par_loc + j), i)] = hes[(i, (n_par_loc + j))] return hes
def hes_nb_glm_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray): ' Compute hessian for a given gene.\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n \n :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model)\n Fisher information matrix.\n ' n_par_loc = design_loc.shape[1] n_par_scale = design_scale.shape[1] n_par = (n_par_loc + n_par_scale) hes = np.zeros([n_par, n_par]) for i in np.arange(0, n_par_loc): for j in np.arange(i, n_par_loc): hes[(i, j)] = hes_nb_glm_mean_block(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale, i=i, j=j) hes[(j, i)] = hes[(i, j)] for i in np.arange(0, n_par_scale): for j in np.arange(i, n_par_scale): hes[((n_par_loc + i), (n_par_loc + j))] = hes_nb_glm_disp_block(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale, i=i, j=j) hes[((n_par_loc + j), (n_par_loc + i))] = hes[((n_par_loc + i), (n_par_loc + j))] for i in np.arange(0, n_par_loc): for j in np.arange(0, n_par_scale): hes[(i, (n_par_loc + j))] = hes_nb_glm_meandisp_block(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale, i=i, j=j) hes[((n_par_loc + j), i)] = hes[(i, (n_par_loc + j))] return hes<|docstring|>Compute hessian for a given gene. :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. 
:param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model) Fisher information matrix.<|endoftext|>
1dc06a452c1ec47884832a9863a5646057c5aba2a1019cf4828084e49df2b535
def theta_covar_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray): ' Compute model coefficient covariance matrix for a given gene.\n\n Based on the hessian matrix via fisher information matrix (fim).\n covar = inv(fim) = inv(-hess)\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix(cells, #parameters shape model)\n Design matrix of shape model.\n \n :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model)\n Model coefficient covariance matrix.\n ' hes = hes_nb_glm_bygene(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale) return numpy.linalg.pinv((- hes))
Compute model coefficient covariance matrix for a given gene. Based on the hessian matrix via fisher information matrix (fim). covar = inv(fim) = inv(-hess) :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix(cells, #parameters shape model) Design matrix of shape model. :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model) Model coefficient covariance matrix.
diffxpy/models/hessian.py
theta_covar_bygene
gokceneraslan/diffxpy
0
python
def theta_covar_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray): ' Compute model coefficient covariance matrix for a given gene.\n\n Based on the hessian matrix via fisher information matrix (fim).\n covar = inv(fim) = inv(-hess)\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix(cells, #parameters shape model)\n Design matrix of shape model.\n \n :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model)\n Model coefficient covariance matrix.\n ' hes = hes_nb_glm_bygene(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale) return numpy.linalg.pinv((- hes))
def theta_covar_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray): ' Compute model coefficient covariance matrix for a given gene.\n\n Based on the hessian matrix via fisher information matrix (fim).\n covar = inv(fim) = inv(-hess)\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix(cells, #parameters shape model)\n Design matrix of shape model.\n \n :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model)\n Model coefficient covariance matrix.\n ' hes = hes_nb_glm_bygene(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale) return numpy.linalg.pinv((- hes))<|docstring|>Compute model coefficient covariance matrix for a given gene. Based on the hessian matrix via fisher information matrix (fim). covar = inv(fim) = inv(-hess) :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param mu: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix(cells, #parameters shape model) Design matrix of shape model. :return: np.ndarray (#parameters location model + #parameters shape model, #parameters location model + #parameters shape model) Model coefficient covariance matrix.<|endoftext|>
2481043da92b544f4b72c86a23fe01daf8ca4f65f0bd3f77c4eba4dfba9c26f2
def theta_sd_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray): ' Compute model coefficient standard deviation vector for a given gene.\n\n Based on the hessian matrix via fisher information matrix (fim).\n covar = inv(fim) = inv(-hess)\n var = diagonal of covar\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param disp: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n \n :return: np.ndarray (#parameters location model + #parameters shape model,)\n Model coefficient standard deviation vector.\n ' hes = hes_nb_glm_bygene(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale) return np.sqrt(numpy.linalg.pinv((- hes)).diagonal())
Compute model coefficient standard deviation vector for a given gene. Based on the hessian matrix via fisher information matrix (fim). covar = inv(fim) = inv(-hess) var = diagonal of covar :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param disp: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :return: np.ndarray (#parameters location model + #parameters shape model,) Model coefficient standard deviation vector.
diffxpy/models/hessian.py
theta_sd_bygene
gokceneraslan/diffxpy
0
python
def theta_sd_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray): ' Compute model coefficient standard deviation vector for a given gene.\n\n Based on the hessian matrix via fisher information matrix (fim).\n covar = inv(fim) = inv(-hess)\n var = diagonal of covar\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param disp: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n \n :return: np.ndarray (#parameters location model + #parameters shape model,)\n Model coefficient standard deviation vector.\n ' hes = hes_nb_glm_bygene(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale) return np.sqrt(numpy.linalg.pinv((- hes)).diagonal())
def theta_sd_bygene(x: np.ndarray, mu: np.ndarray, disp: np.ndarray, design_loc: np.ndarray, design_scale: np.ndarray): ' Compute model coefficient standard deviation vector for a given gene.\n\n Based on the hessian matrix via fisher information matrix (fim).\n covar = inv(fim) = inv(-hess)\n var = diagonal of covar\n\n :param x: np.ndarray (cells,)\n Observations for a given gene.\n :param mu: np.ndarray (cells,)\n Estimated mean parameters across cells for a given gene.\n :param disp: np.ndarray (cells,)\n Estimated dispersion parameters across cells for a given gene.\n :param design_loc: np.ndarray, matrix (cells, #parameters location model)\n Design matrix of location model.\n :param design_scale: np.ndarray, matrix (cells, #parameters shape model)\n Design matrix of shape model.\n \n :return: np.ndarray (#parameters location model + #parameters shape model,)\n Model coefficient standard deviation vector.\n ' hes = hes_nb_glm_bygene(x=x, mu=mu, disp=disp, design_loc=design_loc, design_scale=design_scale) return np.sqrt(numpy.linalg.pinv((- hes)).diagonal())<|docstring|>Compute model coefficient standard deviation vector for a given gene. Based on the hessian matrix via fisher information matrix (fim). covar = inv(fim) = inv(-hess) var = diagonal of covar :param x: np.ndarray (cells,) Observations for a given gene. :param mu: np.ndarray (cells,) Estimated mean parameters across cells for a given gene. :param disp: np.ndarray (cells,) Estimated dispersion parameters across cells for a given gene. :param design_loc: np.ndarray, matrix (cells, #parameters location model) Design matrix of location model. :param design_scale: np.ndarray, matrix (cells, #parameters shape model) Design matrix of shape model. :return: np.ndarray (#parameters location model + #parameters shape model,) Model coefficient standard deviation vector.<|endoftext|>
7dcc18f4cdc5024d36f388f34dc3e191103ed22750fd5c3e87b5ef4d29cc4916
def launch(self, session, event): 'Updates thumbnails of entities from new AssetVersion.' for entity in event['data'].get('entities', []): if ((entity['action'] == 'remove') or (entity['entityType'].lower() != 'assetversion') or ('thumbid' not in (entity.get('keys') or []))): continue version = session.get('AssetVersion', entity['entityId']) if (not version): continue thumbnail = version.get('thumbnail') if (not thumbnail): continue parent = version['asset']['parent'] task = version['task'] parent['thumbnail_id'] = version['thumbnail_id'] if (parent.entity_type.lower() == 'project'): name = parent['full_name'] else: name = parent['name'] task_msg = '' if task: task['thumbnail_id'] = version['thumbnail_id'] task_msg = ' and task [ {} ]'.format(task['name']) self.log.info('>>> Updating thumbnail for shot [ {} ]{}'.format(name, task_msg)) try: session.commit() except Exception: session.rollback()
Updates thumbnails of entities from new AssetVersion.
pype/ftrack/events/event_thumbnail_updates.py
launch
barklaya/pype
0
python
def launch(self, session, event): for entity in event['data'].get('entities', []): if ((entity['action'] == 'remove') or (entity['entityType'].lower() != 'assetversion') or ('thumbid' not in (entity.get('keys') or []))): continue version = session.get('AssetVersion', entity['entityId']) if (not version): continue thumbnail = version.get('thumbnail') if (not thumbnail): continue parent = version['asset']['parent'] task = version['task'] parent['thumbnail_id'] = version['thumbnail_id'] if (parent.entity_type.lower() == 'project'): name = parent['full_name'] else: name = parent['name'] task_msg = if task: task['thumbnail_id'] = version['thumbnail_id'] task_msg = ' and task [ {} ]'.format(task['name']) self.log.info('>>> Updating thumbnail for shot [ {} ]{}'.format(name, task_msg)) try: session.commit() except Exception: session.rollback()
def launch(self, session, event): for entity in event['data'].get('entities', []): if ((entity['action'] == 'remove') or (entity['entityType'].lower() != 'assetversion') or ('thumbid' not in (entity.get('keys') or []))): continue version = session.get('AssetVersion', entity['entityId']) if (not version): continue thumbnail = version.get('thumbnail') if (not thumbnail): continue parent = version['asset']['parent'] task = version['task'] parent['thumbnail_id'] = version['thumbnail_id'] if (parent.entity_type.lower() == 'project'): name = parent['full_name'] else: name = parent['name'] task_msg = if task: task['thumbnail_id'] = version['thumbnail_id'] task_msg = ' and task [ {} ]'.format(task['name']) self.log.info('>>> Updating thumbnail for shot [ {} ]{}'.format(name, task_msg)) try: session.commit() except Exception: session.rollback()<|docstring|>Updates thumbnails of entities from new AssetVersion.<|endoftext|>
2387a7b3989ad0a75eb8e52033011551908f6c6118ee66690e6059cc0a889265
def _get_manifest(self, configuration): '\n Read the manifest file, which contains path and file type info, and validate it.\n The manifest is expected to be at ROOT_DATA_PATH/manifest.yaml\n ' schema_file = self._get_manifest_schema_file() manifest_file = os.path.join(configuration['ROOT_DATA_PATH'], 'manifest.yaml') try: with open(manifest_file) as fd: manifest = yaml.safe_load(fd) except FileNotFoundError: raise RuntimeError(f'''No manifest file found at {manifest_file}. Please ensure that you have created a manifest that lists the files in the release''') try: validated_manifest = run_validator(schema_file=schema_file, data=manifest) except Exception as err: print(err) raise RuntimeError('The manifest file failed validation. Please recheck the file and try again.') return validated_manifest
Read the manifest file, which contains path and file type info, and validate it. The manifest is expected to be at ROOT_DATA_PATH/manifest.yaml
importers/djornl/parser.py
_get_manifest
n1mus/relation_engine
0
python
def _get_manifest(self, configuration): '\n Read the manifest file, which contains path and file type info, and validate it.\n The manifest is expected to be at ROOT_DATA_PATH/manifest.yaml\n ' schema_file = self._get_manifest_schema_file() manifest_file = os.path.join(configuration['ROOT_DATA_PATH'], 'manifest.yaml') try: with open(manifest_file) as fd: manifest = yaml.safe_load(fd) except FileNotFoundError: raise RuntimeError(f'No manifest file found at {manifest_file}. Please ensure that you have created a manifest that lists the files in the release') try: validated_manifest = run_validator(schema_file=schema_file, data=manifest) except Exception as err: print(err) raise RuntimeError('The manifest file failed validation. Please recheck the file and try again.') return validated_manifest
def _get_manifest(self, configuration): '\n Read the manifest file, which contains path and file type info, and validate it.\n The manifest is expected to be at ROOT_DATA_PATH/manifest.yaml\n ' schema_file = self._get_manifest_schema_file() manifest_file = os.path.join(configuration['ROOT_DATA_PATH'], 'manifest.yaml') try: with open(manifest_file) as fd: manifest = yaml.safe_load(fd) except FileNotFoundError: raise RuntimeError(f'No manifest file found at {manifest_file}. Please ensure that you have created a manifest that lists the files in the release') try: validated_manifest = run_validator(schema_file=schema_file, data=manifest) except Exception as err: print(err) raise RuntimeError('The manifest file failed validation. Please recheck the file and try again.') return validated_manifest<|docstring|>Read the manifest file, which contains path and file type info, and validate it. The manifest is expected to be at ROOT_DATA_PATH/manifest.yaml<|endoftext|>
a1a96ee87990dfc4af1eac96eddb5df552099eb9f82397cafd0edac3031ba645
def _get_file_reader(self, fd, file): 'Given a dict containing file information, instantiate the correct type of parser' delimiter = '\t' if ((('file_format' in file) and (file['file_format'].lower() == 'csv')) or file['path'].lower().endswith('.csv')): delimiter = ',' return csv.reader(fd, delimiter=delimiter)
Given a dict containing file information, instantiate the correct type of parser
importers/djornl/parser.py
_get_file_reader
n1mus/relation_engine
0
python
def _get_file_reader(self, fd, file): delimiter = '\t' if ((('file_format' in file) and (file['file_format'].lower() == 'csv')) or file['path'].lower().endswith('.csv')): delimiter = ',' return csv.reader(fd, delimiter=delimiter)
def _get_file_reader(self, fd, file): delimiter = '\t' if ((('file_format' in file) and (file['file_format'].lower() == 'csv')) or file['path'].lower().endswith('.csv')): delimiter = ',' return csv.reader(fd, delimiter=delimiter)<|docstring|>Given a dict containing file information, instantiate the correct type of parser<|endoftext|>
f0b62bb79b4eed3c3911bbc05f9cf3345699e48a7b9702c8e5bbec6423956461
def parser_gen(self, file): 'generator function to parse a file' expected_col_count = 0 with open(file['file_path'], newline='') as fd: csv_reader = self._get_file_reader(fd, file) line_no = 0 for row in csv_reader: line_no += 1 if ((not len(row)) or (row[0][0] == '#')): continue cols = [c.strip() for c in row] if (len(cols) == expected_col_count): (yield (line_no, cols, None)) continue if (expected_col_count == 0): expected_col_count = len(cols) (yield (line_no, [c.lower() for c in cols], None)) continue col_count = len(cols) msg = f'expected {expected_col_count} cols, found {col_count}' (yield (line_no, None, f"{file['path']} line {line_no}: {msg}"))
generator function to parse a file
importers/djornl/parser.py
parser_gen
n1mus/relation_engine
0
python
def parser_gen(self, file): expected_col_count = 0 with open(file['file_path'], newline=) as fd: csv_reader = self._get_file_reader(fd, file) line_no = 0 for row in csv_reader: line_no += 1 if ((not len(row)) or (row[0][0] == '#')): continue cols = [c.strip() for c in row] if (len(cols) == expected_col_count): (yield (line_no, cols, None)) continue if (expected_col_count == 0): expected_col_count = len(cols) (yield (line_no, [c.lower() for c in cols], None)) continue col_count = len(cols) msg = f'expected {expected_col_count} cols, found {col_count}' (yield (line_no, None, f"{file['path']} line {line_no}: {msg}"))
def parser_gen(self, file): expected_col_count = 0 with open(file['file_path'], newline=) as fd: csv_reader = self._get_file_reader(fd, file) line_no = 0 for row in csv_reader: line_no += 1 if ((not len(row)) or (row[0][0] == '#')): continue cols = [c.strip() for c in row] if (len(cols) == expected_col_count): (yield (line_no, cols, None)) continue if (expected_col_count == 0): expected_col_count = len(cols) (yield (line_no, [c.lower() for c in cols], None)) continue col_count = len(cols) msg = f'expected {expected_col_count} cols, found {col_count}' (yield (line_no, None, f"{file['path']} line {line_no}: {msg}"))<|docstring|>generator function to parse a file<|endoftext|>
9219b0f3eb507b4b142d806336fcdd33dde597e6991dc5079ec50cc0c167b1ce
def remap_object(self, raw_data, remap_functions): ' Given a dict, raw_data, create a new dict, remapped_data, using the functions in the\n dictionary `remap_functions`. ' remapped_data = {} for (key, function) in remap_functions.items(): if (function is None): if (key in raw_data): remapped_data[key] = raw_data[key] else: remapped_data[key] = function(raw_data) return remapped_data
Given a dict, raw_data, create a new dict, remapped_data, using the functions in the dictionary `remap_functions`.
importers/djornl/parser.py
remap_object
n1mus/relation_engine
0
python
def remap_object(self, raw_data, remap_functions): ' Given a dict, raw_data, create a new dict, remapped_data, using the functions in the\n dictionary `remap_functions`. ' remapped_data = {} for (key, function) in remap_functions.items(): if (function is None): if (key in raw_data): remapped_data[key] = raw_data[key] else: remapped_data[key] = function(raw_data) return remapped_data
def remap_object(self, raw_data, remap_functions): ' Given a dict, raw_data, create a new dict, remapped_data, using the functions in the\n dictionary `remap_functions`. ' remapped_data = {} for (key, function) in remap_functions.items(): if (function is None): if (key in raw_data): remapped_data[key] = raw_data[key] else: remapped_data[key] = function(raw_data) return remapped_data<|docstring|>Given a dict, raw_data, create a new dict, remapped_data, using the functions in the dictionary `remap_functions`.<|endoftext|>
872d8c27670f0468a2d1c8a3a0e578bf66013fdef893b282cefc0a401845fdd6
def process_file(self, file, remap_fn, store_fn, err_list, validator=None): ' process an input file to generate a dataset and possibly an error list\n\n Each valid line in the file is turned into a dictionary using the header row, and then\n validated against the csv validation schema in spec/datasets/djornl/csv_<file_type>.\n If that completes successfully, it is transformed using the functions in the dictionary\n `remap_fn`, checked for uniqueness against existing data, and saved to a dictionary. Once\n all files of a certain type have been processed, results can be saved to Arango.\n\n Any errors that occur during parsing and processing are accumulated in `err_list`.\n\n :param file: (dict) file data\n :param remap_fn: (dict) mapping of output param names to functions\n each function should take the row data object as input and\n return the value for the output parameter\n\n :param store_fn: (func) function to store the results of the remapping\n\n :param err_list: (list) error list\n\n :param validator: (Validator) jsonschema validator object\n\n ' file_parser = self.parser_gen(file) try: (line_no, cols, err_str) = next(file_parser) except StopIteration: err_list.append(f"{file['path']}: no header line found") return headers = cols n_stored = 0 for (line_no, cols, err_str) in file_parser: if (cols is None): err_list.append(err_str) continue row_object = dict(zip(headers, cols)) if (validator is not None): if (not validator.is_valid(row_object)): err_msg = ''.join(((f"{file['path']} line {line_no}: " + e.message) for e in sorted(validator.iter_errors(row_object), key=str))) err_list.append(err_msg) continue datum = self.remap_object(row_object, remap_fn) storage_error = store_fn(datum) if (storage_error is None): n_stored += 1 else: err_list.append((f"{file['path']} line {line_no}: " + storage_error)) if (not n_stored): err_list.append(f"{file['path']}: no valid data found")
process an input file to generate a dataset and possibly an error list Each valid line in the file is turned into a dictionary using the header row, and then validated against the csv validation schema in spec/datasets/djornl/csv_<file_type>. If that completes successfully, it is transformed using the functions in the dictionary `remap_fn`, checked for uniqueness against existing data, and saved to a dictionary. Once all files of a certain type have been processed, results can be saved to Arango. Any errors that occur during parsing and processing are accumulated in `err_list`. :param file: (dict) file data :param remap_fn: (dict) mapping of output param names to functions each function should take the row data object as input and return the value for the output parameter :param store_fn: (func) function to store the results of the remapping :param err_list: (list) error list :param validator: (Validator) jsonschema validator object
importers/djornl/parser.py
process_file
n1mus/relation_engine
0
python
def process_file(self, file, remap_fn, store_fn, err_list, validator=None): ' process an input file to generate a dataset and possibly an error list\n\n Each valid line in the file is turned into a dictionary using the header row, and then\n validated against the csv validation schema in spec/datasets/djornl/csv_<file_type>.\n If that completes successfully, it is transformed using the functions in the dictionary\n `remap_fn`, checked for uniqueness against existing data, and saved to a dictionary. Once\n all files of a certain type have been processed, results can be saved to Arango.\n\n Any errors that occur during parsing and processing are accumulated in `err_list`.\n\n :param file: (dict) file data\n :param remap_fn: (dict) mapping of output param names to functions\n each function should take the row data object as input and\n return the value for the output parameter\n\n :param store_fn: (func) function to store the results of the remapping\n\n :param err_list: (list) error list\n\n :param validator: (Validator) jsonschema validator object\n\n ' file_parser = self.parser_gen(file) try: (line_no, cols, err_str) = next(file_parser) except StopIteration: err_list.append(f"{file['path']}: no header line found") return headers = cols n_stored = 0 for (line_no, cols, err_str) in file_parser: if (cols is None): err_list.append(err_str) continue row_object = dict(zip(headers, cols)) if (validator is not None): if (not validator.is_valid(row_object)): err_msg = .join(((f"{file['path']} line {line_no}: " + e.message) for e in sorted(validator.iter_errors(row_object), key=str))) err_list.append(err_msg) continue datum = self.remap_object(row_object, remap_fn) storage_error = store_fn(datum) if (storage_error is None): n_stored += 1 else: err_list.append((f"{file['path']} line {line_no}: " + storage_error)) if (not n_stored): err_list.append(f"{file['path']}: no valid data found")
def process_file(self, file, remap_fn, store_fn, err_list, validator=None): ' process an input file to generate a dataset and possibly an error list\n\n Each valid line in the file is turned into a dictionary using the header row, and then\n validated against the csv validation schema in spec/datasets/djornl/csv_<file_type>.\n If that completes successfully, it is transformed using the functions in the dictionary\n `remap_fn`, checked for uniqueness against existing data, and saved to a dictionary. Once\n all files of a certain type have been processed, results can be saved to Arango.\n\n Any errors that occur during parsing and processing are accumulated in `err_list`.\n\n :param file: (dict) file data\n :param remap_fn: (dict) mapping of output param names to functions\n each function should take the row data object as input and\n return the value for the output parameter\n\n :param store_fn: (func) function to store the results of the remapping\n\n :param err_list: (list) error list\n\n :param validator: (Validator) jsonschema validator object\n\n ' file_parser = self.parser_gen(file) try: (line_no, cols, err_str) = next(file_parser) except StopIteration: err_list.append(f"{file['path']}: no header line found") return headers = cols n_stored = 0 for (line_no, cols, err_str) in file_parser: if (cols is None): err_list.append(err_str) continue row_object = dict(zip(headers, cols)) if (validator is not None): if (not validator.is_valid(row_object)): err_msg = .join(((f"{file['path']} line {line_no}: " + e.message) for e in sorted(validator.iter_errors(row_object), key=str))) err_list.append(err_msg) continue datum = self.remap_object(row_object, remap_fn) storage_error = store_fn(datum) if (storage_error is None): n_stored += 1 else: err_list.append((f"{file['path']} line {line_no}: " + storage_error)) if (not n_stored): err_list.append(f"{file['path']}: no valid data found")<|docstring|>process an input file to generate a dataset and possibly an error list Each 
valid line in the file is turned into a dictionary using the header row, and then validated against the csv validation schema in spec/datasets/djornl/csv_<file_type>. If that completes successfully, it is transformed using the functions in the dictionary `remap_fn`, checked for uniqueness against existing data, and saved to a dictionary. Once all files of a certain type have been processed, results can be saved to Arango. Any errors that occur during parsing and processing are accumulated in `err_list`. :param file: (dict) file data :param remap_fn: (dict) mapping of output param names to functions each function should take the row data object as input and return the value for the output parameter :param store_fn: (func) function to store the results of the remapping :param err_list: (list) error list :param validator: (Validator) jsonschema validator object<|endoftext|>
1777f4486407606314a4c4394d58e0d59abd35a1f2389cc1a1fbe538cae04635
def load_edges(self): 'Load edge data from the set of edge files' node_ix = {} edge_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_edge.yaml') validator = get_schema_validator(schema_file=schema_file) node_name = self.config('node_name') remap_functions = {'_key': (lambda row: '__'.join([row[_] for _ in ['node1', 'node2', 'layer_descrip', 'edge']])), 'node1': None, 'node2': None, '_from': (lambda row: ((node_name + '/') + row['node1'])), '_to': (lambda row: ((node_name + '/') + row['node2'])), 'score': (lambda row: float(row['edge'])), 'edge_type': (lambda row: row['layer_descrip'])} def store_edges(datum): edge_key = '__'.join([datum['node1'], datum['node2'], datum['edge_type']]) if (edge_key in edge_ix): if (datum['score'] == edge_ix[edge_key]['score']): return None return f'duplicate data for edge {edge_key}' for node_n in ['1', '2']: node_ix[datum[f'node{node_n}']] = 1 del datum[f'node{node_n}'] edge_ix[edge_key] = datum return None for file in self.config('edge_files'): self.process_file(file=file, remap_fn=remap_functions, store_fn=store_edges, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) return {'nodes': [{'_key': n} for n in node_ix.keys()], 'edges': edge_ix.values()}
Load edge data from the set of edge files
importers/djornl/parser.py
load_edges
n1mus/relation_engine
0
python
def load_edges(self): node_ix = {} edge_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_edge.yaml') validator = get_schema_validator(schema_file=schema_file) node_name = self.config('node_name') remap_functions = {'_key': (lambda row: '__'.join([row[_] for _ in ['node1', 'node2', 'layer_descrip', 'edge']])), 'node1': None, 'node2': None, '_from': (lambda row: ((node_name + '/') + row['node1'])), '_to': (lambda row: ((node_name + '/') + row['node2'])), 'score': (lambda row: float(row['edge'])), 'edge_type': (lambda row: row['layer_descrip'])} def store_edges(datum): edge_key = '__'.join([datum['node1'], datum['node2'], datum['edge_type']]) if (edge_key in edge_ix): if (datum['score'] == edge_ix[edge_key]['score']): return None return f'duplicate data for edge {edge_key}' for node_n in ['1', '2']: node_ix[datum[f'node{node_n}']] = 1 del datum[f'node{node_n}'] edge_ix[edge_key] = datum return None for file in self.config('edge_files'): self.process_file(file=file, remap_fn=remap_functions, store_fn=store_edges, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) return {'nodes': [{'_key': n} for n in node_ix.keys()], 'edges': edge_ix.values()}
def load_edges(self): node_ix = {} edge_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_edge.yaml') validator = get_schema_validator(schema_file=schema_file) node_name = self.config('node_name') remap_functions = {'_key': (lambda row: '__'.join([row[_] for _ in ['node1', 'node2', 'layer_descrip', 'edge']])), 'node1': None, 'node2': None, '_from': (lambda row: ((node_name + '/') + row['node1'])), '_to': (lambda row: ((node_name + '/') + row['node2'])), 'score': (lambda row: float(row['edge'])), 'edge_type': (lambda row: row['layer_descrip'])} def store_edges(datum): edge_key = '__'.join([datum['node1'], datum['node2'], datum['edge_type']]) if (edge_key in edge_ix): if (datum['score'] == edge_ix[edge_key]['score']): return None return f'duplicate data for edge {edge_key}' for node_n in ['1', '2']: node_ix[datum[f'node{node_n}']] = 1 del datum[f'node{node_n}'] edge_ix[edge_key] = datum return None for file in self.config('edge_files'): self.process_file(file=file, remap_fn=remap_functions, store_fn=store_edges, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) return {'nodes': [{'_key': n} for n in node_ix.keys()], 'edges': edge_ix.values()}<|docstring|>Load edge data from the set of edge files<|endoftext|>
cbbc24a83739050e385048fb2b1afcadfffa64876fac5a89a0add75c0630d940
def load_nodes(self): 'Load node metadata' node_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_node.yaml') validator = get_schema_validator(schema_file=schema_file) def go_terms(row): if (('go_terms' in row) and len(row['go_terms'])): return [c.strip() for c in row['go_terms'].split(',')] return [] remap_functions = {'gene_full_name': None, 'gene_model_type': None, 'gene_symbol': None, 'go_description': None, 'mapman_bin': None, 'mapman_description': None, 'mapman_name': None, 'node_type': None, 'pheno_aragwas_id': None, 'pheno_description': None, 'pheno_pto_description': None, 'pheno_pto_name': None, 'pheno_ref': None, 'tair_computational_description': None, 'tair_curator_summary': None, 'tair_short_description': None, 'transcript': None, 'user_notes': None, '_key': (lambda row: row['node_id']), 'go_terms': go_terms} def store_nodes(datum): if (datum['_key'] in node_ix): if (datum != node_ix[datum['_key']]): return f"duplicate data for node {datum['_key']}" return None node_ix[datum['_key']] = datum return None for file in self.config('node_files'): self.process_file(file=file, remap_fn=remap_functions, store_fn=store_nodes, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) return {'nodes': node_ix.values()}
Load node metadata
importers/djornl/parser.py
load_nodes
n1mus/relation_engine
0
python
def load_nodes(self): node_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_node.yaml') validator = get_schema_validator(schema_file=schema_file) def go_terms(row): if (('go_terms' in row) and len(row['go_terms'])): return [c.strip() for c in row['go_terms'].split(',')] return [] remap_functions = {'gene_full_name': None, 'gene_model_type': None, 'gene_symbol': None, 'go_description': None, 'mapman_bin': None, 'mapman_description': None, 'mapman_name': None, 'node_type': None, 'pheno_aragwas_id': None, 'pheno_description': None, 'pheno_pto_description': None, 'pheno_pto_name': None, 'pheno_ref': None, 'tair_computational_description': None, 'tair_curator_summary': None, 'tair_short_description': None, 'transcript': None, 'user_notes': None, '_key': (lambda row: row['node_id']), 'go_terms': go_terms} def store_nodes(datum): if (datum['_key'] in node_ix): if (datum != node_ix[datum['_key']]): return f"duplicate data for node {datum['_key']}" return None node_ix[datum['_key']] = datum return None for file in self.config('node_files'): self.process_file(file=file, remap_fn=remap_functions, store_fn=store_nodes, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) return {'nodes': node_ix.values()}
def load_nodes(self): node_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_node.yaml') validator = get_schema_validator(schema_file=schema_file) def go_terms(row): if (('go_terms' in row) and len(row['go_terms'])): return [c.strip() for c in row['go_terms'].split(',')] return [] remap_functions = {'gene_full_name': None, 'gene_model_type': None, 'gene_symbol': None, 'go_description': None, 'mapman_bin': None, 'mapman_description': None, 'mapman_name': None, 'node_type': None, 'pheno_aragwas_id': None, 'pheno_description': None, 'pheno_pto_description': None, 'pheno_pto_name': None, 'pheno_ref': None, 'tair_computational_description': None, 'tair_curator_summary': None, 'tair_short_description': None, 'transcript': None, 'user_notes': None, '_key': (lambda row: row['node_id']), 'go_terms': go_terms} def store_nodes(datum): if (datum['_key'] in node_ix): if (datum != node_ix[datum['_key']]): return f"duplicate data for node {datum['_key']}" return None node_ix[datum['_key']] = datum return None for file in self.config('node_files'): self.process_file(file=file, remap_fn=remap_functions, store_fn=store_nodes, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) return {'nodes': node_ix.values()}<|docstring|>Load node metadata<|endoftext|>
5725bed90a9718a430cf6fb3e0b74538856e8d7a94ec8f398469565d333f1f6a
def load_clusters(self): 'Annotate genes with cluster ID fields.' node_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_cluster.yaml') validator = get_schema_validator(schema_file=schema_file) remap_functions = {'node_ids': (lambda row: [n.strip() for n in row['node_ids'].split(',')])} def store_clusters(datum): cluster_id = datum['cluster_id'] for node_id in datum['node_ids']: if (node_id not in node_ix): node_ix[node_id] = [cluster_id] elif (cluster_id not in node_ix[node_id]): node_ix[node_id].append(cluster_id) return None for file in self.config('cluster_files'): prefix = file['cluster_prefix'] remap_functions['cluster_id'] = (lambda row: ((prefix + ':') + row['cluster_id'].replace('Cluster', ''))) self.process_file(file=file, remap_fn=remap_functions, store_fn=store_clusters, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) nodes = [{'_key': key, 'clusters': cluster_data} for (key, cluster_data) in node_ix.items()] return {'nodes': nodes}
Annotate genes with cluster ID fields.
importers/djornl/parser.py
load_clusters
n1mus/relation_engine
0
python
def load_clusters(self): node_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_cluster.yaml') validator = get_schema_validator(schema_file=schema_file) remap_functions = {'node_ids': (lambda row: [n.strip() for n in row['node_ids'].split(',')])} def store_clusters(datum): cluster_id = datum['cluster_id'] for node_id in datum['node_ids']: if (node_id not in node_ix): node_ix[node_id] = [cluster_id] elif (cluster_id not in node_ix[node_id]): node_ix[node_id].append(cluster_id) return None for file in self.config('cluster_files'): prefix = file['cluster_prefix'] remap_functions['cluster_id'] = (lambda row: ((prefix + ':') + row['cluster_id'].replace('Cluster', ))) self.process_file(file=file, remap_fn=remap_functions, store_fn=store_clusters, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) nodes = [{'_key': key, 'clusters': cluster_data} for (key, cluster_data) in node_ix.items()] return {'nodes': nodes}
def load_clusters(self): node_ix = {} err_list = [] schema_file = os.path.join(self._get_dataset_schema_dir(), 'csv_cluster.yaml') validator = get_schema_validator(schema_file=schema_file) remap_functions = {'node_ids': (lambda row: [n.strip() for n in row['node_ids'].split(',')])} def store_clusters(datum): cluster_id = datum['cluster_id'] for node_id in datum['node_ids']: if (node_id not in node_ix): node_ix[node_id] = [cluster_id] elif (cluster_id not in node_ix[node_id]): node_ix[node_id].append(cluster_id) return None for file in self.config('cluster_files'): prefix = file['cluster_prefix'] remap_functions['cluster_id'] = (lambda row: ((prefix + ':') + row['cluster_id'].replace('Cluster', ))) self.process_file(file=file, remap_fn=remap_functions, store_fn=store_clusters, err_list=err_list, validator=validator) if len(err_list): raise RuntimeError('\n'.join(err_list)) nodes = [{'_key': key, 'clusters': cluster_data} for (key, cluster_data) in node_ix.items()] return {'nodes': nodes}<|docstring|>Annotate genes with cluster ID fields.<|endoftext|>
51160000c1ef104d54e48aaa60ed926927fb14fbe6b17a510a9ac19f581d9a12
def top_products_stores(product_store_df: pd.DataFrame, quantity_dict: Dict[(str, int)]) -> pd.DataFrame: '\n Calculates top seller products or top seller stores according to the given parameters.\n :param product_store_df: dataframe of product.csv or store.csv files\n :param quantity_dict: quantity dictionary of products or stores\n :return: top seller products or top seller stores\n ' max_pairs: Dict[(str, int)] = on_equality_func(quantity_dict) product_names: List[str] = [] for pair in max_pairs: product_names.append(product_store_df.loc[((product_store_df['id'] == pair), 'name')].iloc[0]) return pd.DataFrame(list(zip(product_names, max_pairs.values())), columns=['name', 'quantity'])
Calculates top seller products or top seller stores according to the given parameters. :param product_store_df: dataframe of product.csv or store.csv files :param quantity_dict: quantity dictionary of products or stores :return: top seller products or top seller stores
python/solutions/q2/solution.py
top_products_stores
sertayy/CaseInterview
0
python
def top_products_stores(product_store_df: pd.DataFrame, quantity_dict: Dict[(str, int)]) -> pd.DataFrame: '\n Calculates top seller products or top seller stores according to the given parameters.\n :param product_store_df: dataframe of product.csv or store.csv files\n :param quantity_dict: quantity dictionary of products or stores\n :return: top seller products or top seller stores\n ' max_pairs: Dict[(str, int)] = on_equality_func(quantity_dict) product_names: List[str] = [] for pair in max_pairs: product_names.append(product_store_df.loc[((product_store_df['id'] == pair), 'name')].iloc[0]) return pd.DataFrame(list(zip(product_names, max_pairs.values())), columns=['name', 'quantity'])
def top_products_stores(product_store_df: pd.DataFrame, quantity_dict: Dict[(str, int)]) -> pd.DataFrame: '\n Calculates top seller products or top seller stores according to the given parameters.\n :param product_store_df: dataframe of product.csv or store.csv files\n :param quantity_dict: quantity dictionary of products or stores\n :return: top seller products or top seller stores\n ' max_pairs: Dict[(str, int)] = on_equality_func(quantity_dict) product_names: List[str] = [] for pair in max_pairs: product_names.append(product_store_df.loc[((product_store_df['id'] == pair), 'name')].iloc[0]) return pd.DataFrame(list(zip(product_names, max_pairs.values())), columns=['name', 'quantity'])<|docstring|>Calculates top seller products or top seller stores according to the given parameters. :param product_store_df: dataframe of product.csv or store.csv files :param quantity_dict: quantity dictionary of products or stores :return: top seller products or top seller stores<|endoftext|>
b86fea4d52dee496f503134c93375d525942d295251296291119e132e22859f7
def on_equality_func(quantity_dict) -> Dict[(str, int)]: '\n Adds the equal values to the data frame. In that case the number of rows are more than the top value.\n :param quantity_dict: quantity dictionary\n :return: a dictionary consisting of maximum values\n ' max_pairs: Dict[(str, int)] = dict(Counter(quantity_dict).most_common(TOP)) min_value = min(max_pairs, key=max_pairs.get) for product in quantity_dict: if ((product not in max_pairs) and (quantity_dict[product] == quantity_dict[min_value])): max_pairs[product] = quantity_dict[product] return max_pairs
Adds the equal values to the data frame. In that case the number of rows are more than the top value. :param quantity_dict: quantity dictionary :return: a dictionary consisting of maximum values
python/solutions/q2/solution.py
on_equality_func
sertayy/CaseInterview
0
python
def on_equality_func(quantity_dict) -> Dict[(str, int)]: '\n Adds the equal values to the data frame. In that case the number of rows are more than the top value.\n :param quantity_dict: quantity dictionary\n :return: a dictionary consisting of maximum values\n ' max_pairs: Dict[(str, int)] = dict(Counter(quantity_dict).most_common(TOP)) min_value = min(max_pairs, key=max_pairs.get) for product in quantity_dict: if ((product not in max_pairs) and (quantity_dict[product] == quantity_dict[min_value])): max_pairs[product] = quantity_dict[product] return max_pairs
def on_equality_func(quantity_dict) -> Dict[(str, int)]: '\n Adds the equal values to the data frame. In that case the number of rows are more than the top value.\n :param quantity_dict: quantity dictionary\n :return: a dictionary consisting of maximum values\n ' max_pairs: Dict[(str, int)] = dict(Counter(quantity_dict).most_common(TOP)) min_value = min(max_pairs, key=max_pairs.get) for product in quantity_dict: if ((product not in max_pairs) and (quantity_dict[product] == quantity_dict[min_value])): max_pairs[product] = quantity_dict[product] return max_pairs<|docstring|>Adds the equal values to the data frame. In that case the number of rows are more than the top value. :param quantity_dict: quantity dictionary :return: a dictionary consisting of maximum values<|endoftext|>
b85e8b6afdb420c4ab61903c8951c6700ed42526cc69f466535dcf96be085c10
def top_brands_cities(quantity_dict: Dict[(str, int)], product_store_df: pd.DataFrame, column_name: str) -> pd.DataFrame: '\n Calculates top seller brands or top seller cities according to the given parameters.\n :param quantity_dict: quantity dictionary of products or stores\n :param product_store_df: dataframe of product.csv or store.csv files\n :param column_name: brand or store\n :return: top seller brands or top seller cities\n ' best_sellers: Dict[(str, int)] = {} for key in quantity_dict: brand = product_store_df.loc[((product_store_df['id'] == key), column_name)].iloc[0] if (brand in best_sellers): best_sellers[brand] += quantity_dict[key] else: best_sellers[brand] = quantity_dict[key] max_pairs: Dict[(str, int)] = on_equality_func(best_sellers) return pd.DataFrame(max_pairs.items(), columns=[column_name, 'quantity'])
Calculates top seller brands or top seller cities according to the given parameters. :param quantity_dict: quantity dictionary of products or stores :param product_store_df: dataframe of product.csv or store.csv files :param column_name: brand or store :return: top seller brands or top seller cities
python/solutions/q2/solution.py
top_brands_cities
sertayy/CaseInterview
0
python
def top_brands_cities(quantity_dict: Dict[(str, int)], product_store_df: pd.DataFrame, column_name: str) -> pd.DataFrame: '\n Calculates top seller brands or top seller cities according to the given parameters.\n :param quantity_dict: quantity dictionary of products or stores\n :param product_store_df: dataframe of product.csv or store.csv files\n :param column_name: brand or store\n :return: top seller brands or top seller cities\n ' best_sellers: Dict[(str, int)] = {} for key in quantity_dict: brand = product_store_df.loc[((product_store_df['id'] == key), column_name)].iloc[0] if (brand in best_sellers): best_sellers[brand] += quantity_dict[key] else: best_sellers[brand] = quantity_dict[key] max_pairs: Dict[(str, int)] = on_equality_func(best_sellers) return pd.DataFrame(max_pairs.items(), columns=[column_name, 'quantity'])
def top_brands_cities(quantity_dict: Dict[(str, int)], product_store_df: pd.DataFrame, column_name: str) -> pd.DataFrame: '\n Calculates top seller brands or top seller cities according to the given parameters.\n :param quantity_dict: quantity dictionary of products or stores\n :param product_store_df: dataframe of product.csv or store.csv files\n :param column_name: brand or store\n :return: top seller brands or top seller cities\n ' best_sellers: Dict[(str, int)] = {} for key in quantity_dict: brand = product_store_df.loc[((product_store_df['id'] == key), column_name)].iloc[0] if (brand in best_sellers): best_sellers[brand] += quantity_dict[key] else: best_sellers[brand] = quantity_dict[key] max_pairs: Dict[(str, int)] = on_equality_func(best_sellers) return pd.DataFrame(max_pairs.items(), columns=[column_name, 'quantity'])<|docstring|>Calculates top seller brands or top seller cities according to the given parameters. :param quantity_dict: quantity dictionary of products or stores :param product_store_df: dataframe of product.csv or store.csv files :param column_name: brand or store :return: top seller brands or top seller cities<|endoftext|>
14be289595142cb167d83312da989f9ce5a7184c8862477f9eedf0d149c13857
def calc_quantity(sales_between_dates: pd.DataFrame, key: str) -> Dict[(str, int)]: '\n Calculates the quantity of each product and store.\n :param sales_between_dates: represents the sales in the given date interval\n :param key: product or store\n :return: returns the quantity dictionary of products or stores => {product/store: quantity}\n ' key_list = list(sales_between_dates[key]) quantities = list(sales_between_dates['quantity']) quantity_dict: Dict[(str, int)] = {} for i in range(len(key_list)): if (key_list[i] in quantity_dict): quantity_dict[key_list[i]] += quantities[i] else: quantity_dict[key_list[i]] = quantities[i] return quantity_dict
Calculates the quantity of each product and store. :param sales_between_dates: represents the sales in the given date interval :param key: product or store :return: returns the quantity dictionary of products or stores => {product/store: quantity}
python/solutions/q2/solution.py
calc_quantity
sertayy/CaseInterview
0
python
def calc_quantity(sales_between_dates: pd.DataFrame, key: str) -> Dict[(str, int)]: '\n Calculates the quantity of each product and store.\n :param sales_between_dates: represents the sales in the given date interval\n :param key: product or store\n :return: returns the quantity dictionary of products or stores => {product/store: quantity}\n ' key_list = list(sales_between_dates[key]) quantities = list(sales_between_dates['quantity']) quantity_dict: Dict[(str, int)] = {} for i in range(len(key_list)): if (key_list[i] in quantity_dict): quantity_dict[key_list[i]] += quantities[i] else: quantity_dict[key_list[i]] = quantities[i] return quantity_dict
def calc_quantity(sales_between_dates: pd.DataFrame, key: str) -> Dict[(str, int)]: '\n Calculates the quantity of each product and store.\n :param sales_between_dates: represents the sales in the given date interval\n :param key: product or store\n :return: returns the quantity dictionary of products or stores => {product/store: quantity}\n ' key_list = list(sales_between_dates[key]) quantities = list(sales_between_dates['quantity']) quantity_dict: Dict[(str, int)] = {} for i in range(len(key_list)): if (key_list[i] in quantity_dict): quantity_dict[key_list[i]] += quantities[i] else: quantity_dict[key_list[i]] = quantities[i] return quantity_dict<|docstring|>Calculates the quantity of each product and store. :param sales_between_dates: represents the sales in the given date interval :param key: product or store :return: returns the quantity dictionary of products or stores => {product/store: quantity}<|endoftext|>
73f3cae4815414f24a8cfd702a4efd84dfede5db22f2d0ec5bd14acc1dcf7136
def top_sellers(min_date: str, max_date: str): '\n Main simulation function.\n :param min_date: lower bound of date\n :param max_date: upper bound of date\n ' product_df = pd.read_csv('input/product.csv') sales_df = pd.read_csv('input/sales.csv') store_df = pd.read_csv('input/store.csv') date_interval: bool = ((sales_df['date'] >= min_date) & (sales_df['date'] <= max_date)) product_quantity = calc_quantity(sales_df.loc[date_interval], 'product') store_quantity = calc_quantity(sales_df.loc[date_interval], 'store') top_seller_product = top_products_stores(product_df, product_quantity) print('-- top seller product --') print(top_seller_product) top_seller_stores = top_products_stores(store_df, store_quantity) print('-- top seller store --') print(top_seller_stores) top_seller_brand = top_brands_cities(product_quantity, product_df, 'brand') print('-- top seller brand --') print(top_seller_brand) top_seller_cities = top_brands_cities(store_quantity, store_df, 'city') print('-- top seller city --') print(top_seller_cities)
Main simulation function. :param min_date: lower bound of date :param max_date: upper bound of date
python/solutions/q2/solution.py
top_sellers
sertayy/CaseInterview
0
python
def top_sellers(min_date: str, max_date: str): '\n Main simulation function.\n :param min_date: lower bound of date\n :param max_date: upper bound of date\n ' product_df = pd.read_csv('input/product.csv') sales_df = pd.read_csv('input/sales.csv') store_df = pd.read_csv('input/store.csv') date_interval: bool = ((sales_df['date'] >= min_date) & (sales_df['date'] <= max_date)) product_quantity = calc_quantity(sales_df.loc[date_interval], 'product') store_quantity = calc_quantity(sales_df.loc[date_interval], 'store') top_seller_product = top_products_stores(product_df, product_quantity) print('-- top seller product --') print(top_seller_product) top_seller_stores = top_products_stores(store_df, store_quantity) print('-- top seller store --') print(top_seller_stores) top_seller_brand = top_brands_cities(product_quantity, product_df, 'brand') print('-- top seller brand --') print(top_seller_brand) top_seller_cities = top_brands_cities(store_quantity, store_df, 'city') print('-- top seller city --') print(top_seller_cities)
def top_sellers(min_date: str, max_date: str): '\n Main simulation function.\n :param min_date: lower bound of date\n :param max_date: upper bound of date\n ' product_df = pd.read_csv('input/product.csv') sales_df = pd.read_csv('input/sales.csv') store_df = pd.read_csv('input/store.csv') date_interval: bool = ((sales_df['date'] >= min_date) & (sales_df['date'] <= max_date)) product_quantity = calc_quantity(sales_df.loc[date_interval], 'product') store_quantity = calc_quantity(sales_df.loc[date_interval], 'store') top_seller_product = top_products_stores(product_df, product_quantity) print('-- top seller product --') print(top_seller_product) top_seller_stores = top_products_stores(store_df, store_quantity) print('-- top seller store --') print(top_seller_stores) top_seller_brand = top_brands_cities(product_quantity, product_df, 'brand') print('-- top seller brand --') print(top_seller_brand) top_seller_cities = top_brands_cities(store_quantity, store_df, 'city') print('-- top seller city --') print(top_seller_cities)<|docstring|>Main simulation function. :param min_date: lower bound of date :param max_date: upper bound of date<|endoftext|>
567255859e826fb05c08cbd245405fc25a74bd409656fa397a20c3bddfa35ffd
def global_variable1(): 'Decalre a global variable, and assign a new value to a' global a a = 2 print(a)
Decalre a global variable, and assign a new value to a
function_global.py
global_variable1
YangZeCN/PythonLearningCenter
0
python
def global_variable1(): global a a = 2 print(a)
def global_variable1(): global a a = 2 print(a)<|docstring|>Decalre a global variable, and assign a new value to a<|endoftext|>
d7ebb05ccd3f6ff018d055f55cf0a40d6b1cbcb2be3d193db21e8cbc2ed3847e
def global_variable2(): "Although the variable a was declared at begin, but in this function, it's still a local one" a = 10 print(a)
Although the variable a was declared at begin, but in this function, it's still a local one
function_global.py
global_variable2
YangZeCN/PythonLearningCenter
0
python
def global_variable2(): a = 10 print(a)
def global_variable2(): a = 10 print(a)<|docstring|>Although the variable a was declared at begin, but in this function, it's still a local one<|endoftext|>
da1ca3d71f2a66363511a892662efb3507207b4486e3cacb8bd8c65788f70abf
def generate(keytype, keysize=2048): '\n Generate the same fromat of key object as it is in activeledger\n ' if (keysize <= 1024): raise ValueError('key size must larger than 1024') if (keytype == 'rsa'): private_key = rsa.generate_private_key(65537, keysize, default_backend()) public_key = private_key.public_key() key_object = {'pub': {'pkcs8pem': public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo).decode(), 'hash': hashlib.sha256(public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo)).hexdigest()}, 'prv': {'pkcs8pem': private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()).decode(), 'hash': hashlib.sha256(private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption())).hexdigest()}} return key_object if (keytype == 'secp256k1'): private_key = ec.generate_private_key(ec.SECP256K1(), default_backend()) public_key = private_key.public_key() key_object = {'pub': {'pkcs8pem': public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo).decode(), 'hash': hashlib.sha256(public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo)).hexdigest()}, 'prv': {'pkcs8pem': private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()).decode(), 'hash': hashlib.sha256(private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption())).hexdigest()}} return key_object raise ValueError('keytype unrecognized')
Generate the same fromat of key object as it is in activeledger
src/activeledgersdk/primitives/keypairs.py
generate
jialin-yu/activeLedgersdk
1
python
def generate(keytype, keysize=2048): '\n \n ' if (keysize <= 1024): raise ValueError('key size must larger than 1024') if (keytype == 'rsa'): private_key = rsa.generate_private_key(65537, keysize, default_backend()) public_key = private_key.public_key() key_object = {'pub': {'pkcs8pem': public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo).decode(), 'hash': hashlib.sha256(public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo)).hexdigest()}, 'prv': {'pkcs8pem': private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()).decode(), 'hash': hashlib.sha256(private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption())).hexdigest()}} return key_object if (keytype == 'secp256k1'): private_key = ec.generate_private_key(ec.SECP256K1(), default_backend()) public_key = private_key.public_key() key_object = {'pub': {'pkcs8pem': public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo).decode(), 'hash': hashlib.sha256(public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo)).hexdigest()}, 'prv': {'pkcs8pem': private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()).decode(), 'hash': hashlib.sha256(private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption())).hexdigest()}} return key_object raise ValueError('keytype unrecognized')
def generate(keytype, keysize=2048): '\n \n ' if (keysize <= 1024): raise ValueError('key size must larger than 1024') if (keytype == 'rsa'): private_key = rsa.generate_private_key(65537, keysize, default_backend()) public_key = private_key.public_key() key_object = {'pub': {'pkcs8pem': public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo).decode(), 'hash': hashlib.sha256(public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo)).hexdigest()}, 'prv': {'pkcs8pem': private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()).decode(), 'hash': hashlib.sha256(private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption())).hexdigest()}} return key_object if (keytype == 'secp256k1'): private_key = ec.generate_private_key(ec.SECP256K1(), default_backend()) public_key = private_key.public_key() key_object = {'pub': {'pkcs8pem': public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo).decode(), 'hash': hashlib.sha256(public_key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo)).hexdigest()}, 'prv': {'pkcs8pem': private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()).decode(), 'hash': hashlib.sha256(private_key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption())).hexdigest()}} return key_object raise ValueError('keytype unrecognized')<|docstring|>Generate the same fromat of key object as it is in activeledger<|endoftext|>
c679f3cd11025a91fdbefc10d2a7f7973dfd7215ecd65ece27052aa50c06fa9d
def verify(keytype, key_object): '\n Verification function to check if the public and private key pair in the \n key object is valid. In Python key_object is in dictionary format.\n ' if (type(key_object) is dict): try: pub_key = key_object.get('pub').get('pkcs8pem') prv_key = key_object.get('prv').get('pkcs8pem') private_key = serialization.load_pem_private_key(prv_key.encode(), None, default_backend()) public_key = serialization.load_pem_public_key(pub_key.encode(), default_backend()) message = b'key value verification' except: raise ValueError('key information error') else: raise TypeError('key object should be in dictionary format') if (keytype == 'rsa'): signature = private_key.sign(message, padding.PKCS1v15(), hashes.SHA256()) try: public_key.verify(signature, message, padding.PKCS1v15(), hashes.SHA256()) return True except: return False if (keytype == 'secp256k1'): signature = private_key.sign(message, ec.ECDSA(hashes.SHA256())) try: public_key.verify(signature, message, ec.ECDSA(hashes.SHA256())) return True except: return False raise ValueError('keytype unrecognized')
Verification function to check if the public and private key pair in the key object is valid. In Python key_object is in dictionary format.
src/activeledgersdk/primitives/keypairs.py
verify
jialin-yu/activeLedgersdk
1
python
def verify(keytype, key_object): '\n Verification function to check if the public and private key pair in the \n key object is valid. In Python key_object is in dictionary format.\n ' if (type(key_object) is dict): try: pub_key = key_object.get('pub').get('pkcs8pem') prv_key = key_object.get('prv').get('pkcs8pem') private_key = serialization.load_pem_private_key(prv_key.encode(), None, default_backend()) public_key = serialization.load_pem_public_key(pub_key.encode(), default_backend()) message = b'key value verification' except: raise ValueError('key information error') else: raise TypeError('key object should be in dictionary format') if (keytype == 'rsa'): signature = private_key.sign(message, padding.PKCS1v15(), hashes.SHA256()) try: public_key.verify(signature, message, padding.PKCS1v15(), hashes.SHA256()) return True except: return False if (keytype == 'secp256k1'): signature = private_key.sign(message, ec.ECDSA(hashes.SHA256())) try: public_key.verify(signature, message, ec.ECDSA(hashes.SHA256())) return True except: return False raise ValueError('keytype unrecognized')
def verify(keytype, key_object): '\n Verification function to check if the public and private key pair in the \n key object is valid. In Python key_object is in dictionary format.\n ' if (type(key_object) is dict): try: pub_key = key_object.get('pub').get('pkcs8pem') prv_key = key_object.get('prv').get('pkcs8pem') private_key = serialization.load_pem_private_key(prv_key.encode(), None, default_backend()) public_key = serialization.load_pem_public_key(pub_key.encode(), default_backend()) message = b'key value verification' except: raise ValueError('key information error') else: raise TypeError('key object should be in dictionary format') if (keytype == 'rsa'): signature = private_key.sign(message, padding.PKCS1v15(), hashes.SHA256()) try: public_key.verify(signature, message, padding.PKCS1v15(), hashes.SHA256()) return True except: return False if (keytype == 'secp256k1'): signature = private_key.sign(message, ec.ECDSA(hashes.SHA256())) try: public_key.verify(signature, message, ec.ECDSA(hashes.SHA256())) return True except: return False raise ValueError('keytype unrecognized')<|docstring|>Verification function to check if the public and private key pair in the key object is valid. In Python key_object is in dictionary format.<|endoftext|>
f711a7f5ab3513f2b82b824fe0e47045ff7941827f1478eebba161bf6df761ef
def sign(keytype, key_object, message): '\n sign function return a string from a message signed by private key\n the message should be in dic format\n private key is derived from key object which is in dic format\n ' if ((type(message) is dict) and (type(key_object) is dict)): try: prv_key = key_object.get('prv').get('pkcs8pem') private_key = serialization.load_pem_private_key(prv_key.encode(), None, default_backend()) message = json.dumps(message, separators=(',', ':')).encode() except: raise ValueError('key information error') if (keytype == 'rsa'): signature = private_key.sign(message, padding.PKCS1v15(), hashes.SHA256()) sig_string = base64.b64encode(signature).decode() return sig_string if (keytype == 'secp256k1'): signature = private_key.sign(message, ec.ECDSA(hashes.SHA256())) sig_string = base64.b64encode(signature).decode() return sig_string raise ValueError('keytype unrecognized') raise TypeError('type dont recognize')
sign function return a string from a message signed by private key the message should be in dic format private key is derived from key object which is in dic format
src/activeledgersdk/primitives/keypairs.py
sign
jialin-yu/activeLedgersdk
1
python
def sign(keytype, key_object, message): '\n sign function return a string from a message signed by private key\n the message should be in dic format\n private key is derived from key object which is in dic format\n ' if ((type(message) is dict) and (type(key_object) is dict)): try: prv_key = key_object.get('prv').get('pkcs8pem') private_key = serialization.load_pem_private_key(prv_key.encode(), None, default_backend()) message = json.dumps(message, separators=(',', ':')).encode() except: raise ValueError('key information error') if (keytype == 'rsa'): signature = private_key.sign(message, padding.PKCS1v15(), hashes.SHA256()) sig_string = base64.b64encode(signature).decode() return sig_string if (keytype == 'secp256k1'): signature = private_key.sign(message, ec.ECDSA(hashes.SHA256())) sig_string = base64.b64encode(signature).decode() return sig_string raise ValueError('keytype unrecognized') raise TypeError('type dont recognize')
def sign(keytype, key_object, message): '\n sign function return a string from a message signed by private key\n the message should be in dic format\n private key is derived from key object which is in dic format\n ' if ((type(message) is dict) and (type(key_object) is dict)): try: prv_key = key_object.get('prv').get('pkcs8pem') private_key = serialization.load_pem_private_key(prv_key.encode(), None, default_backend()) message = json.dumps(message, separators=(',', ':')).encode() except: raise ValueError('key information error') if (keytype == 'rsa'): signature = private_key.sign(message, padding.PKCS1v15(), hashes.SHA256()) sig_string = base64.b64encode(signature).decode() return sig_string if (keytype == 'secp256k1'): signature = private_key.sign(message, ec.ECDSA(hashes.SHA256())) sig_string = base64.b64encode(signature).decode() return sig_string raise ValueError('keytype unrecognized') raise TypeError('type dont recognize')<|docstring|>sign function return a string from a message signed by private key the message should be in dic format private key is derived from key object which is in dic format<|endoftext|>
4f50eafd5d0f69112c16be64ca8156eb379ca50d28386691f19e3df995961979
def validate(self, spec): '\n Checks that .icon file names do not contain spaces\n ' d = spec.directory for file_name in os.listdir(d): if file_name.endswith('.icon'): if (' ' in file_name): raise ValidationException(f'''The .icon file name was '{file_name}'. .icon file may not contain spaces use a '_' instead.''')
Checks that .icon file names do not contain spaces
icon_validator/rules/workflow_validators/workflow_icon_filename_validator.py
validate
rapid7/icon-integrations-validators
6
python
def validate(self, spec): '\n \n ' d = spec.directory for file_name in os.listdir(d): if file_name.endswith('.icon'): if (' ' in file_name): raise ValidationException(f'The .icon file name was '{file_name}'. .icon file may not contain spaces use a '_' instead.')
def validate(self, spec): '\n \n ' d = spec.directory for file_name in os.listdir(d): if file_name.endswith('.icon'): if (' ' in file_name): raise ValidationException(f'The .icon file name was '{file_name}'. .icon file may not contain spaces use a '_' instead.')<|docstring|>Checks that .icon file names do not contain spaces<|endoftext|>
5f9a8e9001dd0d1e0c0975b2b881e826a6b1494ebb1472ccdb722f33f84e4169
def train_aoi2(output_path, data_fpath, labels_fpath, purge=True): '\n CommandLine:\n python -m wbia_cnn.train --test-train_aoi2\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia_cnn.train import * # NOQA\n >>> result = train_aoi2()\n >>> print(result)\n ' era_size = 16 max_epochs = 256 hyperparams = ut.argparse_dict({'era_size': era_size, 'batch_size': 32, 'learning_rate': 0.01, 'rate_schedule': 0.5, 'momentum': 0.9, 'weight_decay': 0.0001, 'augment_on': True, 'augment_on_validate': True, 'augment_weights': True, 'label_encode_on': False, 'whiten_on': True, 'class_weight': None, 'max_epochs': max_epochs}) ut.colorprint('[netrun] Ensuring Dataset', 'yellow') dataset = ingest_data.get_numpy_dataset2('aoi2', data_fpath, labels_fpath, output_path, cache=False) (X_train, y_train) = dataset.subset('train') (X_valid, y_valid) = dataset.subset('valid') print(('dataset.training_dpath = %r' % (dataset.training_dpath,))) if purge: model = AoI2Model(data_shape=dataset.data_shape, training_dpath=dataset.training_dpath, **hyperparams) model.output_dims = 2 model.init_arch() ut.delete(model.arch_dpath) ut.colorprint('[netrun] Architecture Specification', 'yellow') model = AoI2Model(data_shape=dataset.data_shape, training_dpath=dataset.training_dpath, **hyperparams) ut.colorprint('[netrun] Initialize archchitecture', 'yellow') model.output_dims = 2 model.init_arch() ut.colorprint('[netrun] * Initializing new weights', 'lightgray') if model.has_saved_state(): model.load_model_state() ut.colorprint('[netrun] Training Requested', 'yellow') config = ut.argparse_dict(dict(monitor=False, monitor_updates=False, show_confusion=True, era_size=era_size, max_epochs=max_epochs)) model.monitor_config.update(**config) if (getattr(model, 'encoder', None) is not None): class_list = list(model.encoder.classes_) y_train = np.array([class_list.index(_) for _ in y_train]) y_valid = np.array([class_list.index(_) for _ in y_valid]) print('\n[netrun] Model Info') model.print_layer_info() 
ut.colorprint('[netrun] Begin training', 'yellow') model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid) model_path = model.save_model_state() return model_path
CommandLine: python -m wbia_cnn.train --test-train_aoi2 Example: >>> # DISABLE_DOCTEST >>> from wbia_cnn.train import * # NOQA >>> result = train_aoi2() >>> print(result)
wbia_cnn/models/aoi2.py
train_aoi2
dylanirion/wbia-plugin-cnn
0
python
def train_aoi2(output_path, data_fpath, labels_fpath, purge=True): '\n CommandLine:\n python -m wbia_cnn.train --test-train_aoi2\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia_cnn.train import * # NOQA\n >>> result = train_aoi2()\n >>> print(result)\n ' era_size = 16 max_epochs = 256 hyperparams = ut.argparse_dict({'era_size': era_size, 'batch_size': 32, 'learning_rate': 0.01, 'rate_schedule': 0.5, 'momentum': 0.9, 'weight_decay': 0.0001, 'augment_on': True, 'augment_on_validate': True, 'augment_weights': True, 'label_encode_on': False, 'whiten_on': True, 'class_weight': None, 'max_epochs': max_epochs}) ut.colorprint('[netrun] Ensuring Dataset', 'yellow') dataset = ingest_data.get_numpy_dataset2('aoi2', data_fpath, labels_fpath, output_path, cache=False) (X_train, y_train) = dataset.subset('train') (X_valid, y_valid) = dataset.subset('valid') print(('dataset.training_dpath = %r' % (dataset.training_dpath,))) if purge: model = AoI2Model(data_shape=dataset.data_shape, training_dpath=dataset.training_dpath, **hyperparams) model.output_dims = 2 model.init_arch() ut.delete(model.arch_dpath) ut.colorprint('[netrun] Architecture Specification', 'yellow') model = AoI2Model(data_shape=dataset.data_shape, training_dpath=dataset.training_dpath, **hyperparams) ut.colorprint('[netrun] Initialize archchitecture', 'yellow') model.output_dims = 2 model.init_arch() ut.colorprint('[netrun] * Initializing new weights', 'lightgray') if model.has_saved_state(): model.load_model_state() ut.colorprint('[netrun] Training Requested', 'yellow') config = ut.argparse_dict(dict(monitor=False, monitor_updates=False, show_confusion=True, era_size=era_size, max_epochs=max_epochs)) model.monitor_config.update(**config) if (getattr(model, 'encoder', None) is not None): class_list = list(model.encoder.classes_) y_train = np.array([class_list.index(_) for _ in y_train]) y_valid = np.array([class_list.index(_) for _ in y_valid]) print('\n[netrun] Model Info') model.print_layer_info() 
ut.colorprint('[netrun] Begin training', 'yellow') model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid) model_path = model.save_model_state() return model_path
def train_aoi2(output_path, data_fpath, labels_fpath, purge=True): '\n CommandLine:\n python -m wbia_cnn.train --test-train_aoi2\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia_cnn.train import * # NOQA\n >>> result = train_aoi2()\n >>> print(result)\n ' era_size = 16 max_epochs = 256 hyperparams = ut.argparse_dict({'era_size': era_size, 'batch_size': 32, 'learning_rate': 0.01, 'rate_schedule': 0.5, 'momentum': 0.9, 'weight_decay': 0.0001, 'augment_on': True, 'augment_on_validate': True, 'augment_weights': True, 'label_encode_on': False, 'whiten_on': True, 'class_weight': None, 'max_epochs': max_epochs}) ut.colorprint('[netrun] Ensuring Dataset', 'yellow') dataset = ingest_data.get_numpy_dataset2('aoi2', data_fpath, labels_fpath, output_path, cache=False) (X_train, y_train) = dataset.subset('train') (X_valid, y_valid) = dataset.subset('valid') print(('dataset.training_dpath = %r' % (dataset.training_dpath,))) if purge: model = AoI2Model(data_shape=dataset.data_shape, training_dpath=dataset.training_dpath, **hyperparams) model.output_dims = 2 model.init_arch() ut.delete(model.arch_dpath) ut.colorprint('[netrun] Architecture Specification', 'yellow') model = AoI2Model(data_shape=dataset.data_shape, training_dpath=dataset.training_dpath, **hyperparams) ut.colorprint('[netrun] Initialize archchitecture', 'yellow') model.output_dims = 2 model.init_arch() ut.colorprint('[netrun] * Initializing new weights', 'lightgray') if model.has_saved_state(): model.load_model_state() ut.colorprint('[netrun] Training Requested', 'yellow') config = ut.argparse_dict(dict(monitor=False, monitor_updates=False, show_confusion=True, era_size=era_size, max_epochs=max_epochs)) model.monitor_config.update(**config) if (getattr(model, 'encoder', None) is not None): class_list = list(model.encoder.classes_) y_train = np.array([class_list.index(_) for _ in y_train]) y_valid = np.array([class_list.index(_) for _ in y_valid]) print('\n[netrun] Model Info') model.print_layer_info() 
ut.colorprint('[netrun] Begin training', 'yellow') model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid) model_path = model.save_model_state() return model_path<|docstring|>CommandLine: python -m wbia_cnn.train --test-train_aoi2 Example: >>> # DISABLE_DOCTEST >>> from wbia_cnn.train import * # NOQA >>> result = train_aoi2() >>> print(result)<|endoftext|>
51404487be9e13c2bf2f1c35bfcfb50759bd31465d789bd30e38cb3449d44e06
def parse_args(): '\n\tParses the node2vec arguments.\n\t' parser = argparse.ArgumentParser(description='Run node2vec.') parser.add_argument('--input', nargs='?', default='graph/karate.edgelist', help='Input graph path') parser.add_argument('--output', nargs='?', default='emb/karate.emb', help='Embeddings path') parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.') parser.add_argument('--walk-length', type=int, default=16, help='Length of walk per source. Default is 80.') parser.add_argument('--num-walks', type=int, default=10, help='Number of walks per source. Default is 10.') parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.') parser.add_argument('--iter', default=9, type=int, help='Number of epochs in SGD') parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.') parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.') parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.') parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.') parser.add_argument('--unweighted', dest='unweighted', action='store_false') parser.set_defaults(weighted=False) parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. Default is undirected.') parser.add_argument('--undirected', dest='undirected', action='store_false') parser.set_defaults(directed=False) return parser.parse_args()
Parses the node2vec arguments.
embeddings/real_model_main.py
parse_args
saimmehmood/POI_Clustering
0
python
def parse_args(): '\n\t\n\t' parser = argparse.ArgumentParser(description='Run node2vec.') parser.add_argument('--input', nargs='?', default='graph/karate.edgelist', help='Input graph path') parser.add_argument('--output', nargs='?', default='emb/karate.emb', help='Embeddings path') parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.') parser.add_argument('--walk-length', type=int, default=16, help='Length of walk per source. Default is 80.') parser.add_argument('--num-walks', type=int, default=10, help='Number of walks per source. Default is 10.') parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.') parser.add_argument('--iter', default=9, type=int, help='Number of epochs in SGD') parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.') parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.') parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.') parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.') parser.add_argument('--unweighted', dest='unweighted', action='store_false') parser.set_defaults(weighted=False) parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. Default is undirected.') parser.add_argument('--undirected', dest='undirected', action='store_false') parser.set_defaults(directed=False) return parser.parse_args()
def parse_args(): '\n\t\n\t' parser = argparse.ArgumentParser(description='Run node2vec.') parser.add_argument('--input', nargs='?', default='graph/karate.edgelist', help='Input graph path') parser.add_argument('--output', nargs='?', default='emb/karate.emb', help='Embeddings path') parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.') parser.add_argument('--walk-length', type=int, default=16, help='Length of walk per source. Default is 80.') parser.add_argument('--num-walks', type=int, default=10, help='Number of walks per source. Default is 10.') parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.') parser.add_argument('--iter', default=9, type=int, help='Number of epochs in SGD') parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.') parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.') parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.') parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.') parser.add_argument('--unweighted', dest='unweighted', action='store_false') parser.set_defaults(weighted=False) parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. Default is undirected.') parser.add_argument('--undirected', dest='undirected', action='store_false') parser.set_defaults(directed=False) return parser.parse_args()<|docstring|>Parses the node2vec arguments.<|endoftext|>
b1d9c5a6dcaff186adc7be31d068904b04e5621ca83c8ed1f852fd665a8b2361
def read_graph(): '\n\tReads the input network in networkx.\n\t' if args.weighted: G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph()) else: G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph()) for edge in G.edges(): G[edge[0]][edge[1]]['weight'] = 1 if (not args.directed): G = G.to_undirected() return G
Reads the input network in networkx.
embeddings/real_model_main.py
read_graph
saimmehmood/POI_Clustering
0
python
def read_graph(): '\n\t\n\t' if args.weighted: G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph()) else: G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph()) for edge in G.edges(): G[edge[0]][edge[1]]['weight'] = 1 if (not args.directed): G = G.to_undirected() return G
def read_graph(): '\n\t\n\t' if args.weighted: G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph()) else: G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph()) for edge in G.edges(): G[edge[0]][edge[1]]['weight'] = 1 if (not args.directed): G = G.to_undirected() return G<|docstring|>Reads the input network in networkx.<|endoftext|>
d322602fa71ca53887ebe8c7adf4e4cb8fa286e80e147561b2af98b1a74a27c8
def learn_embeddings(walks): '\n\tLearn embeddings by optimizing the Skipgram objective using SGD.\n\t' model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter) model.wv.save_word2vec_format(args.output) return
Learn embeddings by optimizing the Skipgram objective using SGD.
embeddings/real_model_main.py
learn_embeddings
saimmehmood/POI_Clustering
0
python
def learn_embeddings(walks): '\n\t\n\t' model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter) model.wv.save_word2vec_format(args.output) return
def learn_embeddings(walks): '\n\t\n\t' model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter) model.wv.save_word2vec_format(args.output) return<|docstring|>Learn embeddings by optimizing the Skipgram objective using SGD.<|endoftext|>
78e1704389c0834f7a8797159a8a7788a8d7710c4552dad2d57472bab21e22b5
def main(args): '\n\tPipeline for representational learning for all nodes in a graph.\n\t' walks = [] walk = [] with open('walks_ten_10.txt') as file: for line in file: tmp = line.strip('\n').replace('[', '').replace(']', '').replace("'", '').replace(',', '') t = tmp.split(' ') for i in range(len(t)): walk.append(t[i]) walks.append(walk.copy()) walk.clear() learn_embeddings(walks)
Pipeline for representational learning for all nodes in a graph.
embeddings/real_model_main.py
main
saimmehmood/POI_Clustering
0
python
def main(args): '\n\t\n\t' walks = [] walk = [] with open('walks_ten_10.txt') as file: for line in file: tmp = line.strip('\n').replace('[', ).replace(']', ).replace("'", ).replace(',', ) t = tmp.split(' ') for i in range(len(t)): walk.append(t[i]) walks.append(walk.copy()) walk.clear() learn_embeddings(walks)
def main(args): '\n\t\n\t' walks = [] walk = [] with open('walks_ten_10.txt') as file: for line in file: tmp = line.strip('\n').replace('[', ).replace(']', ).replace("'", ).replace(',', ) t = tmp.split(' ') for i in range(len(t)): walk.append(t[i]) walks.append(walk.copy()) walk.clear() learn_embeddings(walks)<|docstring|>Pipeline for representational learning for all nodes in a graph.<|endoftext|>
4bc3a1173101919d8eb2e9bcb7eb1a4f4454ffb40581ac21245babcdbed5ba2d
def optimization(self, model='HRP', correlation='pearson', covariance='hist', rm='MV', rf=0, linkage='single', k=None, max_k=10, leaf_order=True, d=0.94): "\n This method calculates the optimal portfolio according to the\n optimization model selected by the user.\n\n Parameters\n ----------\n model : str can be {'HRP', 'HERC' or 'HERC2'}\n The hierarchical cluster portfolio model used for optimize the\n portfolio. The default is 'HRP'. Posible values are:\n\n - 'HRP': Hierarchical Risk Parity.\n - 'HERC': Hierarchical Equal Risk Contribution.\n - 'HERC2': HERC but splitting weights equally within clusters.\n\n correlation : str can be {'pearson', 'spearman' or 'distance'}.\n The correlation matrix used for create the clusters.\n The default is 'pearson'. Posible values are:\n\n - 'pearson': pearson correlation matrix.\n - 'spearman': spearman correlation matrix.\n - 'abs_pearson': absolute value pearson correlation matrix.\n - 'abs_spearman': absolute value spearman correlation matrix.\n - 'distance': distance correlation matrix.\n\n covariance : str, can be {'hist', 'ewma1', 'ewma2', 'ledoit', 'oas' or 'shrunk'}\n The method used to estimate the covariance matrix:\n The default is 'hist'.\n\n - 'hist': use historical estimates.\n - 'ewma1'': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.\n - 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.\n - 'ledoit': use the Ledoit and Wolf Shrinkage method.\n - 'oas': use the Oracle Approximation Shrinkage method.\n - 'shrunk': use the basic Shrunk Covariance method.\n\n rm : str, optional\n The risk measure used to optimze the portfolio.\n The default is 'MV'. 
Posible values are:\n\n - 'equal': Equally weighted.\n - 'vol': Standard Deviation.\n - 'MV': Variance.\n - 'MAD': Mean Absolute Deviation.\n - 'MSV': Semi Standard Deviation.\n - 'FLPM': First Lower Partial Moment (Omega Ratio).\n - 'SLPM': Second Lower Partial Moment (Sortino Ratio).\n - 'VaR': Value at Risk.\n - 'CVaR': Conditional Value at Risk.\n - 'EVaR': Entropic Value at Risk.\n - 'WR': Worst Realization (Minimax)\n - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).\n - 'ADD': Average Drawdown of uncompounded cumulative returns.\n - 'DaR': Drawdown at Risk of uncompounded cumulative returns.\n - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.\n - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.\n - 'UCI': Ulcer Index of uncompounded cumulative returns.\n - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).\n - 'ADD_Rel': Average Drawdown of compounded cumulative returns.\n - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.\n - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.\n - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.\n - 'UCI_Rel': Ulcer Index of compounded cumulative returns.\n\n rf : float, optional\n Risk free rate, must be in the same period of assets returns.\n The default is 0.\n linkage : string, optional\n Linkage method of hierarchical clustering, see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`_ for more details.\n The default is 'single'. Posible values are:\n\n - 'single'.\n - 'complete'.\n - 'average'.\n - 'weighted'.\n - 'centroid'.\n - 'median'.\n - 'ward'.\n\n k : int, optional\n Number of clusters. 
This value is took instead of the optimal number\n of clusters calculated with the two difference gap statistic.\n The default is None.\n max_k : int, optional\n Max number of clusters used by the two difference gap statistic\n to find the optimal number of clusters. The default is 10.\n leaf_order : bool, optional\n Indicates if the cluster are ordered so that the distance between\n successive leaves is minimal. The default is True.\n d : scalar\n The smoothing factor of ewma methods.\n The default is 0.94.\n\n Returns\n -------\n w : DataFrame\n The weights of optimal portfolio.\n\n " self.cov = pe.covar_matrix(self.returns, method=covariance, d=0.94) if (correlation in {'pearson', 'spearman'}): self.corr = self.returns.corr(method=correlation).astype(float) if (correlation in {'abs_pearson', 'abs_spearman'}): self.corr = np.abs(self.returns.corr(method=correlation[4:])).astype(float) elif (correlation == 'distance'): self.corr = af.dcorr_matrix(self.returns).astype(float) if (model == 'HRP'): self.clusters = self._hierarchical_clustering_hrp(linkage, leaf_order=leaf_order) elif (model in ['HERC', 'HERC2']): (self.clusters, self.k) = self._hierarchical_clustering_herc(linkage, max_k, leaf_order=leaf_order) if (k is not None): self.k = int(k) self.sort_order = self._seriation(self.clusters) asset_order = self.assetslist asset_order[:] = [self.assetslist[i] for i in self.sort_order] self.asset_order = asset_order self.corr_sorted = self.corr.reindex(index=self.asset_order, columns=self.asset_order) if (model == 'HRP'): weights = self._recursive_bisection(self.sort_order, rm=rm, rf=rf) elif (model in ['HERC', 'HERC2']): weights = self._hierarchical_recursive_bisection(self.clusters, rm=rm, rf=rf, linkage=linkage, model=model) weights = weights.loc[self.assetslist].to_frame() weights.columns = ['weights'] return weights
This method calculates the optimal portfolio according to the optimization model selected by the user. Parameters ---------- model : str can be {'HRP', 'HERC' or 'HERC2'} The hierarchical cluster portfolio model used for optimize the portfolio. The default is 'HRP'. Posible values are: - 'HRP': Hierarchical Risk Parity. - 'HERC': Hierarchical Equal Risk Contribution. - 'HERC2': HERC but splitting weights equally within clusters. correlation : str can be {'pearson', 'spearman' or 'distance'}. The correlation matrix used for create the clusters. The default is 'pearson'. Posible values are: - 'pearson': pearson correlation matrix. - 'spearman': spearman correlation matrix. - 'abs_pearson': absolute value pearson correlation matrix. - 'abs_spearman': absolute value spearman correlation matrix. - 'distance': distance correlation matrix. covariance : str, can be {'hist', 'ewma1', 'ewma2', 'ledoit', 'oas' or 'shrunk'} The method used to estimate the covariance matrix: The default is 'hist'. - 'hist': use historical estimates. - 'ewma1'': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details. - 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details. - 'ledoit': use the Ledoit and Wolf Shrinkage method. - 'oas': use the Oracle Approximation Shrinkage method. - 'shrunk': use the basic Shrunk Covariance method. rm : str, optional The risk measure used to optimze the portfolio. The default is 'MV'. Posible values are: - 'equal': Equally weighted. - 'vol': Standard Deviation. - 'MV': Variance. - 'MAD': Mean Absolute Deviation. - 'MSV': Semi Standard Deviation. - 'FLPM': First Lower Partial Moment (Omega Ratio). - 'SLPM': Second Lower Partial Moment (Sortino Ratio). - 'VaR': Value at Risk. - 'CVaR': Conditional Value at Risk. - 'EVaR': Entropic Value at Risk. 
- 'WR': Worst Realization (Minimax) - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio). - 'ADD': Average Drawdown of uncompounded cumulative returns. - 'DaR': Drawdown at Risk of uncompounded cumulative returns. - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns. - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns. - 'UCI': Ulcer Index of uncompounded cumulative returns. - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio). - 'ADD_Rel': Average Drawdown of compounded cumulative returns. - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns. - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns. - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns. - 'UCI_Rel': Ulcer Index of compounded cumulative returns. rf : float, optional Risk free rate, must be in the same period of assets returns. The default is 0. linkage : string, optional Linkage method of hierarchical clustering, see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`_ for more details. The default is 'single'. Posible values are: - 'single'. - 'complete'. - 'average'. - 'weighted'. - 'centroid'. - 'median'. - 'ward'. k : int, optional Number of clusters. This value is took instead of the optimal number of clusters calculated with the two difference gap statistic. The default is None. max_k : int, optional Max number of clusters used by the two difference gap statistic to find the optimal number of clusters. The default is 10. leaf_order : bool, optional Indicates if the cluster are ordered so that the distance between successive leaves is minimal. The default is True. d : scalar The smoothing factor of ewma methods. The default is 0.94. Returns ------- w : DataFrame The weights of optimal portfolio.
riskfolio/HCPortfolio.py
optimization
TandMcorp/Riskfolio-Lib
1
python
def optimization(self, model='HRP', correlation='pearson', covariance='hist', rm='MV', rf=0, linkage='single', k=None, max_k=10, leaf_order=True, d=0.94): "\n This method calculates the optimal portfolio according to the\n optimization model selected by the user.\n\n Parameters\n ----------\n model : str can be {'HRP', 'HERC' or 'HERC2'}\n The hierarchical cluster portfolio model used for optimize the\n portfolio. The default is 'HRP'. Posible values are:\n\n - 'HRP': Hierarchical Risk Parity.\n - 'HERC': Hierarchical Equal Risk Contribution.\n - 'HERC2': HERC but splitting weights equally within clusters.\n\n correlation : str can be {'pearson', 'spearman' or 'distance'}.\n The correlation matrix used for create the clusters.\n The default is 'pearson'. Posible values are:\n\n - 'pearson': pearson correlation matrix.\n - 'spearman': spearman correlation matrix.\n - 'abs_pearson': absolute value pearson correlation matrix.\n - 'abs_spearman': absolute value spearman correlation matrix.\n - 'distance': distance correlation matrix.\n\n covariance : str, can be {'hist', 'ewma1', 'ewma2', 'ledoit', 'oas' or 'shrunk'}\n The method used to estimate the covariance matrix:\n The default is 'hist'.\n\n - 'hist': use historical estimates.\n - 'ewma1: use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.\n - 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.\n - 'ledoit': use the Ledoit and Wolf Shrinkage method.\n - 'oas': use the Oracle Approximation Shrinkage method.\n - 'shrunk': use the basic Shrunk Covariance method.\n\n rm : str, optional\n The risk measure used to optimze the portfolio.\n The default is 'MV'. 
Posible values are:\n\n - 'equal': Equally weighted.\n - 'vol': Standard Deviation.\n - 'MV': Variance.\n - 'MAD': Mean Absolute Deviation.\n - 'MSV': Semi Standard Deviation.\n - 'FLPM': First Lower Partial Moment (Omega Ratio).\n - 'SLPM': Second Lower Partial Moment (Sortino Ratio).\n - 'VaR': Value at Risk.\n - 'CVaR': Conditional Value at Risk.\n - 'EVaR': Entropic Value at Risk.\n - 'WR': Worst Realization (Minimax)\n - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).\n - 'ADD': Average Drawdown of uncompounded cumulative returns.\n - 'DaR': Drawdown at Risk of uncompounded cumulative returns.\n - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.\n - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.\n - 'UCI': Ulcer Index of uncompounded cumulative returns.\n - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).\n - 'ADD_Rel': Average Drawdown of compounded cumulative returns.\n - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.\n - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.\n - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.\n - 'UCI_Rel': Ulcer Index of compounded cumulative returns.\n\n rf : float, optional\n Risk free rate, must be in the same period of assets returns.\n The default is 0.\n linkage : string, optional\n Linkage method of hierarchical clustering, see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`_ for more details.\n The default is 'single'. Posible values are:\n\n - 'single'.\n - 'complete'.\n - 'average'.\n - 'weighted'.\n - 'centroid'.\n - 'median'.\n - 'ward'.\n\n k : int, optional\n Number of clusters. 
This value is took instead of the optimal number\n of clusters calculated with the two difference gap statistic.\n The default is None.\n max_k : int, optional\n Max number of clusters used by the two difference gap statistic\n to find the optimal number of clusters. The default is 10.\n leaf_order : bool, optional\n Indicates if the cluster are ordered so that the distance between\n successive leaves is minimal. The default is True.\n d : scalar\n The smoothing factor of ewma methods.\n The default is 0.94.\n\n Returns\n -------\n w : DataFrame\n The weights of optimal portfolio.\n\n " self.cov = pe.covar_matrix(self.returns, method=covariance, d=0.94) if (correlation in {'pearson', 'spearman'}): self.corr = self.returns.corr(method=correlation).astype(float) if (correlation in {'abs_pearson', 'abs_spearman'}): self.corr = np.abs(self.returns.corr(method=correlation[4:])).astype(float) elif (correlation == 'distance'): self.corr = af.dcorr_matrix(self.returns).astype(float) if (model == 'HRP'): self.clusters = self._hierarchical_clustering_hrp(linkage, leaf_order=leaf_order) elif (model in ['HERC', 'HERC2']): (self.clusters, self.k) = self._hierarchical_clustering_herc(linkage, max_k, leaf_order=leaf_order) if (k is not None): self.k = int(k) self.sort_order = self._seriation(self.clusters) asset_order = self.assetslist asset_order[:] = [self.assetslist[i] for i in self.sort_order] self.asset_order = asset_order self.corr_sorted = self.corr.reindex(index=self.asset_order, columns=self.asset_order) if (model == 'HRP'): weights = self._recursive_bisection(self.sort_order, rm=rm, rf=rf) elif (model in ['HERC', 'HERC2']): weights = self._hierarchical_recursive_bisection(self.clusters, rm=rm, rf=rf, linkage=linkage, model=model) weights = weights.loc[self.assetslist].to_frame() weights.columns = ['weights'] return weights
def optimization(self, model='HRP', correlation='pearson', covariance='hist', rm='MV', rf=0, linkage='single', k=None, max_k=10, leaf_order=True, d=0.94): "\n This method calculates the optimal portfolio according to the\n optimization model selected by the user.\n\n Parameters\n ----------\n model : str can be {'HRP', 'HERC' or 'HERC2'}\n The hierarchical cluster portfolio model used for optimize the\n portfolio. The default is 'HRP'. Posible values are:\n\n - 'HRP': Hierarchical Risk Parity.\n - 'HERC': Hierarchical Equal Risk Contribution.\n - 'HERC2': HERC but splitting weights equally within clusters.\n\n correlation : str can be {'pearson', 'spearman' or 'distance'}.\n The correlation matrix used for create the clusters.\n The default is 'pearson'. Posible values are:\n\n - 'pearson': pearson correlation matrix.\n - 'spearman': spearman correlation matrix.\n - 'abs_pearson': absolute value pearson correlation matrix.\n - 'abs_spearman': absolute value spearman correlation matrix.\n - 'distance': distance correlation matrix.\n\n covariance : str, can be {'hist', 'ewma1', 'ewma2', 'ledoit', 'oas' or 'shrunk'}\n The method used to estimate the covariance matrix:\n The default is 'hist'.\n\n - 'hist': use historical estimates.\n - 'ewma1: use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.\n - 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.\n - 'ledoit': use the Ledoit and Wolf Shrinkage method.\n - 'oas': use the Oracle Approximation Shrinkage method.\n - 'shrunk': use the basic Shrunk Covariance method.\n\n rm : str, optional\n The risk measure used to optimze the portfolio.\n The default is 'MV'. 
Posible values are:\n\n - 'equal': Equally weighted.\n - 'vol': Standard Deviation.\n - 'MV': Variance.\n - 'MAD': Mean Absolute Deviation.\n - 'MSV': Semi Standard Deviation.\n - 'FLPM': First Lower Partial Moment (Omega Ratio).\n - 'SLPM': Second Lower Partial Moment (Sortino Ratio).\n - 'VaR': Value at Risk.\n - 'CVaR': Conditional Value at Risk.\n - 'EVaR': Entropic Value at Risk.\n - 'WR': Worst Realization (Minimax)\n - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).\n - 'ADD': Average Drawdown of uncompounded cumulative returns.\n - 'DaR': Drawdown at Risk of uncompounded cumulative returns.\n - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.\n - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.\n - 'UCI': Ulcer Index of uncompounded cumulative returns.\n - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).\n - 'ADD_Rel': Average Drawdown of compounded cumulative returns.\n - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.\n - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.\n - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.\n - 'UCI_Rel': Ulcer Index of compounded cumulative returns.\n\n rf : float, optional\n Risk free rate, must be in the same period of assets returns.\n The default is 0.\n linkage : string, optional\n Linkage method of hierarchical clustering, see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`_ for more details.\n The default is 'single'. Posible values are:\n\n - 'single'.\n - 'complete'.\n - 'average'.\n - 'weighted'.\n - 'centroid'.\n - 'median'.\n - 'ward'.\n\n k : int, optional\n Number of clusters. 
This value is took instead of the optimal number\n of clusters calculated with the two difference gap statistic.\n The default is None.\n max_k : int, optional\n Max number of clusters used by the two difference gap statistic\n to find the optimal number of clusters. The default is 10.\n leaf_order : bool, optional\n Indicates if the cluster are ordered so that the distance between\n successive leaves is minimal. The default is True.\n d : scalar\n The smoothing factor of ewma methods.\n The default is 0.94.\n\n Returns\n -------\n w : DataFrame\n The weights of optimal portfolio.\n\n " self.cov = pe.covar_matrix(self.returns, method=covariance, d=0.94) if (correlation in {'pearson', 'spearman'}): self.corr = self.returns.corr(method=correlation).astype(float) if (correlation in {'abs_pearson', 'abs_spearman'}): self.corr = np.abs(self.returns.corr(method=correlation[4:])).astype(float) elif (correlation == 'distance'): self.corr = af.dcorr_matrix(self.returns).astype(float) if (model == 'HRP'): self.clusters = self._hierarchical_clustering_hrp(linkage, leaf_order=leaf_order) elif (model in ['HERC', 'HERC2']): (self.clusters, self.k) = self._hierarchical_clustering_herc(linkage, max_k, leaf_order=leaf_order) if (k is not None): self.k = int(k) self.sort_order = self._seriation(self.clusters) asset_order = self.assetslist asset_order[:] = [self.assetslist[i] for i in self.sort_order] self.asset_order = asset_order self.corr_sorted = self.corr.reindex(index=self.asset_order, columns=self.asset_order) if (model == 'HRP'): weights = self._recursive_bisection(self.sort_order, rm=rm, rf=rf) elif (model in ['HERC', 'HERC2']): weights = self._hierarchical_recursive_bisection(self.clusters, rm=rm, rf=rf, linkage=linkage, model=model) weights = weights.loc[self.assetslist].to_frame() weights.columns = ['weights'] return weights<|docstring|>This method calculates the optimal portfolio according to the optimization model selected by the user. 
Parameters ---------- model : str can be {'HRP', 'HERC' or 'HERC2'} The hierarchical cluster portfolio model used for optimize the portfolio. The default is 'HRP'. Posible values are: - 'HRP': Hierarchical Risk Parity. - 'HERC': Hierarchical Equal Risk Contribution. - 'HERC2': HERC but splitting weights equally within clusters. correlation : str can be {'pearson', 'spearman' or 'distance'}. The correlation matrix used for create the clusters. The default is 'pearson'. Posible values are: - 'pearson': pearson correlation matrix. - 'spearman': spearman correlation matrix. - 'abs_pearson': absolute value pearson correlation matrix. - 'abs_spearman': absolute value spearman correlation matrix. - 'distance': distance correlation matrix. covariance : str, can be {'hist', 'ewma1', 'ewma2', 'ledoit', 'oas' or 'shrunk'} The method used to estimate the covariance matrix: The default is 'hist'. - 'hist': use historical estimates. - 'ewma1'': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details. - 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details. - 'ledoit': use the Ledoit and Wolf Shrinkage method. - 'oas': use the Oracle Approximation Shrinkage method. - 'shrunk': use the basic Shrunk Covariance method. rm : str, optional The risk measure used to optimze the portfolio. The default is 'MV'. Posible values are: - 'equal': Equally weighted. - 'vol': Standard Deviation. - 'MV': Variance. - 'MAD': Mean Absolute Deviation. - 'MSV': Semi Standard Deviation. - 'FLPM': First Lower Partial Moment (Omega Ratio). - 'SLPM': Second Lower Partial Moment (Sortino Ratio). - 'VaR': Value at Risk. - 'CVaR': Conditional Value at Risk. - 'EVaR': Entropic Value at Risk. 
- 'WR': Worst Realization (Minimax) - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio). - 'ADD': Average Drawdown of uncompounded cumulative returns. - 'DaR': Drawdown at Risk of uncompounded cumulative returns. - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns. - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns. - 'UCI': Ulcer Index of uncompounded cumulative returns. - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio). - 'ADD_Rel': Average Drawdown of compounded cumulative returns. - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns. - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns. - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns. - 'UCI_Rel': Ulcer Index of compounded cumulative returns. rf : float, optional Risk free rate, must be in the same period of assets returns. The default is 0. linkage : string, optional Linkage method of hierarchical clustering, see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`_ for more details. The default is 'single'. Posible values are: - 'single'. - 'complete'. - 'average'. - 'weighted'. - 'centroid'. - 'median'. - 'ward'. k : int, optional Number of clusters. This value is took instead of the optimal number of clusters calculated with the two difference gap statistic. The default is None. max_k : int, optional Max number of clusters used by the two difference gap statistic to find the optimal number of clusters. The default is 10. leaf_order : bool, optional Indicates if the cluster are ordered so that the distance between successive leaves is minimal. The default is True. d : scalar The smoothing factor of ewma methods. The default is 0.94. Returns ------- w : DataFrame The weights of optimal portfolio.<|endoftext|>
016b80cb3fea47c8c26269781c5ab131db6c02c8cb09c4990f53417fe97167e2
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None: 'Set up the tankerkoenig sensors.' coordinator: TankerkoenigDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] stations = coordinator.stations.values() entities = [] for station in stations: for fuel in coordinator.fuel_types: if (fuel not in station): _LOGGER.warning('Station %s does not offer %s fuel', station['id'], fuel) continue sensor = FuelPriceSensor(fuel, station, coordinator, coordinator.show_on_map) entities.append(sensor) _LOGGER.debug('Added sensors %s', entities) async_add_entities(entities)
Set up the tankerkoenig sensors.
homeassistant/components/tankerkoenig/sensor.py
async_setup_entry
orcema/core
30,023
python
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None: coordinator: TankerkoenigDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] stations = coordinator.stations.values() entities = [] for station in stations: for fuel in coordinator.fuel_types: if (fuel not in station): _LOGGER.warning('Station %s does not offer %s fuel', station['id'], fuel) continue sensor = FuelPriceSensor(fuel, station, coordinator, coordinator.show_on_map) entities.append(sensor) _LOGGER.debug('Added sensors %s', entities) async_add_entities(entities)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None: coordinator: TankerkoenigDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] stations = coordinator.stations.values() entities = [] for station in stations: for fuel in coordinator.fuel_types: if (fuel not in station): _LOGGER.warning('Station %s does not offer %s fuel', station['id'], fuel) continue sensor = FuelPriceSensor(fuel, station, coordinator, coordinator.show_on_map) entities.append(sensor) _LOGGER.debug('Added sensors %s', entities) async_add_entities(entities)<|docstring|>Set up the tankerkoenig sensors.<|endoftext|>
84c649cec73e56a47b9b978c8c954fd841948193926e0f7549397958002de65d
def __init__(self, fuel_type, station, coordinator, show_on_map): 'Initialize the sensor.' super().__init__(coordinator, station) self._station_id = station['id'] self._fuel_type = fuel_type self._attr_name = f"{station['brand']} {station['street']} {station['houseNumber']} {FUEL_TYPES[fuel_type]}" self._attr_native_unit_of_measurement = CURRENCY_EURO self._attr_unique_id = f"{station['id']}_{fuel_type}" attrs = {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_BRAND: station['brand'], ATTR_FUEL_TYPE: fuel_type, ATTR_STATION_NAME: station['name'], ATTR_STREET: station['street'], ATTR_HOUSE_NUMBER: station['houseNumber'], ATTR_POSTCODE: station['postCode'], ATTR_CITY: station['place']} if show_on_map: attrs[ATTR_LATITUDE] = station['lat'] attrs[ATTR_LONGITUDE] = station['lng'] self._attr_extra_state_attributes = attrs
Initialize the sensor.
homeassistant/components/tankerkoenig/sensor.py
__init__
orcema/core
30,023
python
def __init__(self, fuel_type, station, coordinator, show_on_map): super().__init__(coordinator, station) self._station_id = station['id'] self._fuel_type = fuel_type self._attr_name = f"{station['brand']} {station['street']} {station['houseNumber']} {FUEL_TYPES[fuel_type]}" self._attr_native_unit_of_measurement = CURRENCY_EURO self._attr_unique_id = f"{station['id']}_{fuel_type}" attrs = {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_BRAND: station['brand'], ATTR_FUEL_TYPE: fuel_type, ATTR_STATION_NAME: station['name'], ATTR_STREET: station['street'], ATTR_HOUSE_NUMBER: station['houseNumber'], ATTR_POSTCODE: station['postCode'], ATTR_CITY: station['place']} if show_on_map: attrs[ATTR_LATITUDE] = station['lat'] attrs[ATTR_LONGITUDE] = station['lng'] self._attr_extra_state_attributes = attrs
def __init__(self, fuel_type, station, coordinator, show_on_map): super().__init__(coordinator, station) self._station_id = station['id'] self._fuel_type = fuel_type self._attr_name = f"{station['brand']} {station['street']} {station['houseNumber']} {FUEL_TYPES[fuel_type]}" self._attr_native_unit_of_measurement = CURRENCY_EURO self._attr_unique_id = f"{station['id']}_{fuel_type}" attrs = {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_BRAND: station['brand'], ATTR_FUEL_TYPE: fuel_type, ATTR_STATION_NAME: station['name'], ATTR_STREET: station['street'], ATTR_HOUSE_NUMBER: station['houseNumber'], ATTR_POSTCODE: station['postCode'], ATTR_CITY: station['place']} if show_on_map: attrs[ATTR_LATITUDE] = station['lat'] attrs[ATTR_LONGITUDE] = station['lng'] self._attr_extra_state_attributes = attrs<|docstring|>Initialize the sensor.<|endoftext|>
8952acfbe9c4f3604d193f326f3207d43ff871f049ffd7b1efc89f1a8c35c82a
@property def native_value(self): 'Return the state of the device.' return self.coordinator.data[self._station_id].get(self._fuel_type)
Return the state of the device.
homeassistant/components/tankerkoenig/sensor.py
native_value
orcema/core
30,023
python
@property def native_value(self): return self.coordinator.data[self._station_id].get(self._fuel_type)
@property def native_value(self): return self.coordinator.data[self._station_id].get(self._fuel_type)<|docstring|>Return the state of the device.<|endoftext|>
aed1466842323d148445756f084faf32c7806274c7419b6588ea75d8118b9811
def main(): '\n ------------------ Read data --------------------\n ' filename = os.path.join('..', 'data', 'r_wallstreetbets_posts.csv') DATA = pd.read_csv(filename) '\n The data set contains a lot of information which we do not need for our model. \n This is information about username, individual links etc.\n We are primarily going to use the title column in the data set. This column contains the actual text.\n ' DATA = DATA[['title', 'created_utc', 'score']].sample(10000) output = [] print('Creating Doc object...') for title in DATA['title']: doc = nlp(title) output.append(str(doc)) '\n ----------- Process using gensim and spaCy ----------------\n ' '\n The next thing we do is using gensim to efficiently procude a model of bigrams and trigrams in the data.\n We first create bigrams based on words appearing one after another frequently. \n These bigrams are then fed into a trigram generator, which takes the bigram as the second part of a bigram.\n ' print('Building bi- and trigrams...') bigram = gensim.models.Phrases(output, min_count=20, threshold=100) trigram = gensim.models.Phrases(bigram[output], threshold=100) bigram_mod = gensim.models.phrases.Phraser(bigram) trigram_mod = gensim.models.phrases.Phraser(trigram) '\n We use the process_words function from our utils folder. \n This function takes a text, nlp, bigram_mod, trigram_mod, stop_words and allowed_postags as arguments.\n It uses gensim to preprocess the words and uses spaCy to lemmatize and POS tag. 
\n ' print('Processing the data...') data_processed = lda_utils.process_words(output, nlp, bigram_mod, trigram_mod, allowed_postags=['NOUN', 'PROPN']) print('Creating Dictionary...') id2word = corpora.Dictionary(data_processed) print('Creating Corpus...') corpus = [id2word.doc2bow(text) for text in data_processed] '\n --------------- Build LDA model ------------------------\n ' print('Building LDA model...') lda_model = gensim.models.LdaMulticore(corpus=corpus, id2word=id2word, num_topics=3, random_state=100, chunksize=10, passes=10, iterations=100, per_word_topics=True, minimum_probability=0.0) '\n -------------- Calculate model perplaxity ans coherence -------------------------\n ' print('\nPerplexity: ', lda_model.log_perplexity(corpus)) coherence_model_lda = CoherenceModel(model=lda_model, texts=data_processed, dictionary=id2word, coherence='c_v') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score: ', coherence_lda) '\n -------------- Find the most optimal number of topics -------------------------\n ' '\n We want to find the most optimal number of topics for our model. \n Although the coherence value may be high at the high number of topics, it is not significant that it is the most optimal.\n One of the reasons for this is that there will be more repetitions of words the more topics there are. \n So if one wants to avoid this, it may be an advantage with fewer topics. \n ' print('Finding optimal topic number...') (model_list, coherence_values) = lda_utils.compute_coherence_values(texts=data_processed, corpus=corpus, dictionary=id2word, start=1, limit=40, step=2) '\n When we first ran the part to find the most optimal topic number, we got the number of 7 topics to be the most optimal. \n But when we later in the script saw the visualization of how the topics are distributed,\n it became clear that they formed three main clusters, where the topics overlapped. 
\n For this reason, we have chosen to include three topics in the model.\n ' '\n --------------------- Find most dominant topic per chunk ---------------------\n ' df_topic_keywords = lda_utils.format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_processed) df_dominant_topic = df_topic_keywords.reset_index() df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text'] df_dominant_topic.sample(10) pd.options.display.max_colwidth = 100 sent_topics_sorted_df = pd.DataFrame() sent_topics_out_df_grpd = df_topic_keywords.groupby('Dominant_Topic') for (i, grp) in sent_topics_out_df_grpd: sent_topics_sorted_df = pd.concat([sent_topics_sorted_df, grp.sort_values(['Perc_Contribution'], ascending=False).head(1)], axis=0) sent_topics_sorted_df.reset_index(drop=True, inplace=True) sent_topics_sorted_df.columns = ['Topic_Num', 'Topic_Perc_Contrib', 'Keywords', 'Representative Text'] '\n --------------------- Create dataframe for the values ---------------------\n ' values = list(lda_model.get_document_topics(corpus)) split = [] for entry in values: topic_prevelance = [] for topic in entry: topic_prevelance.append(topic[1]) split.append(topic_prevelance) value_df = pd.DataFrame(map(list, zip(*split))) print('Saving output...') df_outpath = os.path.join('..', 'output', 'value_df.csv') value_df.to_csv(df_outpath) sns.lineplot(data=value_df.T.rolling(50).mean()) outpath_viz = os.path.join('..', 'output', 'topic_matrix_viz.png') plt.savefig(outpath_viz) print('Output saved')
------------------ Read data --------------------
Assignment_5/src/GameStop_LDA.py
main
JohanneBW/cds_language_assignments
0
python
def main(): '\n \n ' filename = os.path.join('..', 'data', 'r_wallstreetbets_posts.csv') DATA = pd.read_csv(filename) '\n The data set contains a lot of information which we do not need for our model. \n This is information about username, individual links etc.\n We are primarily going to use the title column in the data set. This column contains the actual text.\n ' DATA = DATA[['title', 'created_utc', 'score']].sample(10000) output = [] print('Creating Doc object...') for title in DATA['title']: doc = nlp(title) output.append(str(doc)) '\n ----------- Process using gensim and spaCy ----------------\n ' '\n The next thing we do is using gensim to efficiently procude a model of bigrams and trigrams in the data.\n We first create bigrams based on words appearing one after another frequently. \n These bigrams are then fed into a trigram generator, which takes the bigram as the second part of a bigram.\n ' print('Building bi- and trigrams...') bigram = gensim.models.Phrases(output, min_count=20, threshold=100) trigram = gensim.models.Phrases(bigram[output], threshold=100) bigram_mod = gensim.models.phrases.Phraser(bigram) trigram_mod = gensim.models.phrases.Phraser(trigram) '\n We use the process_words function from our utils folder. \n This function takes a text, nlp, bigram_mod, trigram_mod, stop_words and allowed_postags as arguments.\n It uses gensim to preprocess the words and uses spaCy to lemmatize and POS tag. 
\n ' print('Processing the data...') data_processed = lda_utils.process_words(output, nlp, bigram_mod, trigram_mod, allowed_postags=['NOUN', 'PROPN']) print('Creating Dictionary...') id2word = corpora.Dictionary(data_processed) print('Creating Corpus...') corpus = [id2word.doc2bow(text) for text in data_processed] '\n --------------- Build LDA model ------------------------\n ' print('Building LDA model...') lda_model = gensim.models.LdaMulticore(corpus=corpus, id2word=id2word, num_topics=3, random_state=100, chunksize=10, passes=10, iterations=100, per_word_topics=True, minimum_probability=0.0) '\n -------------- Calculate model perplaxity ans coherence -------------------------\n ' print('\nPerplexity: ', lda_model.log_perplexity(corpus)) coherence_model_lda = CoherenceModel(model=lda_model, texts=data_processed, dictionary=id2word, coherence='c_v') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score: ', coherence_lda) '\n -------------- Find the most optimal number of topics -------------------------\n ' '\n We want to find the most optimal number of topics for our model. \n Although the coherence value may be high at the high number of topics, it is not significant that it is the most optimal.\n One of the reasons for this is that there will be more repetitions of words the more topics there are. \n So if one wants to avoid this, it may be an advantage with fewer topics. \n ' print('Finding optimal topic number...') (model_list, coherence_values) = lda_utils.compute_coherence_values(texts=data_processed, corpus=corpus, dictionary=id2word, start=1, limit=40, step=2) '\n When we first ran the part to find the most optimal topic number, we got the number of 7 topics to be the most optimal. \n But when we later in the script saw the visualization of how the topics are distributed,\n it became clear that they formed three main clusters, where the topics overlapped. 
\n For this reason, we have chosen to include three topics in the model.\n ' '\n --------------------- Find most dominant topic per chunk ---------------------\n ' df_topic_keywords = lda_utils.format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_processed) df_dominant_topic = df_topic_keywords.reset_index() df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text'] df_dominant_topic.sample(10) pd.options.display.max_colwidth = 100 sent_topics_sorted_df = pd.DataFrame() sent_topics_out_df_grpd = df_topic_keywords.groupby('Dominant_Topic') for (i, grp) in sent_topics_out_df_grpd: sent_topics_sorted_df = pd.concat([sent_topics_sorted_df, grp.sort_values(['Perc_Contribution'], ascending=False).head(1)], axis=0) sent_topics_sorted_df.reset_index(drop=True, inplace=True) sent_topics_sorted_df.columns = ['Topic_Num', 'Topic_Perc_Contrib', 'Keywords', 'Representative Text'] '\n --------------------- Create dataframe for the values ---------------------\n ' values = list(lda_model.get_document_topics(corpus)) split = [] for entry in values: topic_prevelance = [] for topic in entry: topic_prevelance.append(topic[1]) split.append(topic_prevelance) value_df = pd.DataFrame(map(list, zip(*split))) print('Saving output...') df_outpath = os.path.join('..', 'output', 'value_df.csv') value_df.to_csv(df_outpath) sns.lineplot(data=value_df.T.rolling(50).mean()) outpath_viz = os.path.join('..', 'output', 'topic_matrix_viz.png') plt.savefig(outpath_viz) print('Output saved')
def main(): '\n \n ' filename = os.path.join('..', 'data', 'r_wallstreetbets_posts.csv') DATA = pd.read_csv(filename) '\n The data set contains a lot of information which we do not need for our model. \n This is information about username, individual links etc.\n We are primarily going to use the title column in the data set. This column contains the actual text.\n ' DATA = DATA[['title', 'created_utc', 'score']].sample(10000) output = [] print('Creating Doc object...') for title in DATA['title']: doc = nlp(title) output.append(str(doc)) '\n ----------- Process using gensim and spaCy ----------------\n ' '\n The next thing we do is using gensim to efficiently procude a model of bigrams and trigrams in the data.\n We first create bigrams based on words appearing one after another frequently. \n These bigrams are then fed into a trigram generator, which takes the bigram as the second part of a bigram.\n ' print('Building bi- and trigrams...') bigram = gensim.models.Phrases(output, min_count=20, threshold=100) trigram = gensim.models.Phrases(bigram[output], threshold=100) bigram_mod = gensim.models.phrases.Phraser(bigram) trigram_mod = gensim.models.phrases.Phraser(trigram) '\n We use the process_words function from our utils folder. \n This function takes a text, nlp, bigram_mod, trigram_mod, stop_words and allowed_postags as arguments.\n It uses gensim to preprocess the words and uses spaCy to lemmatize and POS tag. 
\n ' print('Processing the data...') data_processed = lda_utils.process_words(output, nlp, bigram_mod, trigram_mod, allowed_postags=['NOUN', 'PROPN']) print('Creating Dictionary...') id2word = corpora.Dictionary(data_processed) print('Creating Corpus...') corpus = [id2word.doc2bow(text) for text in data_processed] '\n --------------- Build LDA model ------------------------\n ' print('Building LDA model...') lda_model = gensim.models.LdaMulticore(corpus=corpus, id2word=id2word, num_topics=3, random_state=100, chunksize=10, passes=10, iterations=100, per_word_topics=True, minimum_probability=0.0) '\n -------------- Calculate model perplaxity ans coherence -------------------------\n ' print('\nPerplexity: ', lda_model.log_perplexity(corpus)) coherence_model_lda = CoherenceModel(model=lda_model, texts=data_processed, dictionary=id2word, coherence='c_v') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score: ', coherence_lda) '\n -------------- Find the most optimal number of topics -------------------------\n ' '\n We want to find the most optimal number of topics for our model. \n Although the coherence value may be high at the high number of topics, it is not significant that it is the most optimal.\n One of the reasons for this is that there will be more repetitions of words the more topics there are. \n So if one wants to avoid this, it may be an advantage with fewer topics. \n ' print('Finding optimal topic number...') (model_list, coherence_values) = lda_utils.compute_coherence_values(texts=data_processed, corpus=corpus, dictionary=id2word, start=1, limit=40, step=2) '\n When we first ran the part to find the most optimal topic number, we got the number of 7 topics to be the most optimal. \n But when we later in the script saw the visualization of how the topics are distributed,\n it became clear that they formed three main clusters, where the topics overlapped. 
\n For this reason, we have chosen to include three topics in the model.\n ' '\n --------------------- Find most dominant topic per chunk ---------------------\n ' df_topic_keywords = lda_utils.format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_processed) df_dominant_topic = df_topic_keywords.reset_index() df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text'] df_dominant_topic.sample(10) pd.options.display.max_colwidth = 100 sent_topics_sorted_df = pd.DataFrame() sent_topics_out_df_grpd = df_topic_keywords.groupby('Dominant_Topic') for (i, grp) in sent_topics_out_df_grpd: sent_topics_sorted_df = pd.concat([sent_topics_sorted_df, grp.sort_values(['Perc_Contribution'], ascending=False).head(1)], axis=0) sent_topics_sorted_df.reset_index(drop=True, inplace=True) sent_topics_sorted_df.columns = ['Topic_Num', 'Topic_Perc_Contrib', 'Keywords', 'Representative Text'] '\n --------------------- Create dataframe for the values ---------------------\n ' values = list(lda_model.get_document_topics(corpus)) split = [] for entry in values: topic_prevelance = [] for topic in entry: topic_prevelance.append(topic[1]) split.append(topic_prevelance) value_df = pd.DataFrame(map(list, zip(*split))) print('Saving output...') df_outpath = os.path.join('..', 'output', 'value_df.csv') value_df.to_csv(df_outpath) sns.lineplot(data=value_df.T.rolling(50).mean()) outpath_viz = os.path.join('..', 'output', 'topic_matrix_viz.png') plt.savefig(outpath_viz) print('Output saved')<|docstring|>------------------ Read data --------------------<|endoftext|>
1704d8e57aa2ba5494f301c4c7436a681a292d28bdc25b35213063985a1ce908
def setUp(self): 'Create a test gallery with 2 photos.' super(GalleryTest, self).setUp() self.test_gallery = models.Gallery.objects.create(title='Fake Gallery', title_slug='fake-gallery') self.pl2 = helpers._create_new_photo(name='Landscape2', slug='landscape2') self.test_gallery.photos.add(self.pl) self.test_gallery.photos.add(self.pl2)
Create a test gallery with 2 photos.
photologue/tests/gallery.py
setUp
TAMUArch/django-photologue
0
python
def setUp(self): super(GalleryTest, self).setUp() self.test_gallery = models.Gallery.objects.create(title='Fake Gallery', title_slug='fake-gallery') self.pl2 = helpers._create_new_photo(name='Landscape2', slug='landscape2') self.test_gallery.photos.add(self.pl) self.test_gallery.photos.add(self.pl2)
def setUp(self): super(GalleryTest, self).setUp() self.test_gallery = models.Gallery.objects.create(title='Fake Gallery', title_slug='fake-gallery') self.pl2 = helpers._create_new_photo(name='Landscape2', slug='landscape2') self.test_gallery.photos.add(self.pl) self.test_gallery.photos.add(self.pl2)<|docstring|>Create a test gallery with 2 photos.<|endoftext|>
11d7b1c956e3e6f64a2f4e73dd65782dc82dd8f51a0c736616944d46608eb7e5
def test_public(self): "Method 'public' should only return photos flagged as public." self.assert_((self.test_gallery.public().count() == 2)) self.pl.is_public = False self.pl.save() self.assert_((self.test_gallery.public().count() == 1))
Method 'public' should only return photos flagged as public.
photologue/tests/gallery.py
test_public
TAMUArch/django-photologue
0
python
def test_public(self): self.assert_((self.test_gallery.public().count() == 2)) self.pl.is_public = False self.pl.save() self.assert_((self.test_gallery.public().count() == 1))
def test_public(self): self.assert_((self.test_gallery.public().count() == 2)) self.pl.is_public = False self.pl.save() self.assert_((self.test_gallery.public().count() == 1))<|docstring|>Method 'public' should only return photos flagged as public.<|endoftext|>
a23ad91d9baef7c0db799d5cf8b78b9c4ebc3ab322f37b42c40a3fc84bea8584
def test_photo_count(self): "Method 'photo_count' should return the count of the photos in this\n gallery." self.assert_((self.test_gallery.photo_count() == 2)) self.pl.is_public = False self.pl.save() self.assert_((self.test_gallery.photo_count() == 1)) self.assert_((self.test_gallery.photo_count(public=False) == 2))
Method 'photo_count' should return the count of the photos in this gallery.
photologue/tests/gallery.py
test_photo_count
TAMUArch/django-photologue
0
python
def test_photo_count(self): "Method 'photo_count' should return the count of the photos in this\n gallery." self.assert_((self.test_gallery.photo_count() == 2)) self.pl.is_public = False self.pl.save() self.assert_((self.test_gallery.photo_count() == 1)) self.assert_((self.test_gallery.photo_count(public=False) == 2))
def test_photo_count(self): "Method 'photo_count' should return the count of the photos in this\n gallery." self.assert_((self.test_gallery.photo_count() == 2)) self.pl.is_public = False self.pl.save() self.assert_((self.test_gallery.photo_count() == 1)) self.assert_((self.test_gallery.photo_count(public=False) == 2))<|docstring|>Method 'photo_count' should return the count of the photos in this gallery.<|endoftext|>
ef81f08c8aaa6df57384b8cfb8128dbbdf8f6239ba83c3d4f2fbbe2f647ec165
def test_sample(self): "Method 'sample' should return a random queryset of photos from the \n gallery." _current_sample_size = models.SAMPLE_SIZE models.SAMPLE_SIZE = 5 self.assert_((len(self.test_gallery.sample()) == 2)) self.assert_((len(self.test_gallery.sample(count=1)) == 1)) self.pl.is_public = False self.pl.save() self.assert_((len(self.test_gallery.sample(count=2)) == 1)) self.pl.is_public = True self.pl.save() models.SAMPLE_SIZE = 1 self.assert_((len(self.test_gallery.sample()) == 1)) models.SAMPLE_SIZE = _current_sample_size
Method 'sample' should return a random queryset of photos from the gallery.
photologue/tests/gallery.py
test_sample
TAMUArch/django-photologue
0
python
def test_sample(self): "Method 'sample' should return a random queryset of photos from the \n gallery." _current_sample_size = models.SAMPLE_SIZE models.SAMPLE_SIZE = 5 self.assert_((len(self.test_gallery.sample()) == 2)) self.assert_((len(self.test_gallery.sample(count=1)) == 1)) self.pl.is_public = False self.pl.save() self.assert_((len(self.test_gallery.sample(count=2)) == 1)) self.pl.is_public = True self.pl.save() models.SAMPLE_SIZE = 1 self.assert_((len(self.test_gallery.sample()) == 1)) models.SAMPLE_SIZE = _current_sample_size
def test_sample(self): "Method 'sample' should return a random queryset of photos from the \n gallery." _current_sample_size = models.SAMPLE_SIZE models.SAMPLE_SIZE = 5 self.assert_((len(self.test_gallery.sample()) == 2)) self.assert_((len(self.test_gallery.sample(count=1)) == 1)) self.pl.is_public = False self.pl.save() self.assert_((len(self.test_gallery.sample(count=2)) == 1)) self.pl.is_public = True self.pl.save() models.SAMPLE_SIZE = 1 self.assert_((len(self.test_gallery.sample()) == 1)) models.SAMPLE_SIZE = _current_sample_size<|docstring|>Method 'sample' should return a random queryset of photos from the gallery.<|endoftext|>
4e4ebcb76a46671f486f232589f405560809c294d4ae6cb167b0a662be854998
def _create_user(self, email, password, is_staff, is_superuser, **extra_fields): ' \n Creates and saves a User with the given email and password.\n ' if (not email): raise ValueError('The given email must be set') email = self.normalize_email(email) user = self.model(email=email, is_staff=is_staff, is_superuser=is_superuser, **extra_fields) user.set_password(password) user.save(using=self._db) return user
Creates and saves a User with the given email and password.
ntnui/apps/database/utils/user_manager.py
_create_user
kapteinstein/tdt4290
0
python
def _create_user(self, email, password, is_staff, is_superuser, **extra_fields): ' \n \n ' if (not email): raise ValueError('The given email must be set') email = self.normalize_email(email) user = self.model(email=email, is_staff=is_staff, is_superuser=is_superuser, **extra_fields) user.set_password(password) user.save(using=self._db) return user
def _create_user(self, email, password, is_staff, is_superuser, **extra_fields): ' \n \n ' if (not email): raise ValueError('The given email must be set') email = self.normalize_email(email) user = self.model(email=email, is_staff=is_staff, is_superuser=is_superuser, **extra_fields) user.set_password(password) user.save(using=self._db) return user<|docstring|>Creates and saves a User with the given email and password.<|endoftext|>
9957cc50aec175c085c630bc027b70c17da467d8e4512ef2a0ba0a07766e0f1c
def __init__(self, session, object_factory, request_validator): 'Initialize a new SupportBundleTriggerConfiguration\n object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the Identity Services Engine service.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n\n ' check_type(session, RestSession) super(SupportBundleTriggerConfiguration, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator
Initialize a new SupportBundleTriggerConfiguration object with the provided RestSession. Args: session(RestSession): The RESTful session object to be used for API calls to the Identity Services Engine service. Raises: TypeError: If the parameter types are incorrect.
ciscoisesdk/api/v3_1_1/support_bundle_trigger_configuration.py
__init__
CiscoISE/ciscoisesdk
36
python
def __init__(self, session, object_factory, request_validator): 'Initialize a new SupportBundleTriggerConfiguration\n object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the Identity Services Engine service.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n\n ' check_type(session, RestSession) super(SupportBundleTriggerConfiguration, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator
def __init__(self, session, object_factory, request_validator): 'Initialize a new SupportBundleTriggerConfiguration\n object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the Identity Services Engine service.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n\n ' check_type(session, RestSession) super(SupportBundleTriggerConfiguration, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator<|docstring|>Initialize a new SupportBundleTriggerConfiguration object with the provided RestSession. Args: session(RestSession): The RESTful session object to be used for API calls to the Identity Services Engine service. Raises: TypeError: If the parameter types are incorrect.<|endoftext|>
150156373556d0251394d4695de2c237aa5e72a7164ef26264aca05537d1d0e8
def create_support_bundle(self, description=None, host_name=None, name=None, support_bundle_include_options=None, headers=None, payload=None, active_validation=True, **query_parameters): "This API allows the client to create a support bundle trigger\n configuration.\n\n Args:\n description(string): description, property of the\n request body.\n host_name(string): This parameter is hostName only, xxxx\n of xxxx.yyy.zz, property of the request\n body.\n name(string): Resource Name, property of the request\n body.\n support_bundle_include_options(object):\n supportBundleIncludeOptions, property of\n the request body.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. 
Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n " check_type(headers, dict) if (headers is not None): if ('Content-Type' in headers): check_type(headers.get('Content-Type'), basestring, may_be_none=False) if ('Accept' in headers): check_type(headers.get('Accept'), basestring, may_be_none=False) if ('ERS-Media-Type' in headers): check_type(headers.get('ERS-Media-Type'), basestring) if ('X-CSRF-TOKEN' in headers): check_type(headers.get('X-CSRF-TOKEN'), basestring) with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True is_xml_payload = ('application/xml' in _headers.get('Content-Type', [])) if (active_validation and is_xml_payload): check_type(payload, basestring) if (active_validation and (not is_xml_payload)): check_type(payload, dict) _params = {} _params.update(query_parameters) _params = dict_from_items_with_values(_params) path_params = {} if is_xml_payload: _payload = payload else: _tmp_payload = {'name': name, 'description': description, 'hostName': host_name, 'supportBundleIncludeOptions': support_bundle_include_options} _payload = {'SupportBundle': dict_from_items_with_values(_tmp_payload)} _payload.update((payload or {})) _payload = dict_from_items_with_values(_payload) if (active_validation and (not is_xml_payload)): self._request_validator('jsd_fac48e5c63abfe2feec6fd1903_v3_1_1').validate(_payload) e_url = '/ers/config/supportbundle' endpoint_full_url = apply_path_params(e_url, path_params) request_params = ({'data': _payload} if is_xml_payload else {'json': _payload}) if with_custom_headers: _api_response = 
self._session.post(endpoint_full_url, params=_params, headers=_headers, **request_params) else: _api_response = self._session.post(endpoint_full_url, params=_params, **request_params) return self._object_factory('bpm_fac48e5c63abfe2feec6fd1903_v3_1_1', _api_response)
This API allows the client to create a support bundle trigger configuration. Args: description(string): description, property of the request body. host_name(string): This parameter is hostName only, xxxx of xxxx.yyy.zz, property of the request body. name(string): Resource Name, property of the request body. support_bundle_include_options(object): supportBundleIncludeOptions, property of the request body. headers(dict): Dictionary of HTTP Headers to send with the Request . payload(dict): A JSON serializable Python object to send in the body of the Request. active_validation(bool): Enable/Disable payload validation. Defaults to True. **query_parameters: Additional query parameters (provides support for parameters that may be added in the future). Returns: RestResponse: REST response with following properties: - headers(MyDict): response headers. - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation or the bracket notation. - content(bytes): representation of the request's response - text(str): representation of the request's response Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the Identity Services Engine cloud returns an error.
ciscoisesdk/api/v3_1_1/support_bundle_trigger_configuration.py
create_support_bundle
CiscoISE/ciscoisesdk
36
python
def create_support_bundle(self, description=None, host_name=None, name=None, support_bundle_include_options=None, headers=None, payload=None, active_validation=True, **query_parameters): "This API allows the client to create a support bundle trigger\n configuration.\n\n Args:\n description(string): description, property of the\n request body.\n host_name(string): This parameter is hostName only, xxxx\n of xxxx.yyy.zz, property of the request\n body.\n name(string): Resource Name, property of the request\n body.\n support_bundle_include_options(object):\n supportBundleIncludeOptions, property of\n the request body.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. 
Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n " check_type(headers, dict) if (headers is not None): if ('Content-Type' in headers): check_type(headers.get('Content-Type'), basestring, may_be_none=False) if ('Accept' in headers): check_type(headers.get('Accept'), basestring, may_be_none=False) if ('ERS-Media-Type' in headers): check_type(headers.get('ERS-Media-Type'), basestring) if ('X-CSRF-TOKEN' in headers): check_type(headers.get('X-CSRF-TOKEN'), basestring) with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True is_xml_payload = ('application/xml' in _headers.get('Content-Type', [])) if (active_validation and is_xml_payload): check_type(payload, basestring) if (active_validation and (not is_xml_payload)): check_type(payload, dict) _params = {} _params.update(query_parameters) _params = dict_from_items_with_values(_params) path_params = {} if is_xml_payload: _payload = payload else: _tmp_payload = {'name': name, 'description': description, 'hostName': host_name, 'supportBundleIncludeOptions': support_bundle_include_options} _payload = {'SupportBundle': dict_from_items_with_values(_tmp_payload)} _payload.update((payload or {})) _payload = dict_from_items_with_values(_payload) if (active_validation and (not is_xml_payload)): self._request_validator('jsd_fac48e5c63abfe2feec6fd1903_v3_1_1').validate(_payload) e_url = '/ers/config/supportbundle' endpoint_full_url = apply_path_params(e_url, path_params) request_params = ({'data': _payload} if is_xml_payload else {'json': _payload}) if with_custom_headers: _api_response = 
self._session.post(endpoint_full_url, params=_params, headers=_headers, **request_params) else: _api_response = self._session.post(endpoint_full_url, params=_params, **request_params) return self._object_factory('bpm_fac48e5c63abfe2feec6fd1903_v3_1_1', _api_response)
def create_support_bundle(self, description=None, host_name=None, name=None, support_bundle_include_options=None, headers=None, payload=None, active_validation=True, **query_parameters): "This API allows the client to create a support bundle trigger\n configuration.\n\n Args:\n description(string): description, property of the\n request body.\n host_name(string): This parameter is hostName only, xxxx\n of xxxx.yyy.zz, property of the request\n body.\n name(string): Resource Name, property of the request\n body.\n support_bundle_include_options(object):\n supportBundleIncludeOptions, property of\n the request body.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. 
Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n " check_type(headers, dict) if (headers is not None): if ('Content-Type' in headers): check_type(headers.get('Content-Type'), basestring, may_be_none=False) if ('Accept' in headers): check_type(headers.get('Accept'), basestring, may_be_none=False) if ('ERS-Media-Type' in headers): check_type(headers.get('ERS-Media-Type'), basestring) if ('X-CSRF-TOKEN' in headers): check_type(headers.get('X-CSRF-TOKEN'), basestring) with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True is_xml_payload = ('application/xml' in _headers.get('Content-Type', [])) if (active_validation and is_xml_payload): check_type(payload, basestring) if (active_validation and (not is_xml_payload)): check_type(payload, dict) _params = {} _params.update(query_parameters) _params = dict_from_items_with_values(_params) path_params = {} if is_xml_payload: _payload = payload else: _tmp_payload = {'name': name, 'description': description, 'hostName': host_name, 'supportBundleIncludeOptions': support_bundle_include_options} _payload = {'SupportBundle': dict_from_items_with_values(_tmp_payload)} _payload.update((payload or {})) _payload = dict_from_items_with_values(_payload) if (active_validation and (not is_xml_payload)): self._request_validator('jsd_fac48e5c63abfe2feec6fd1903_v3_1_1').validate(_payload) e_url = '/ers/config/supportbundle' endpoint_full_url = apply_path_params(e_url, path_params) request_params = ({'data': _payload} if is_xml_payload else {'json': _payload}) if with_custom_headers: _api_response = 
self._session.post(endpoint_full_url, params=_params, headers=_headers, **request_params) else: _api_response = self._session.post(endpoint_full_url, params=_params, **request_params) return self._object_factory('bpm_fac48e5c63abfe2feec6fd1903_v3_1_1', _api_response)<|docstring|>This API allows the client to create a support bundle trigger configuration. Args: description(string): description, property of the request body. host_name(string): This parameter is hostName only, xxxx of xxxx.yyy.zz, property of the request body. name(string): Resource Name, property of the request body. support_bundle_include_options(object): supportBundleIncludeOptions, property of the request body. headers(dict): Dictionary of HTTP Headers to send with the Request . payload(dict): A JSON serializable Python object to send in the body of the Request. active_validation(bool): Enable/Disable payload validation. Defaults to True. **query_parameters: Additional query parameters (provides support for parameters that may be added in the future). Returns: RestResponse: REST response with following properties: - headers(MyDict): response headers. - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation or the bracket notation. - content(bytes): representation of the request's response - text(str): representation of the request's response Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the Identity Services Engine cloud returns an error.<|endoftext|>
9e504afad45cb9ece173b814dccc8daefac4dde01644b5243a8aae6c8a405e34
def create(self, description=None, host_name=None, name=None, support_bundle_include_options=None, headers=None, payload=None, active_validation=True, **query_parameters): 'Alias for `create_support_bundle <#ciscoisesdk.\n api.v3_1_1.support_bundle_trigger_configuration.\n SupportBundleTriggerConfiguration.create_support_bundle>`_\n ' return self.create_support_bundle(description=description, host_name=host_name, name=name, support_bundle_include_options=support_bundle_include_options, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)
Alias for `create_support_bundle <#ciscoisesdk. api.v3_1_1.support_bundle_trigger_configuration. SupportBundleTriggerConfiguration.create_support_bundle>`_
ciscoisesdk/api/v3_1_1/support_bundle_trigger_configuration.py
create
CiscoISE/ciscoisesdk
36
python
def create(self, description=None, host_name=None, name=None, support_bundle_include_options=None, headers=None, payload=None, active_validation=True, **query_parameters): 'Alias for `create_support_bundle <#ciscoisesdk.\n api.v3_1_1.support_bundle_trigger_configuration.\n SupportBundleTriggerConfiguration.create_support_bundle>`_\n ' return self.create_support_bundle(description=description, host_name=host_name, name=name, support_bundle_include_options=support_bundle_include_options, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)
def create(self, description=None, host_name=None, name=None, support_bundle_include_options=None, headers=None, payload=None, active_validation=True, **query_parameters): 'Alias for `create_support_bundle <#ciscoisesdk.\n api.v3_1_1.support_bundle_trigger_configuration.\n SupportBundleTriggerConfiguration.create_support_bundle>`_\n ' return self.create_support_bundle(description=description, host_name=host_name, name=name, support_bundle_include_options=support_bundle_include_options, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)<|docstring|>Alias for `create_support_bundle <#ciscoisesdk. api.v3_1_1.support_bundle_trigger_configuration. SupportBundleTriggerConfiguration.create_support_bundle>`_<|endoftext|>
e38e0766f0cee54ff547bfbbad353b80abc4f6c7225b596de454992adea90b39
def get_version(self, headers=None, **query_parameters): "This API helps to retrieve the version information related to\n the support bundle trigger configuration.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n " check_type(headers, dict) if (headers is not None): if ('Content-Type' in headers): check_type(headers.get('Content-Type'), basestring, may_be_none=False) if ('Accept' in headers): check_type(headers.get('Accept'), basestring, may_be_none=False) with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True _params = {} _params.update(query_parameters) _params = dict_from_items_with_values(_params) path_params = {} e_url = '/ers/config/supportbundle/versioninfo' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: _api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: _api_response = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_a2b17c3c4eab52caa2fc7c811965c79d_v3_1_1', _api_response)
This API helps to retrieve the version information related to the support bundle trigger configuration. Args: headers(dict): Dictionary of HTTP Headers to send with the Request . **query_parameters: Additional query parameters (provides support for parameters that may be added in the future). Returns: RestResponse: REST response with following properties: - headers(MyDict): response headers. - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation or the bracket notation. - content(bytes): representation of the request's response - text(str): representation of the request's response Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the Identity Services Engine cloud returns an error.
ciscoisesdk/api/v3_1_1/support_bundle_trigger_configuration.py
get_version
CiscoISE/ciscoisesdk
36
python
def get_version(self, headers=None, **query_parameters): "This API helps to retrieve the version information related to\n the support bundle trigger configuration.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n " check_type(headers, dict) if (headers is not None): if ('Content-Type' in headers): check_type(headers.get('Content-Type'), basestring, may_be_none=False) if ('Accept' in headers): check_type(headers.get('Accept'), basestring, may_be_none=False) with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True _params = {} _params.update(query_parameters) _params = dict_from_items_with_values(_params) path_params = {} e_url = '/ers/config/supportbundle/versioninfo' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: _api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: _api_response = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_a2b17c3c4eab52caa2fc7c811965c79d_v3_1_1', _api_response)
def get_version(self, headers=None, **query_parameters): "This API helps to retrieve the version information related to\n the support bundle trigger configuration.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n " check_type(headers, dict) if (headers is not None): if ('Content-Type' in headers): check_type(headers.get('Content-Type'), basestring, may_be_none=False) if ('Accept' in headers): check_type(headers.get('Accept'), basestring, may_be_none=False) with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True _params = {} _params.update(query_parameters) _params = dict_from_items_with_values(_params) path_params = {} e_url = '/ers/config/supportbundle/versioninfo' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: _api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: _api_response = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_a2b17c3c4eab52caa2fc7c811965c79d_v3_1_1', _api_response)<|docstring|>This API helps to retrieve the version information related to the support bundle trigger configuration. 
Args: headers(dict): Dictionary of HTTP Headers to send with the Request . **query_parameters: Additional query parameters (provides support for parameters that may be added in the future). Returns: RestResponse: REST response with following properties: - headers(MyDict): response headers. - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation or the bracket notation. - content(bytes): representation of the request's response - text(str): representation of the request's response Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the Identity Services Engine cloud returns an error.<|endoftext|>
814fd63d676ae5bb01eb4211e9a0df80a31f836555cfd180960c9ed5e455878e
def exp_refs(scm: 'Git') -> Generator[('ExpRefInfo', None, None)]: 'Iterate over all experiment refs.' for ref in scm.iter_refs(base=EXPS_NAMESPACE): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))
Iterate over all experiment refs.
dvc/repo/experiments/utils.py
exp_refs
jonburdo/dvc
0
python
def exp_refs(scm: 'Git') -> Generator[('ExpRefInfo', None, None)]: for ref in scm.iter_refs(base=EXPS_NAMESPACE): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))
def exp_refs(scm: 'Git') -> Generator[('ExpRefInfo', None, None)]: for ref in scm.iter_refs(base=EXPS_NAMESPACE): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))<|docstring|>Iterate over all experiment refs.<|endoftext|>
a794f4bc896746abc5145975c5b10937780d05bbcc8b3d9775bb5a0a3264c8ce
def exp_refs_by_rev(scm: 'Git', rev: str) -> Generator[('ExpRefInfo', None, None)]: 'Iterate over all experiment refs pointing to the specified revision.' for ref in scm.get_refs_containing(rev, EXPS_NAMESPACE): if (not (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH))): (yield ExpRefInfo.from_ref(ref))
Iterate over all experiment refs pointing to the specified revision.
dvc/repo/experiments/utils.py
exp_refs_by_rev
jonburdo/dvc
0
python
def exp_refs_by_rev(scm: 'Git', rev: str) -> Generator[('ExpRefInfo', None, None)]: for ref in scm.get_refs_containing(rev, EXPS_NAMESPACE): if (not (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH))): (yield ExpRefInfo.from_ref(ref))
def exp_refs_by_rev(scm: 'Git', rev: str) -> Generator[('ExpRefInfo', None, None)]: for ref in scm.get_refs_containing(rev, EXPS_NAMESPACE): if (not (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH))): (yield ExpRefInfo.from_ref(ref))<|docstring|>Iterate over all experiment refs pointing to the specified revision.<|endoftext|>
8a1c678d024c86c5b4325a78bc8c794c9b99488423dd081a6cc5492d3fe07afe
def exp_refs_by_baseline(scm: 'Git', rev: str) -> Generator[('ExpRefInfo', None, None)]: 'Iterate over all experiment refs with the specified baseline.' ref_info = ExpRefInfo(baseline_sha=rev) for ref in scm.iter_refs(base=str(ref_info)): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))
Iterate over all experiment refs with the specified baseline.
dvc/repo/experiments/utils.py
exp_refs_by_baseline
jonburdo/dvc
0
python
def exp_refs_by_baseline(scm: 'Git', rev: str) -> Generator[('ExpRefInfo', None, None)]: ref_info = ExpRefInfo(baseline_sha=rev) for ref in scm.iter_refs(base=str(ref_info)): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))
def exp_refs_by_baseline(scm: 'Git', rev: str) -> Generator[('ExpRefInfo', None, None)]: ref_info = ExpRefInfo(baseline_sha=rev) for ref in scm.iter_refs(base=str(ref_info)): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))<|docstring|>Iterate over all experiment refs with the specified baseline.<|endoftext|>
e6a19d07ca4c6129de3c1e11a024b450d25b30bf82019888bde3e21f6ac52e7d
def remote_exp_refs(scm: 'Git', url: str) -> Generator[('ExpRefInfo', None, None)]: 'Iterate over all remote experiment refs.' for ref in iter_remote_refs(scm, url, base=EXPS_NAMESPACE): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))
Iterate over all remote experiment refs.
dvc/repo/experiments/utils.py
remote_exp_refs
jonburdo/dvc
0
python
def remote_exp_refs(scm: 'Git', url: str) -> Generator[('ExpRefInfo', None, None)]: for ref in iter_remote_refs(scm, url, base=EXPS_NAMESPACE): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))
def remote_exp_refs(scm: 'Git', url: str) -> Generator[('ExpRefInfo', None, None)]: for ref in iter_remote_refs(scm, url, base=EXPS_NAMESPACE): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))<|docstring|>Iterate over all remote experiment refs.<|endoftext|>
39eb57684d1070ca654be8339712782a2b7fdcac1b15ab78e9686e86ead46d63
def exp_refs_by_names(scm: 'Git', names: Set[str], url: Optional[str]=None) -> Dict[(str, List[ExpRefInfo])]: 'Iterate over all experiment refs matching the specified names.' resolve_results = defaultdict(list) ref_info_gen = (remote_exp_refs(scm, url) if url else exp_refs(scm)) for ref_info in ref_info_gen: if (ref_info.name in names): resolve_results[ref_info.name].append(ref_info) return resolve_results
Iterate over all experiment refs matching the specified names.
dvc/repo/experiments/utils.py
exp_refs_by_names
jonburdo/dvc
0
python
def exp_refs_by_names(scm: 'Git', names: Set[str], url: Optional[str]=None) -> Dict[(str, List[ExpRefInfo])]: resolve_results = defaultdict(list) ref_info_gen = (remote_exp_refs(scm, url) if url else exp_refs(scm)) for ref_info in ref_info_gen: if (ref_info.name in names): resolve_results[ref_info.name].append(ref_info) return resolve_results
def exp_refs_by_names(scm: 'Git', names: Set[str], url: Optional[str]=None) -> Dict[(str, List[ExpRefInfo])]: resolve_results = defaultdict(list) ref_info_gen = (remote_exp_refs(scm, url) if url else exp_refs(scm)) for ref_info in ref_info_gen: if (ref_info.name in names): resolve_results[ref_info.name].append(ref_info) return resolve_results<|docstring|>Iterate over all experiment refs matching the specified names.<|endoftext|>
8cdbfaf48dd39be95e6ca6c265c712e327b135424e3350a9c601a242ba9b54d7
def remote_exp_refs_by_baseline(scm: 'Git', url: str, rev: str) -> Generator[('ExpRefInfo', None, None)]: 'Iterate over all remote experiment refs with the specified baseline.' ref_info = ExpRefInfo(baseline_sha=rev) for ref in iter_remote_refs(scm, url, base=str(ref_info)): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))
Iterate over all remote experiment refs with the specified baseline.
dvc/repo/experiments/utils.py
remote_exp_refs_by_baseline
jonburdo/dvc
0
python
def remote_exp_refs_by_baseline(scm: 'Git', url: str, rev: str) -> Generator[('ExpRefInfo', None, None)]: ref_info = ExpRefInfo(baseline_sha=rev) for ref in iter_remote_refs(scm, url, base=str(ref_info)): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))
def remote_exp_refs_by_baseline(scm: 'Git', url: str, rev: str) -> Generator[('ExpRefInfo', None, None)]: ref_info = ExpRefInfo(baseline_sha=rev) for ref in iter_remote_refs(scm, url, base=str(ref_info)): if (ref.startswith(EXEC_NAMESPACE) or (ref == EXPS_STASH)): continue (yield ExpRefInfo.from_ref(ref))<|docstring|>Iterate over all remote experiment refs with the specified baseline.<|endoftext|>
48ffb7cd02e1d68eda332fb87ce4ad2caf0f60ef01cd2cac378e2ec7de017850
def exp_commits(scm: 'Git', ref_infos: Iterable['ExpRefInfo']=None) -> Iterable[str]: 'Iterate over all experiment commits.' shas: Set['str'] = set() refs = (ref_infos if ref_infos else exp_refs(scm)) for ref_info in refs: shas.update(scm.branch_revs(str(ref_info), ref_info.baseline_sha)) if ref_info.baseline_sha: shas.add(ref_info.baseline_sha) (yield from shas)
Iterate over all experiment commits.
dvc/repo/experiments/utils.py
exp_commits
jonburdo/dvc
0
python
def exp_commits(scm: 'Git', ref_infos: Iterable['ExpRefInfo']=None) -> Iterable[str]: shas: Set['str'] = set() refs = (ref_infos if ref_infos else exp_refs(scm)) for ref_info in refs: shas.update(scm.branch_revs(str(ref_info), ref_info.baseline_sha)) if ref_info.baseline_sha: shas.add(ref_info.baseline_sha) (yield from shas)
def exp_commits(scm: 'Git', ref_infos: Iterable['ExpRefInfo']=None) -> Iterable[str]: shas: Set['str'] = set() refs = (ref_infos if ref_infos else exp_refs(scm)) for ref_info in refs: shas.update(scm.branch_revs(str(ref_info), ref_info.baseline_sha)) if ref_info.baseline_sha: shas.add(ref_info.baseline_sha) (yield from shas)<|docstring|>Iterate over all experiment commits.<|endoftext|>
26a7f1d358e9dfd786f05f3aa2b57dfb107969bda30b49e2efcf7d49fa15f0b7
def resolve_name(scm: 'Git', exp_names: Union[(Iterable[str], str)], git_remote: Optional[str]=None) -> Dict[(str, Optional[ExpRefInfo])]: 'find the ref_info of specified names.' if isinstance(exp_names, str): exp_names = [exp_names] result = {} unresolved = set() for exp_name in exp_names: if exp_name.startswith('refs/'): result[exp_name] = ExpRefInfo.from_ref(exp_name) else: unresolved.add(exp_name) unresolved_result = exp_refs_by_names(scm, unresolved, git_remote) cur_rev = scm.get_rev() for name in unresolved: ref_info_list = unresolved_result[name] if (not ref_info_list): result[name] = None elif (len(ref_info_list) == 1): result[name] = ref_info_list[0] else: for ref_info in ref_info_list: if (ref_info.baseline_sha == cur_rev): result[name] = ref_info break else: raise AmbiguousExpRefInfo(name, ref_info_list) return result
find the ref_info of specified names.
dvc/repo/experiments/utils.py
resolve_name
jonburdo/dvc
0
python
def resolve_name(scm: 'Git', exp_names: Union[(Iterable[str], str)], git_remote: Optional[str]=None) -> Dict[(str, Optional[ExpRefInfo])]: if isinstance(exp_names, str): exp_names = [exp_names] result = {} unresolved = set() for exp_name in exp_names: if exp_name.startswith('refs/'): result[exp_name] = ExpRefInfo.from_ref(exp_name) else: unresolved.add(exp_name) unresolved_result = exp_refs_by_names(scm, unresolved, git_remote) cur_rev = scm.get_rev() for name in unresolved: ref_info_list = unresolved_result[name] if (not ref_info_list): result[name] = None elif (len(ref_info_list) == 1): result[name] = ref_info_list[0] else: for ref_info in ref_info_list: if (ref_info.baseline_sha == cur_rev): result[name] = ref_info break else: raise AmbiguousExpRefInfo(name, ref_info_list) return result
def resolve_name(scm: 'Git', exp_names: Union[(Iterable[str], str)], git_remote: Optional[str]=None) -> Dict[(str, Optional[ExpRefInfo])]: if isinstance(exp_names, str): exp_names = [exp_names] result = {} unresolved = set() for exp_name in exp_names: if exp_name.startswith('refs/'): result[exp_name] = ExpRefInfo.from_ref(exp_name) else: unresolved.add(exp_name) unresolved_result = exp_refs_by_names(scm, unresolved, git_remote) cur_rev = scm.get_rev() for name in unresolved: ref_info_list = unresolved_result[name] if (not ref_info_list): result[name] = None elif (len(ref_info_list) == 1): result[name] = ref_info_list[0] else: for ref_info in ref_info_list: if (ref_info.baseline_sha == cur_rev): result[name] = ref_info break else: raise AmbiguousExpRefInfo(name, ref_info_list) return result<|docstring|>find the ref_info of specified names.<|endoftext|>
eaaf8cd01b4f4483e7d7857b53ddf7b73e268fced6fe7a4631642a66be86c921
def maptostr(target_list): 'Casts a list of python types to a list of strings\n Args:\n target_list (list): list containing python types\n Returns:\n List containing strings\n ' return [str(each) for each in target_list]
Casts a list of python types to a list of strings Args: target_list (list): list containing python types Returns: List containing strings
socless/legacy_jinja.py
maptostr
A-Gray-Cat/socless_python
4
python
def maptostr(target_list): 'Casts a list of python types to a list of strings\n Args:\n target_list (list): list containing python types\n Returns:\n List containing strings\n ' return [str(each) for each in target_list]
def maptostr(target_list): 'Casts a list of python types to a list of strings\n Args:\n target_list (list): list containing python types\n Returns:\n List containing strings\n ' return [str(each) for each in target_list]<|docstring|>Casts a list of python types to a list of strings Args: target_list (list): list containing python types Returns: List containing strings<|endoftext|>
3f15faec0b312947b5f24ae564d2e471dcb095574198a0e533499d67b5246e71
def send_req(self): 'Send a stats request to a datapath.' raise NotImplementedError
Send a stats request to a datapath.
faucet/gauge_pollers.py
send_req
libunamari/faucet-tests
0
python
def send_req(self): raise NotImplementedError
def send_req(self): raise NotImplementedError<|docstring|>Send a stats request to a datapath.<|endoftext|>
e8be2641bfe35d1c046340b861cb0f253163c2d70aea91b6f4d3554758a4f5a5
def update(self, rcv_time, dp_id, msg): 'Handle the responses to requests.\n\n Called when a reply to a stats request sent by this object is received\n by the controller.\n\n It should acknowledge the receipt by setting self.reply_pending to\n false.\n\n Arguments:\n rcv_time -- the time the response was received\n dp_id -- DP ID\n msg -- the stats reply message\n ' self.reply_pending = False
Handle the responses to requests. Called when a reply to a stats request sent by this object is received by the controller. It should acknowledge the receipt by setting self.reply_pending to false. Arguments: rcv_time -- the time the response was received dp_id -- DP ID msg -- the stats reply message
faucet/gauge_pollers.py
update
libunamari/faucet-tests
0
python
def update(self, rcv_time, dp_id, msg): 'Handle the responses to requests.\n\n Called when a reply to a stats request sent by this object is received\n by the controller.\n\n It should acknowledge the receipt by setting self.reply_pending to\n false.\n\n Arguments:\n rcv_time -- the time the response was received\n dp_id -- DP ID\n msg -- the stats reply message\n ' self.reply_pending = False
def update(self, rcv_time, dp_id, msg): 'Handle the responses to requests.\n\n Called when a reply to a stats request sent by this object is received\n by the controller.\n\n It should acknowledge the receipt by setting self.reply_pending to\n false.\n\n Arguments:\n rcv_time -- the time the response was received\n dp_id -- DP ID\n msg -- the stats reply message\n ' self.reply_pending = False<|docstring|>Handle the responses to requests. Called when a reply to a stats request sent by this object is received by the controller. It should acknowledge the receipt by setting self.reply_pending to false. Arguments: rcv_time -- the time the response was received dp_id -- DP ID msg -- the stats reply message<|endoftext|>
a87eb7dda7d2ef8241a7a5634fa7a682604389145a43133f2fe59f5d5c33cab4
def no_response(self): 'Called when a polling cycle passes without receiving a response.' raise NotImplementedError
Called when a polling cycle passes without receiving a response.
faucet/gauge_pollers.py
no_response
libunamari/faucet-tests
0
python
def no_response(self): raise NotImplementedError
def no_response(self): raise NotImplementedError<|docstring|>Called when a polling cycle passes without receiving a response.<|endoftext|>
469bf8973335f0cf16911d322f500792ed4464838fce78469ea403349d986212
def __call__(self): 'Send request loop.\n\n Delays the initial request for a random interval to reduce load.\n Then sends a request to the datapath, waits the specified interval and\n checks that a response has been received in a loop.' hub.sleep(random.randint(1, self.conf.interval)) while True: self.send_req() self.reply_pending = True hub.sleep(self.conf.interval) if self.reply_pending: self.no_response()
Send request loop. Delays the initial request for a random interval to reduce load. Then sends a request to the datapath, waits the specified interval and checks that a response has been received in a loop.
faucet/gauge_pollers.py
__call__
libunamari/faucet-tests
0
python
def __call__(self): 'Send request loop.\n\n Delays the initial request for a random interval to reduce load.\n Then sends a request to the datapath, waits the specified interval and\n checks that a response has been received in a loop.' hub.sleep(random.randint(1, self.conf.interval)) while True: self.send_req() self.reply_pending = True hub.sleep(self.conf.interval) if self.reply_pending: self.no_response()
def __call__(self): 'Send request loop.\n\n Delays the initial request for a random interval to reduce load.\n Then sends a request to the datapath, waits the specified interval and\n checks that a response has been received in a loop.' hub.sleep(random.randint(1, self.conf.interval)) while True: self.send_req() self.reply_pending = True hub.sleep(self.conf.interval) if self.reply_pending: self.no_response()<|docstring|>Send request loop. Delays the initial request for a random interval to reduce load. Then sends a request to the datapath, waits the specified interval and checks that a response has been received in a loop.<|endoftext|>
c58b97a29a83682f40037cbaeb6a7701a5e33f10e135f1156f2529ef6ea6131d
@gf.cell def taper_parabolic(length: float=20, width1: float=0.5, width2: float=5.0, exp: float=0.5, npoints: int=100, layer: Layer=(1, 0)) -> gf.Component: 'Returns a parabolic_taper\n\n Args:\n length:\n width1:\n width2:\n exp: exponent\n npoints: number of points\n layer\n ' x = np.linspace(0, 1, npoints) y = (transition_exponential(y1=width1, y2=width2, exp=exp)(x) / 2) x = (length * x) points1 = np.array([x, y]).T points2 = np.flipud(np.array([x, (- y)]).T) points = np.concatenate([points1, points2]) c = gf.Component() c.add_polygon(points, layer=layer) c.add_port(name='o1', midpoint=(0, 0), width=width1, orientation=180) c.add_port(name='o2', midpoint=(length, 0), width=width2, orientation=0) return c
Returns a parabolic_taper Args: length: width1: width2: exp: exponent npoints: number of points layer
gdsfactory/components/taper_parabolic.py
taper_parabolic
jorgepadilla19/gdsfactory
42
python
@gf.cell def taper_parabolic(length: float=20, width1: float=0.5, width2: float=5.0, exp: float=0.5, npoints: int=100, layer: Layer=(1, 0)) -> gf.Component: 'Returns a parabolic_taper\n\n Args:\n length:\n width1:\n width2:\n exp: exponent\n npoints: number of points\n layer\n ' x = np.linspace(0, 1, npoints) y = (transition_exponential(y1=width1, y2=width2, exp=exp)(x) / 2) x = (length * x) points1 = np.array([x, y]).T points2 = np.flipud(np.array([x, (- y)]).T) points = np.concatenate([points1, points2]) c = gf.Component() c.add_polygon(points, layer=layer) c.add_port(name='o1', midpoint=(0, 0), width=width1, orientation=180) c.add_port(name='o2', midpoint=(length, 0), width=width2, orientation=0) return c
@gf.cell def taper_parabolic(length: float=20, width1: float=0.5, width2: float=5.0, exp: float=0.5, npoints: int=100, layer: Layer=(1, 0)) -> gf.Component: 'Returns a parabolic_taper\n\n Args:\n length:\n width1:\n width2:\n exp: exponent\n npoints: number of points\n layer\n ' x = np.linspace(0, 1, npoints) y = (transition_exponential(y1=width1, y2=width2, exp=exp)(x) / 2) x = (length * x) points1 = np.array([x, y]).T points2 = np.flipud(np.array([x, (- y)]).T) points = np.concatenate([points1, points2]) c = gf.Component() c.add_polygon(points, layer=layer) c.add_port(name='o1', midpoint=(0, 0), width=width1, orientation=180) c.add_port(name='o2', midpoint=(length, 0), width=width2, orientation=0) return c<|docstring|>Returns a parabolic_taper Args: length: width1: width2: exp: exponent npoints: number of points layer<|endoftext|>
0bd834df4cab998394a2080bc0345745be4c03ad8ca8a767a3a09ad7c22408eb
def _custom_request_matcher(r1, r2):
    """Ensure method, path, and query parameters match.

    Query parameters are compared order-insensitively; only the first
    value of each parameter is considered, case-insensitively.
    """
    if r1.method != r2.method:
        return False
    parsed1 = urlparse(r1.uri)
    parsed2 = urlparse(r2.uri)
    if parsed1.path != parsed2.path:
        return False
    query1 = parse_qs(parsed1.query)
    query2 = parse_qs(parsed2.query)
    # Both requests must carry exactly the same set of parameter names.
    if set(query1) != set(query2):
        return False
    return all(query1[key][0].lower() == query2[key][0].lower() for key in query1)
Ensure method, path, and query parameters match.
src/azure-cli-testsdk/azure/cli/testsdk/vcr_test_base.py
_custom_request_matcher
viananth/azure-cli
0
python
def _custom_request_matcher(r1, r2): ' ' if (r1.method != r2.method): return False url1 = urlparse(r1.uri) url2 = urlparse(r2.uri) if (url1.path != url2.path): return False q1 = parse_qs(url1.query) q2 = parse_qs(url2.query) shared_keys = set(q1.keys()).intersection(set(q2.keys())) if ((len(shared_keys) != len(q1)) or (len(shared_keys) != len(q2))): return False for key in shared_keys: if (q1[key][0].lower() != q2[key][0].lower()): return False return True
def _custom_request_matcher(r1, r2): ' ' if (r1.method != r2.method): return False url1 = urlparse(r1.uri) url2 = urlparse(r2.uri) if (url1.path != url2.path): return False q1 = parse_qs(url1.query) q2 = parse_qs(url2.query) shared_keys = set(q1.keys()).intersection(set(q2.keys())) if ((len(shared_keys) != len(q1)) or (len(shared_keys) != len(q2))): return False for key in shared_keys: if (q1[key][0].lower() != q2[key][0].lower()): return False return True<|docstring|>Ensure method, path, and query parameters match.<|endoftext|>
ac38c01a1aa78e8cd4f12fb182a6082b6be0532b7007ab5f8907ef82c985b2b3
def _post_recording_scrub(self):
    """Perform post-recording cleanup on the recorded YAML cassette that
    can't be accomplished with the VCR recording hooks.

    Two scrubs are applied, in order, to every line of the file at
    ``self.cassette_path`` and the result is written back in place:
      * the live resource-group name is mapped back to the original one
        when the test ran against a differently named group, and
      * any line carrying an ``authorization:`` header is dropped so
        credentials are never persisted in the cassette.
    """
    src_path = self.cassette_path
    rg_name = getattr(self, 'resource_group', None)
    rg_original = getattr(self, 'resource_group_original', None)
    scrubbed = []
    with open(src_path, 'r') as recording:
        for line in recording:
            # Restore the recorded resource-group name before filtering.
            if rg_original and rg_name != rg_original:
                line = line.replace(rg_name, rg_original)
            if 'authorization:' not in line.lower():
                scrubbed.append(line)
    # Buffering in memory (instead of the previous unmanaged
    # NamedTemporaryFile, which leaked its handle on any exception
    # raised mid-scrub) keeps cleanup exception-safe with no temp file.
    with open(src_path, 'w') as recording:
        recording.writelines(scrubbed)
Perform post-recording cleanup on the YAML file that can't be accomplished with the VCR recording hooks.
src/azure-cli-testsdk/azure/cli/testsdk/vcr_test_base.py
_post_recording_scrub
viananth/azure-cli
0
python
def _post_recording_scrub(self): " Perform post-recording cleanup on the YAML file that can't be accomplished with the\n VCR recording hooks. " src_path = self.cassette_path rg_name = getattr(self, 'resource_group', None) rg_original = getattr(self, 'resource_group_original', None) t = tempfile.NamedTemporaryFile('r+') with open(src_path, 'r') as f: for line in f: if (rg_original and (rg_name != rg_original)): line = line.replace(rg_name, rg_original) if ('authorization:' not in line.lower()): t.write(line) t.seek(0) with open(src_path, 'w') as f: for line in t: f.write(line) t.close()
def _post_recording_scrub(self): " Perform post-recording cleanup on the YAML file that can't be accomplished with the\n VCR recording hooks. " src_path = self.cassette_path rg_name = getattr(self, 'resource_group', None) rg_original = getattr(self, 'resource_group_original', None) t = tempfile.NamedTemporaryFile('r+') with open(src_path, 'r') as f: for line in f: if (rg_original and (rg_name != rg_original)): line = line.replace(rg_name, rg_original) if ('authorization:' not in line.lower()): t.write(line) t.seek(0) with open(src_path, 'w') as f: for line in t: f.write(line) t.close()<|docstring|>Perform post-recording cleanup on the YAML file that can't be accomplished with the VCR recording hooks.<|endoftext|>