body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
ad88ce277d971c4c9e329b9260a8fadf1e76c46b6bb34f7f11ad3cc27a1c735b
def get_neighbour_mask(arr, max_val=1, neighbour_dist=1): '\n Convolve a linear filter (default: 3x3, i.e. 1 neighbour on each side), reflecting\n at the boundaries (i.e. as if convolving on an image expanded by one pixel at each\n border) and then compare the result against the maximum possible value, `max_val`\n (default: 1) from the kernel (i.e. thereby report if a given pixel is completely \n surrounded by the maximum value).\n ' kernel_shape = np.repeat((1 + (2 * neighbour_dist)), 2) kernel = np.ones(kernel_shape) kernel_max = (kernel.sum() * max_val) mask = (convolve(arr, kernel) == kernel_max) return mask
Convolve a linear filter (default: 3x3, i.e. 1 neighbour on each side), reflecting at the boundaries (i.e. as if convolving on an image expanded by one pixel at each border) and then compare the result against the maximum possible value, `max_val` (default: 1) from the kernel (i.e. thereby report if a given pixel is completely surrounded by the maximum value).
reestimate_leaf_sr_transparency.py
get_neighbour_mask
lmmx/emoji-liif
1
python
def get_neighbour_mask(arr, max_val=1, neighbour_dist=1): '\n Convolve a linear filter (default: 3x3, i.e. 1 neighbour on each side), reflecting\n at the boundaries (i.e. as if convolving on an image expanded by one pixel at each\n border) and then compare the result against the maximum possible value, `max_val`\n (default: 1) from the kernel (i.e. thereby report if a given pixel is completely \n surrounded by the maximum value).\n ' kernel_shape = np.repeat((1 + (2 * neighbour_dist)), 2) kernel = np.ones(kernel_shape) kernel_max = (kernel.sum() * max_val) mask = (convolve(arr, kernel) == kernel_max) return mask
def get_neighbour_mask(arr, max_val=1, neighbour_dist=1): '\n Convolve a linear filter (default: 3x3, i.e. 1 neighbour on each side), reflecting\n at the boundaries (i.e. as if convolving on an image expanded by one pixel at each\n border) and then compare the result against the maximum possible value, `max_val`\n (default: 1) from the kernel (i.e. thereby report if a given pixel is completely \n surrounded by the maximum value).\n ' kernel_shape = np.repeat((1 + (2 * neighbour_dist)), 2) kernel = np.ones(kernel_shape) kernel_max = (kernel.sum() * max_val) mask = (convolve(arr, kernel) == kernel_max) return mask<|docstring|>Convolve a linear filter (default: 3x3, i.e. 1 neighbour on each side), reflecting at the boundaries (i.e. as if convolving on an image expanded by one pixel at each border) and then compare the result against the maximum possible value, `max_val` (default: 1) from the kernel (i.e. thereby report if a given pixel is completely surrounded by the maximum value).<|endoftext|>
be43c29bd2763985614f50119d855787ad32bca4d2b91a6210e73ddc232d9ea1
def alpha_composite_bg(img, background_shade): '\n Linearly composite an RGBA image against a grayscale background. Image dtype\n is preserved. Output height/width will match those of `im`, but the alpha\n channel dimension will be dropped making it only RGB.\n ' if (not isinstance(background_shade, int)): raise TypeError('background_shade must be an integer') im = img.astype(float) bg = (background_shade / 255) im_max = im.max() im /= im_max im_rgb = im[(:, :, :3)] bg_rgb = (np.ones_like(im_rgb) * bg) alpha_im = im[(:, :, 3)] alpha_bg = (1 - alpha_im) im_rgb *= alpha_im[(:, :, None)] bg_rgb *= alpha_bg[(:, :, None)] composited = (im_rgb + bg_rgb) composited *= im_max composited = composited.astype(img.dtype) return composited
Linearly composite an RGBA image against a grayscale background. Image dtype is preserved. Output height/width will match those of `im`, but the alpha channel dimension will be dropped making it only RGB.
reestimate_leaf_sr_transparency.py
alpha_composite_bg
lmmx/emoji-liif
1
python
def alpha_composite_bg(img, background_shade): '\n Linearly composite an RGBA image against a grayscale background. Image dtype\n is preserved. Output height/width will match those of `im`, but the alpha\n channel dimension will be dropped making it only RGB.\n ' if (not isinstance(background_shade, int)): raise TypeError('background_shade must be an integer') im = img.astype(float) bg = (background_shade / 255) im_max = im.max() im /= im_max im_rgb = im[(:, :, :3)] bg_rgb = (np.ones_like(im_rgb) * bg) alpha_im = im[(:, :, 3)] alpha_bg = (1 - alpha_im) im_rgb *= alpha_im[(:, :, None)] bg_rgb *= alpha_bg[(:, :, None)] composited = (im_rgb + bg_rgb) composited *= im_max composited = composited.astype(img.dtype) return composited
def alpha_composite_bg(img, background_shade): '\n Linearly composite an RGBA image against a grayscale background. Image dtype\n is preserved. Output height/width will match those of `im`, but the alpha\n channel dimension will be dropped making it only RGB.\n ' if (not isinstance(background_shade, int)): raise TypeError('background_shade must be an integer') im = img.astype(float) bg = (background_shade / 255) im_max = im.max() im /= im_max im_rgb = im[(:, :, :3)] bg_rgb = (np.ones_like(im_rgb) * bg) alpha_im = im[(:, :, 3)] alpha_bg = (1 - alpha_im) im_rgb *= alpha_im[(:, :, None)] bg_rgb *= alpha_bg[(:, :, None)] composited = (im_rgb + bg_rgb) composited *= im_max composited = composited.astype(img.dtype) return composited<|docstring|>Linearly composite an RGBA image against a grayscale background. Image dtype is preserved. Output height/width will match those of `im`, but the alpha channel dimension will be dropped making it only RGB.<|endoftext|>
6ce28f65864b9e4cb19c232dfb9ebd4e5c0d263fd63f2d44d64324b917d0eef1
def rescale_float_img_to_0_255_int64(float_img): '\n tf.resize has not preserved range, so multiply the `float_img` by the reciprocal of\n its maximum (to restore to the range [0,1]) then transform to [0,255] then convert\n the uint8 type to int64\n ' if (float_img < 0).any(): raise ValueError('Error: about to clip sub-zero values via `img_as_ubyte`') return img_as_ubyte((float_img * np.reciprocal(float_img.max()))).astype(int)
tf.resize has not preserved range, so multiply the `float_img` by the reciprocal of its maximum (to restore to the range [0,1]) then transform to [0,255] then convert the uint8 type to int64
reestimate_leaf_sr_transparency.py
rescale_float_img_to_0_255_int64
lmmx/emoji-liif
1
python
def rescale_float_img_to_0_255_int64(float_img): '\n tf.resize has not preserved range, so multiply the `float_img` by the reciprocal of\n its maximum (to restore to the range [0,1]) then transform to [0,255] then convert\n the uint8 type to int64\n ' if (float_img < 0).any(): raise ValueError('Error: about to clip sub-zero values via `img_as_ubyte`') return img_as_ubyte((float_img * np.reciprocal(float_img.max()))).astype(int)
def rescale_float_img_to_0_255_int64(float_img): '\n tf.resize has not preserved range, so multiply the `float_img` by the reciprocal of\n its maximum (to restore to the range [0,1]) then transform to [0,255] then convert\n the uint8 type to int64\n ' if (float_img < 0).any(): raise ValueError('Error: about to clip sub-zero values via `img_as_ubyte`') return img_as_ubyte((float_img * np.reciprocal(float_img.max()))).astype(int)<|docstring|>tf.resize has not preserved range, so multiply the `float_img` by the reciprocal of its maximum (to restore to the range [0,1]) then transform to [0,255] then convert the uint8 type to int64<|endoftext|>
da9f2d61b2e58e3b5f4b7fac7ca383362632af1ecba86d2d8ba7b1a5ea2fcbb1
def to_dwc(observations: AnyObservations, filename: str): 'Convert observations into to a Simple Darwin Core RecordSet' import xmltodict records = [observation_to_dwc_record(obs) for obs in flatten_observations(observations)] record_set = get_dwc_record_set(records) record_xml = xmltodict.unparse(record_set, pretty=True, indent=(' ' * 4)) write(record_xml, filename)
Convert observations into to a Simple Darwin Core RecordSet
pyinaturalist_convert/dwc.py
to_dwc
FelipeSBarros/pyinaturalist-convert
6
python
def to_dwc(observations: AnyObservations, filename: str): import xmltodict records = [observation_to_dwc_record(obs) for obs in flatten_observations(observations)] record_set = get_dwc_record_set(records) record_xml = xmltodict.unparse(record_set, pretty=True, indent=(' ' * 4)) write(record_xml, filename)
def to_dwc(observations: AnyObservations, filename: str): import xmltodict records = [observation_to_dwc_record(obs) for obs in flatten_observations(observations)] record_set = get_dwc_record_set(records) record_xml = xmltodict.unparse(record_set, pretty=True, indent=(' ' * 4)) write(record_xml, filename)<|docstring|>Convert observations into to a Simple Darwin Core RecordSet<|endoftext|>
55b0007f135cd8af4059e8ec7a034d38a9137d12e5d03d8c8e99527a4e28c2e4
def observation_to_dwc_record(observation: Dict) -> Dict: 'Translate a flattened JSON observation from API results to a DwC record' dwc_record = {} observation = add_taxon_ancestors(observation) for (inat_field, dwc_fields) in OBSERVATION_FIELDS.items(): for dwc_field in ensure_str_list(dwc_fields): dwc_record[dwc_field] = observation[inat_field] dwc_record['dcterms:license'] = format_license(observation['license_code']) dwc_record['dwc:datasetName'] = format_dataset_name(observation['quality_grade']) photos = [photo_to_data_object(photo) for photo in observation['photos']] dwc_record['eol:dataObject'] = photos for (dwc_field, value) in CONSTANTS.items(): dwc_record[dwc_field] = value return dwc_record
Translate a flattened JSON observation from API results to a DwC record
pyinaturalist_convert/dwc.py
observation_to_dwc_record
FelipeSBarros/pyinaturalist-convert
6
python
def observation_to_dwc_record(observation: Dict) -> Dict: dwc_record = {} observation = add_taxon_ancestors(observation) for (inat_field, dwc_fields) in OBSERVATION_FIELDS.items(): for dwc_field in ensure_str_list(dwc_fields): dwc_record[dwc_field] = observation[inat_field] dwc_record['dcterms:license'] = format_license(observation['license_code']) dwc_record['dwc:datasetName'] = format_dataset_name(observation['quality_grade']) photos = [photo_to_data_object(photo) for photo in observation['photos']] dwc_record['eol:dataObject'] = photos for (dwc_field, value) in CONSTANTS.items(): dwc_record[dwc_field] = value return dwc_record
def observation_to_dwc_record(observation: Dict) -> Dict: dwc_record = {} observation = add_taxon_ancestors(observation) for (inat_field, dwc_fields) in OBSERVATION_FIELDS.items(): for dwc_field in ensure_str_list(dwc_fields): dwc_record[dwc_field] = observation[inat_field] dwc_record['dcterms:license'] = format_license(observation['license_code']) dwc_record['dwc:datasetName'] = format_dataset_name(observation['quality_grade']) photos = [photo_to_data_object(photo) for photo in observation['photos']] dwc_record['eol:dataObject'] = photos for (dwc_field, value) in CONSTANTS.items(): dwc_record[dwc_field] = value return dwc_record<|docstring|>Translate a flattened JSON observation from API results to a DwC record<|endoftext|>
84510095bfd4eaa67c843f0d50a2183d2a849e9306ef5a2affd0d648f44b9e58
def photo_to_data_object(photo: Dict) -> Dict: 'Translate observation photo fields to eol:dataObject fields' dwc_photo = {} for (inat_field, dwc_fields) in PHOTO_FIELDS.items(): for dwc_field in ensure_str_list(dwc_fields): dwc_photo[dwc_field] = photo[inat_field] for (dwc_field, value) in PHOTO_CONSTANTS.items(): dwc_photo[dwc_field] = value dwc_photo['xap:UsageTerms'] = format_license(photo['license_code']) return dwc_photo
Translate observation photo fields to eol:dataObject fields
pyinaturalist_convert/dwc.py
photo_to_data_object
FelipeSBarros/pyinaturalist-convert
6
python
def photo_to_data_object(photo: Dict) -> Dict: dwc_photo = {} for (inat_field, dwc_fields) in PHOTO_FIELDS.items(): for dwc_field in ensure_str_list(dwc_fields): dwc_photo[dwc_field] = photo[inat_field] for (dwc_field, value) in PHOTO_CONSTANTS.items(): dwc_photo[dwc_field] = value dwc_photo['xap:UsageTerms'] = format_license(photo['license_code']) return dwc_photo
def photo_to_data_object(photo: Dict) -> Dict: dwc_photo = {} for (inat_field, dwc_fields) in PHOTO_FIELDS.items(): for dwc_field in ensure_str_list(dwc_fields): dwc_photo[dwc_field] = photo[inat_field] for (dwc_field, value) in PHOTO_CONSTANTS.items(): dwc_photo[dwc_field] = value dwc_photo['xap:UsageTerms'] = format_license(photo['license_code']) return dwc_photo<|docstring|>Translate observation photo fields to eol:dataObject fields<|endoftext|>
d681d564ae94a4c044e1a069b40e142d4d3a0ee405c41e59c8539df071b9001e
def get_dwc_record_set(records: List[Dict]) -> Dict: 'Make a DwC RecordSet including XML namespaces and the provided observation records' namespaces = {f'@{k}': v for (k, v) in XML_NAMESPACES.items()} return {'dwr:SimpleDarwinRecordSet': {**namespaces, 'dwr:SimpleDarwinRecord': records}}
Make a DwC RecordSet including XML namespaces and the provided observation records
pyinaturalist_convert/dwc.py
get_dwc_record_set
FelipeSBarros/pyinaturalist-convert
6
python
def get_dwc_record_set(records: List[Dict]) -> Dict: namespaces = {f'@{k}': v for (k, v) in XML_NAMESPACES.items()} return {'dwr:SimpleDarwinRecordSet': {**namespaces, 'dwr:SimpleDarwinRecord': records}}
def get_dwc_record_set(records: List[Dict]) -> Dict: namespaces = {f'@{k}': v for (k, v) in XML_NAMESPACES.items()} return {'dwr:SimpleDarwinRecordSet': {**namespaces, 'dwr:SimpleDarwinRecord': records}}<|docstring|>Make a DwC RecordSet including XML namespaces and the provided observation records<|endoftext|>
c3c9e41b321c24568dc5d14c788414ad62fb23eca46e828bfe73523df0b5155e
def add_taxon_ancestors(observation): "observation['taxon'] doesn't have full ancestry, so we'll need to get that from the\n /taxa endpoint\n " response = get_taxa_by_id(observation['taxon.id']) taxon = response['results'][0] for ancestor in taxon['ancestors']: observation[f"taxon.{ancestor['rank']}"] = ancestor['name'] return observation
observation['taxon'] doesn't have full ancestry, so we'll need to get that from the /taxa endpoint
pyinaturalist_convert/dwc.py
add_taxon_ancestors
FelipeSBarros/pyinaturalist-convert
6
python
def add_taxon_ancestors(observation): "observation['taxon'] doesn't have full ancestry, so we'll need to get that from the\n /taxa endpoint\n " response = get_taxa_by_id(observation['taxon.id']) taxon = response['results'][0] for ancestor in taxon['ancestors']: observation[f"taxon.{ancestor['rank']}"] = ancestor['name'] return observation
def add_taxon_ancestors(observation): "observation['taxon'] doesn't have full ancestry, so we'll need to get that from the\n /taxa endpoint\n " response = get_taxa_by_id(observation['taxon.id']) taxon = response['results'][0] for ancestor in taxon['ancestors']: observation[f"taxon.{ancestor['rank']}"] = ancestor['name'] return observation<|docstring|>observation['taxon'] doesn't have full ancestry, so we'll need to get that from the /taxa endpoint<|endoftext|>
3a26a44e268f1d9fd5406c886b31d5223e76c4d8454ce6cf1b213dc8a4ee1bcf
def format_license(license_code: str) -> str: 'Format a Creative Commons license code into a URL with its license information.\n Example: ``CC-BY-NC --> https://creativecommons.org/licenses/by-nc/4.0/``\n ' url_slug = license_code.lower().replace('cc-', '') return f'{CC_BASE_URL}/{url_slug}/{CC_VERSION}'
Format a Creative Commons license code into a URL with its license information. Example: ``CC-BY-NC --> https://creativecommons.org/licenses/by-nc/4.0/``
pyinaturalist_convert/dwc.py
format_license
FelipeSBarros/pyinaturalist-convert
6
python
def format_license(license_code: str) -> str: 'Format a Creative Commons license code into a URL with its license information.\n Example: ``CC-BY-NC --> https://creativecommons.org/licenses/by-nc/4.0/``\n ' url_slug = license_code.lower().replace('cc-', ) return f'{CC_BASE_URL}/{url_slug}/{CC_VERSION}'
def format_license(license_code: str) -> str: 'Format a Creative Commons license code into a URL with its license information.\n Example: ``CC-BY-NC --> https://creativecommons.org/licenses/by-nc/4.0/``\n ' url_slug = license_code.lower().replace('cc-', ) return f'{CC_BASE_URL}/{url_slug}/{CC_VERSION}'<|docstring|>Format a Creative Commons license code into a URL with its license information. Example: ``CC-BY-NC --> https://creativecommons.org/licenses/by-nc/4.0/``<|endoftext|>
e7176f3521c82f5ab842b10b29b75341d92f563f78e0bb0ee6782ffdaae12e67
def test_observation_to_dwc(): 'Get a test observation, convert it to DwC, and write it to a file' response = get_observations(id=45524803) observation = response['results'][0] to_dwc(observation, join('test', 'sample_data', 'observations.dwc'))
Get a test observation, convert it to DwC, and write it to a file
pyinaturalist_convert/dwc.py
test_observation_to_dwc
FelipeSBarros/pyinaturalist-convert
6
python
def test_observation_to_dwc(): response = get_observations(id=45524803) observation = response['results'][0] to_dwc(observation, join('test', 'sample_data', 'observations.dwc'))
def test_observation_to_dwc(): response = get_observations(id=45524803) observation = response['results'][0] to_dwc(observation, join('test', 'sample_data', 'observations.dwc'))<|docstring|>Get a test observation, convert it to DwC, and write it to a file<|endoftext|>
35f90d6d27e99712cad063017d6a3d90445435b61480f71bcd7d28c5f2e0f05b
def _fixture_setup(self): "Finds a list called :attr:`datasets` and loads them\n\n This is done in a transaction if possible.\n I'm not using the settings.DATABASE_SUPPORTS_TRANSACTIONS as I don't\n wnat to assume that :meth:`connection.create_test_db` might not have been\n called\n " if check_supports_transactions(connection): transaction.enter_transaction_management() transaction.managed(True) testcases.disable_transaction_methods() from django.contrib.sites.models import Site Site.objects.clear_cache() if (not hasattr(self, 'fixture')): self.fixture = DjangoFixture() if hasattr(self, 'datasets'): self.data = self.fixture.data(*self.datasets) self.data.setup()
Finds a list called :attr:`datasets` and loads them This is done in a transaction if possible. I'm not using the settings.DATABASE_SUPPORTS_TRANSACTIONS as I don't wnat to assume that :meth:`connection.create_test_db` might not have been called
gae_app/lib/fixture/django_testcase.py
_fixture_setup
dcifuen/gentlemeet
1
python
def _fixture_setup(self): "Finds a list called :attr:`datasets` and loads them\n\n This is done in a transaction if possible.\n I'm not using the settings.DATABASE_SUPPORTS_TRANSACTIONS as I don't\n wnat to assume that :meth:`connection.create_test_db` might not have been\n called\n " if check_supports_transactions(connection): transaction.enter_transaction_management() transaction.managed(True) testcases.disable_transaction_methods() from django.contrib.sites.models import Site Site.objects.clear_cache() if (not hasattr(self, 'fixture')): self.fixture = DjangoFixture() if hasattr(self, 'datasets'): self.data = self.fixture.data(*self.datasets) self.data.setup()
def _fixture_setup(self): "Finds a list called :attr:`datasets` and loads them\n\n This is done in a transaction if possible.\n I'm not using the settings.DATABASE_SUPPORTS_TRANSACTIONS as I don't\n wnat to assume that :meth:`connection.create_test_db` might not have been\n called\n " if check_supports_transactions(connection): transaction.enter_transaction_management() transaction.managed(True) testcases.disable_transaction_methods() from django.contrib.sites.models import Site Site.objects.clear_cache() if (not hasattr(self, 'fixture')): self.fixture = DjangoFixture() if hasattr(self, 'datasets'): self.data = self.fixture.data(*self.datasets) self.data.setup()<|docstring|>Finds a list called :attr:`datasets` and loads them This is done in a transaction if possible. I'm not using the settings.DATABASE_SUPPORTS_TRANSACTIONS as I don't wnat to assume that :meth:`connection.create_test_db` might not have been called<|endoftext|>
9c8714879e9eb2293b180977c4ac304721ff325adca045f9cf59e541696e7cfb
def _fixture_teardown(self): 'Finds an attribute called :attr:`data` and runs teardown on it\n\n (data is created by :meth:`_fixture_setup`)\n ' if hasattr(self, 'data'): self.data.teardown() if check_supports_transactions(connection): testcases.restore_transaction_methods() transaction.rollback() transaction.leave_transaction_management() connection.close()
Finds an attribute called :attr:`data` and runs teardown on it (data is created by :meth:`_fixture_setup`)
gae_app/lib/fixture/django_testcase.py
_fixture_teardown
dcifuen/gentlemeet
1
python
def _fixture_teardown(self): 'Finds an attribute called :attr:`data` and runs teardown on it\n\n (data is created by :meth:`_fixture_setup`)\n ' if hasattr(self, 'data'): self.data.teardown() if check_supports_transactions(connection): testcases.restore_transaction_methods() transaction.rollback() transaction.leave_transaction_management() connection.close()
def _fixture_teardown(self): 'Finds an attribute called :attr:`data` and runs teardown on it\n\n (data is created by :meth:`_fixture_setup`)\n ' if hasattr(self, 'data'): self.data.teardown() if check_supports_transactions(connection): testcases.restore_transaction_methods() transaction.rollback() transaction.leave_transaction_management() connection.close()<|docstring|>Finds an attribute called :attr:`data` and runs teardown on it (data is created by :meth:`_fixture_setup`)<|endoftext|>
acca9d62c40e8da1b85a570030ab024d474582811a49f4302b28d5dab93743c6
def __init__(self, h5_file, imgs=None, segs=None, dx=None, compress=True): " Initialize a dataset manager\n\n Parameters\n ----------\n h5_file: str\n A (possibly already existing) hdf5 dataset\n\n imgs: List(ndarray), default=None\n List of images. Necessary when `h5_file` doe not exist.\n\n segs: List(ndarray), default=None\n List of respective segmentations for :code:`imgs`. Necessary when\n `h5_file` doe not exist.\n\n dx: List(ndarray), default=None\n The delta spacing along the axis directions in the provided\n images and segmentations. Used when `h5_file` doesn't exist. In\n that case, the default of None uses all ones.\n\n compress: bool, default=True\n When the image and segmentation data are stored in the hdf5 file\n this flag indicates whether or not to use compression.\n\n Note\n ----\n Either :code:`h5_file` should be the name of an existing h5 file with\n appropriate structure (see :method:`convert_to_hdf5`) or `imgs`\n and `segs` should be non-None, and the hdf5 file will creating\n using the provided images and segmentations and we be named with the\n argument `h5_file`.\n\n " self.h5_file = os.path.abspath(h5_file) self.datasets = {dataset_key: [] for dataset_key in self._iterate_dataset_keys()} if (not os.path.exists(self.h5_file)): if ((imgs is None) or (segs is None)): msg = "Provided `h5_file` {} doesn't exist but no image or segmentation data provided" raise ValueError(msg.format(h5_file)) self.convert_to_hdf5(imgs=imgs, segs=segs, dx=dx, compress=compress) with h5py.File(self.h5_file, mode='r') as hf: self.n_examples = len(hf.keys())
Initialize a dataset manager Parameters ---------- h5_file: str A (possibly already existing) hdf5 dataset imgs: List(ndarray), default=None List of images. Necessary when `h5_file` doe not exist. segs: List(ndarray), default=None List of respective segmentations for :code:`imgs`. Necessary when `h5_file` doe not exist. dx: List(ndarray), default=None The delta spacing along the axis directions in the provided images and segmentations. Used when `h5_file` doesn't exist. In that case, the default of None uses all ones. compress: bool, default=True When the image and segmentation data are stored in the hdf5 file this flag indicates whether or not to use compression. Note ---- Either :code:`h5_file` should be the name of an existing h5 file with appropriate structure (see :method:`convert_to_hdf5`) or `imgs` and `segs` should be non-None, and the hdf5 file will creating using the provided images and segmentations and we be named with the argument `h5_file`.
lsml/core/datasets_handler.py
__init__
sandeepdas05/lsm-crack-width
24
python
def __init__(self, h5_file, imgs=None, segs=None, dx=None, compress=True): " Initialize a dataset manager\n\n Parameters\n ----------\n h5_file: str\n A (possibly already existing) hdf5 dataset\n\n imgs: List(ndarray), default=None\n List of images. Necessary when `h5_file` doe not exist.\n\n segs: List(ndarray), default=None\n List of respective segmentations for :code:`imgs`. Necessary when\n `h5_file` doe not exist.\n\n dx: List(ndarray), default=None\n The delta spacing along the axis directions in the provided\n images and segmentations. Used when `h5_file` doesn't exist. In\n that case, the default of None uses all ones.\n\n compress: bool, default=True\n When the image and segmentation data are stored in the hdf5 file\n this flag indicates whether or not to use compression.\n\n Note\n ----\n Either :code:`h5_file` should be the name of an existing h5 file with\n appropriate structure (see :method:`convert_to_hdf5`) or `imgs`\n and `segs` should be non-None, and the hdf5 file will creating\n using the provided images and segmentations and we be named with the\n argument `h5_file`.\n\n " self.h5_file = os.path.abspath(h5_file) self.datasets = {dataset_key: [] for dataset_key in self._iterate_dataset_keys()} if (not os.path.exists(self.h5_file)): if ((imgs is None) or (segs is None)): msg = "Provided `h5_file` {} doesn't exist but no image or segmentation data provided" raise ValueError(msg.format(h5_file)) self.convert_to_hdf5(imgs=imgs, segs=segs, dx=dx, compress=compress) with h5py.File(self.h5_file, mode='r') as hf: self.n_examples = len(hf.keys())
def __init__(self, h5_file, imgs=None, segs=None, dx=None, compress=True): " Initialize a dataset manager\n\n Parameters\n ----------\n h5_file: str\n A (possibly already existing) hdf5 dataset\n\n imgs: List(ndarray), default=None\n List of images. Necessary when `h5_file` doe not exist.\n\n segs: List(ndarray), default=None\n List of respective segmentations for :code:`imgs`. Necessary when\n `h5_file` doe not exist.\n\n dx: List(ndarray), default=None\n The delta spacing along the axis directions in the provided\n images and segmentations. Used when `h5_file` doesn't exist. In\n that case, the default of None uses all ones.\n\n compress: bool, default=True\n When the image and segmentation data are stored in the hdf5 file\n this flag indicates whether or not to use compression.\n\n Note\n ----\n Either :code:`h5_file` should be the name of an existing h5 file with\n appropriate structure (see :method:`convert_to_hdf5`) or `imgs`\n and `segs` should be non-None, and the hdf5 file will creating\n using the provided images and segmentations and we be named with the\n argument `h5_file`.\n\n " self.h5_file = os.path.abspath(h5_file) self.datasets = {dataset_key: [] for dataset_key in self._iterate_dataset_keys()} if (not os.path.exists(self.h5_file)): if ((imgs is None) or (segs is None)): msg = "Provided `h5_file` {} doesn't exist but no image or segmentation data provided" raise ValueError(msg.format(h5_file)) self.convert_to_hdf5(imgs=imgs, segs=segs, dx=dx, compress=compress) with h5py.File(self.h5_file, mode='r') as hf: self.n_examples = len(hf.keys())<|docstring|>Initialize a dataset manager Parameters ---------- h5_file: str A (possibly already existing) hdf5 dataset imgs: List(ndarray), default=None List of images. Necessary when `h5_file` doe not exist. segs: List(ndarray), default=None List of respective segmentations for :code:`imgs`. Necessary when `h5_file` doe not exist. 
dx: List(ndarray), default=None The delta spacing along the axis directions in the provided images and segmentations. Used when `h5_file` doesn't exist. In that case, the default of None uses all ones. compress: bool, default=True When the image and segmentation data are stored in the hdf5 file this flag indicates whether or not to use compression. Note ---- Either :code:`h5_file` should be the name of an existing h5 file with appropriate structure (see :method:`convert_to_hdf5`) or `imgs` and `segs` should be non-None, and the hdf5 file will creating using the provided images and segmentations and we be named with the argument `h5_file`.<|endoftext|>
304cd483b8735eba580f51aab47dd9463af0770753cdddb2d31cd5b0590162b2
def convert_to_hdf5(self, imgs, segs, dx=None, compress=True): " Convert a dataset of images and boolean segmentations\n to hdf5 format, which is required for the level set routine.\n\n The format assuming `hf` is and h5py `File` is as follows::\n\n 'i'\n |_ img\n |_ seg\n |_ dist\n |_ attrs\n |_ dx\n\n Parameters\n ----------\n imgs: list of ndarray\n The list of image examples for the dataset\n\n segs: list of ndarray\n The list of image examples for the dataset\n\n dx: list of ndarray, shape=(n_examples, img.ndim), default=None\n The resolutions along each axis for each image. The default (None)\n assumes the resolution is 1 along each axis direction, but this\n might not be the case for anisotropic data.\n\n compress: bool, default=True\n If True, :code:`gzip` compression with default compression\n options (level=4) is used for the images and segmentations.\n\n " if os.path.exists(self.h5_file): msg = 'Dataset already exists at {}' raise FileExistsError(msg.format(self.h5_file)) n_examples = len(imgs) ndim = imgs[0].ndim compress_method = ('gzip' if compress else None) if (len(imgs) != len(segs)): msg = 'Mismatch in number of examples: imgs ({}), segs ({})' raise ValueError(msg.format(len(imgs), len(segs))) for i in range(n_examples): img = imgs[i] seg = segs[i] if (img.dtype != numpy.float): msg = 'imgs[{}] (dtype {}) was not float' raise TypeError(msg.format(i, img.dtype)) if (seg.dtype != numpy.bool): msg = 'seg[{}] (dtype {}) was not bool' raise TypeError(msg.format(i, seg.dtype)) if (img.ndim != ndim): msg = 'imgs[{}] (ndim={}) did not have correct dimensions ({})' raise ValueError(msg.format(i, img.ndim, ndim)) if (seg.ndim != ndim): msg = 'segs[{}] (ndim={}) did not have correct dimensions ({})' raise ValueError(msg.format(i, seg.ndim, ndim)) if (img.shape != seg.shape): msg = 'imgs[{}] shape {} does not match segs[{}] shape {}' raise ValueError(msg.format(i, img.shape, i, seg.shape)) if (dx is None): dx = numpy.ones((n_examples, ndim), 
dtype=numpy.float) elif (dx.shape != (n_examples, ndim)): msg = '`dx` was shape {} but should be shape {}' raise ValueError(msg.format(dx.shape, (n_examples, ndim))) hf = h5py.File(self.h5_file, mode='w') for i in range(n_examples): msg = 'Creating dataset entry {} / {}' logger.info(msg.format((i + 1), n_examples)) g = hf.create_group(EXAMPLE_KEY.format(i)) g.create_dataset(IMAGE_KEY, data=imgs[i], compression=compress_method) g.create_dataset(SEGMENTATION_KEY, data=segs[i], compression=compress_method) dist = skfmm.distance(((2 * segs[i].astype(numpy.float)) - 1), dx=dx[i]) g.create_dataset(DISTANCE_TRANSFORM_KEY, data=dist, compression=compress_method) g.attrs['dx'] = dx[i] hf.close()
Convert a dataset of images and boolean segmentations to hdf5 format, which is required for the level set routine. The format assuming `hf` is and h5py `File` is as follows:: 'i' |_ img |_ seg |_ dist |_ attrs |_ dx Parameters ---------- imgs: list of ndarray The list of image examples for the dataset segs: list of ndarray The list of image examples for the dataset dx: list of ndarray, shape=(n_examples, img.ndim), default=None The resolutions along each axis for each image. The default (None) assumes the resolution is 1 along each axis direction, but this might not be the case for anisotropic data. compress: bool, default=True If True, :code:`gzip` compression with default compression options (level=4) is used for the images and segmentations.
lsml/core/datasets_handler.py
convert_to_hdf5
sandeepdas05/lsm-crack-width
24
python
def convert_to_hdf5(self, imgs, segs, dx=None, compress=True): " Convert a dataset of images and boolean segmentations\n to hdf5 format, which is required for the level set routine.\n\n The format assuming `hf` is and h5py `File` is as follows::\n\n 'i'\n |_ img\n |_ seg\n |_ dist\n |_ attrs\n |_ dx\n\n Parameters\n ----------\n imgs: list of ndarray\n The list of image examples for the dataset\n\n segs: list of ndarray\n The list of image examples for the dataset\n\n dx: list of ndarray, shape=(n_examples, img.ndim), default=None\n The resolutions along each axis for each image. The default (None)\n assumes the resolution is 1 along each axis direction, but this\n might not be the case for anisotropic data.\n\n compress: bool, default=True\n If True, :code:`gzip` compression with default compression\n options (level=4) is used for the images and segmentations.\n\n " if os.path.exists(self.h5_file): msg = 'Dataset already exists at {}' raise FileExistsError(msg.format(self.h5_file)) n_examples = len(imgs) ndim = imgs[0].ndim compress_method = ('gzip' if compress else None) if (len(imgs) != len(segs)): msg = 'Mismatch in number of examples: imgs ({}), segs ({})' raise ValueError(msg.format(len(imgs), len(segs))) for i in range(n_examples): img = imgs[i] seg = segs[i] if (img.dtype != numpy.float): msg = 'imgs[{}] (dtype {}) was not float' raise TypeError(msg.format(i, img.dtype)) if (seg.dtype != numpy.bool): msg = 'seg[{}] (dtype {}) was not bool' raise TypeError(msg.format(i, seg.dtype)) if (img.ndim != ndim): msg = 'imgs[{}] (ndim={}) did not have correct dimensions ({})' raise ValueError(msg.format(i, img.ndim, ndim)) if (seg.ndim != ndim): msg = 'segs[{}] (ndim={}) did not have correct dimensions ({})' raise ValueError(msg.format(i, seg.ndim, ndim)) if (img.shape != seg.shape): msg = 'imgs[{}] shape {} does not match segs[{}] shape {}' raise ValueError(msg.format(i, img.shape, i, seg.shape)) if (dx is None): dx = numpy.ones((n_examples, ndim), 
dtype=numpy.float) elif (dx.shape != (n_examples, ndim)): msg = '`dx` was shape {} but should be shape {}' raise ValueError(msg.format(dx.shape, (n_examples, ndim))) hf = h5py.File(self.h5_file, mode='w') for i in range(n_examples): msg = 'Creating dataset entry {} / {}' logger.info(msg.format((i + 1), n_examples)) g = hf.create_group(EXAMPLE_KEY.format(i)) g.create_dataset(IMAGE_KEY, data=imgs[i], compression=compress_method) g.create_dataset(SEGMENTATION_KEY, data=segs[i], compression=compress_method) dist = skfmm.distance(((2 * segs[i].astype(numpy.float)) - 1), dx=dx[i]) g.create_dataset(DISTANCE_TRANSFORM_KEY, data=dist, compression=compress_method) g.attrs['dx'] = dx[i] hf.close()
def convert_to_hdf5(self, imgs, segs, dx=None, compress=True): " Convert a dataset of images and boolean segmentations\n to hdf5 format, which is required for the level set routine.\n\n The format assuming `hf` is and h5py `File` is as follows::\n\n 'i'\n |_ img\n |_ seg\n |_ dist\n |_ attrs\n |_ dx\n\n Parameters\n ----------\n imgs: list of ndarray\n The list of image examples for the dataset\n\n segs: list of ndarray\n The list of image examples for the dataset\n\n dx: list of ndarray, shape=(n_examples, img.ndim), default=None\n The resolutions along each axis for each image. The default (None)\n assumes the resolution is 1 along each axis direction, but this\n might not be the case for anisotropic data.\n\n compress: bool, default=True\n If True, :code:`gzip` compression with default compression\n options (level=4) is used for the images and segmentations.\n\n " if os.path.exists(self.h5_file): msg = 'Dataset already exists at {}' raise FileExistsError(msg.format(self.h5_file)) n_examples = len(imgs) ndim = imgs[0].ndim compress_method = ('gzip' if compress else None) if (len(imgs) != len(segs)): msg = 'Mismatch in number of examples: imgs ({}), segs ({})' raise ValueError(msg.format(len(imgs), len(segs))) for i in range(n_examples): img = imgs[i] seg = segs[i] if (img.dtype != numpy.float): msg = 'imgs[{}] (dtype {}) was not float' raise TypeError(msg.format(i, img.dtype)) if (seg.dtype != numpy.bool): msg = 'seg[{}] (dtype {}) was not bool' raise TypeError(msg.format(i, seg.dtype)) if (img.ndim != ndim): msg = 'imgs[{}] (ndim={}) did not have correct dimensions ({})' raise ValueError(msg.format(i, img.ndim, ndim)) if (seg.ndim != ndim): msg = 'segs[{}] (ndim={}) did not have correct dimensions ({})' raise ValueError(msg.format(i, seg.ndim, ndim)) if (img.shape != seg.shape): msg = 'imgs[{}] shape {} does not match segs[{}] shape {}' raise ValueError(msg.format(i, img.shape, i, seg.shape)) if (dx is None): dx = numpy.ones((n_examples, ndim), 
dtype=numpy.float) elif (dx.shape != (n_examples, ndim)): msg = '`dx` was shape {} but should be shape {}' raise ValueError(msg.format(dx.shape, (n_examples, ndim))) hf = h5py.File(self.h5_file, mode='w') for i in range(n_examples): msg = 'Creating dataset entry {} / {}' logger.info(msg.format((i + 1), n_examples)) g = hf.create_group(EXAMPLE_KEY.format(i)) g.create_dataset(IMAGE_KEY, data=imgs[i], compression=compress_method) g.create_dataset(SEGMENTATION_KEY, data=segs[i], compression=compress_method) dist = skfmm.distance(((2 * segs[i].astype(numpy.float)) - 1), dx=dx[i]) g.create_dataset(DISTANCE_TRANSFORM_KEY, data=dist, compression=compress_method) g.attrs['dx'] = dx[i] hf.close()<|docstring|>Convert a dataset of images and boolean segmentations to hdf5 format, which is required for the level set routine. The format assuming `hf` is and h5py `File` is as follows:: 'i' |_ img |_ seg |_ dist |_ attrs |_ dx Parameters ---------- imgs: list of ndarray The list of image examples for the dataset segs: list of ndarray The list of image examples for the dataset dx: list of ndarray, shape=(n_examples, img.ndim), default=None The resolutions along each axis for each image. The default (None) assumes the resolution is 1 along each axis direction, but this might not be the case for anisotropic data. compress: bool, default=True If True, :code:`gzip` compression with default compression options (level=4) is used for the images and segmentations.<|endoftext|>
b6351c915322daa14ad2902dd67d17cbc953cd8d5ea391779ba3ca68f48617d3
def assign_examples_to_datasets(self, training, validation, testing, subset_size, random_state): ' Assign the dataset example keys to training, validation,\n or testing\n\n training: float, or list of int\n A probability value or a list of indices of examples that belong\n to the training dataset\n\n validation: float, or list of int\n A probability value or a list of indices of examples that belong\n to the validation dataset\n\n testing: float, or list of int\n A probability value or a list of indices of examples that belong\n to the testing dataset\n\n subset_size: int or None\n If datasets are randomly partitioned, then the full dataset\n is first down-sampled to be `subset_size` before partitioning\n\n random_state: numpy.random.RandomState, default=None\n The random state is used only to perform the randomized split\n into when training/validation/testing are provided as probability\n values\n\n ' if (not random_state): random_state = numpy.random.RandomState() msg = 'RandomState not provided; results will not be reproducible' logger.warning(msg) elif (not isinstance(random_state, numpy.random.RandomState)): msg = '`random_state` ({}) not instance numpy.random.RandomState' raise TypeError(msg.format(type(random_state))) if all([isinstance(item, float) for item in (training, validation, testing)]): self.assign_examples_randomly(probabilities=(training, validation, testing), subset_size=subset_size, random_state=random_state) elif all([(isinstance(index_list, list) and [isinstance(index, int) for index in index_list]) for index_list in (training, validation, testing)]): self.assign_examples_by_indices(training_dataset_indices=training, validation_dataset_indices=validation, testing_dataset_indices=testing) else: msg = '`training`, `validation`, and `testing` should be all floats or all list of ints' raise ValueError(msg)
Assign the dataset example keys to training, validation, or testing training: float, or list of int A probability value or a list of indices of examples that belong to the training dataset validation: float, or list of int A probability value or a list of indices of examples that belong to the validation dataset testing: float, or list of int A probability value or a list of indices of examples that belong to the testing dataset subset_size: int or None If datasets are randomly partitioned, then the full dataset is first down-sampled to be `subset_size` before partitioning random_state: numpy.random.RandomState, default=None The random state is used only to perform the randomized split into when training/validation/testing are provided as probability values
lsml/core/datasets_handler.py
assign_examples_to_datasets
sandeepdas05/lsm-crack-width
24
python
def assign_examples_to_datasets(self, training, validation, testing, subset_size, random_state): ' Assign the dataset example keys to training, validation,\n or testing\n\n training: float, or list of int\n A probability value or a list of indices of examples that belong\n to the training dataset\n\n validation: float, or list of int\n A probability value or a list of indices of examples that belong\n to the validation dataset\n\n testing: float, or list of int\n A probability value or a list of indices of examples that belong\n to the testing dataset\n\n subset_size: int or None\n If datasets are randomly partitioned, then the full dataset\n is first down-sampled to be `subset_size` before partitioning\n\n random_state: numpy.random.RandomState, default=None\n The random state is used only to perform the randomized split\n into when training/validation/testing are provided as probability\n values\n\n ' if (not random_state): random_state = numpy.random.RandomState() msg = 'RandomState not provided; results will not be reproducible' logger.warning(msg) elif (not isinstance(random_state, numpy.random.RandomState)): msg = '`random_state` ({}) not instance numpy.random.RandomState' raise TypeError(msg.format(type(random_state))) if all([isinstance(item, float) for item in (training, validation, testing)]): self.assign_examples_randomly(probabilities=(training, validation, testing), subset_size=subset_size, random_state=random_state) elif all([(isinstance(index_list, list) and [isinstance(index, int) for index in index_list]) for index_list in (training, validation, testing)]): self.assign_examples_by_indices(training_dataset_indices=training, validation_dataset_indices=validation, testing_dataset_indices=testing) else: msg = '`training`, `validation`, and `testing` should be all floats or all list of ints' raise ValueError(msg)
def assign_examples_to_datasets(self, training, validation, testing, subset_size, random_state): ' Assign the dataset example keys to training, validation,\n or testing\n\n training: float, or list of int\n A probability value or a list of indices of examples that belong\n to the training dataset\n\n validation: float, or list of int\n A probability value or a list of indices of examples that belong\n to the validation dataset\n\n testing: float, or list of int\n A probability value or a list of indices of examples that belong\n to the testing dataset\n\n subset_size: int or None\n If datasets are randomly partitioned, then the full dataset\n is first down-sampled to be `subset_size` before partitioning\n\n random_state: numpy.random.RandomState, default=None\n The random state is used only to perform the randomized split\n into when training/validation/testing are provided as probability\n values\n\n ' if (not random_state): random_state = numpy.random.RandomState() msg = 'RandomState not provided; results will not be reproducible' logger.warning(msg) elif (not isinstance(random_state, numpy.random.RandomState)): msg = '`random_state` ({}) not instance numpy.random.RandomState' raise TypeError(msg.format(type(random_state))) if all([isinstance(item, float) for item in (training, validation, testing)]): self.assign_examples_randomly(probabilities=(training, validation, testing), subset_size=subset_size, random_state=random_state) elif all([(isinstance(index_list, list) and [isinstance(index, int) for index in index_list]) for index_list in (training, validation, testing)]): self.assign_examples_by_indices(training_dataset_indices=training, validation_dataset_indices=validation, testing_dataset_indices=testing) else: msg = '`training`, `validation`, and `testing` should be all floats or all list of ints' raise ValueError(msg)<|docstring|>Assign the dataset example keys to training, validation, or testing training: float, or list of int A probability value or a list 
of indices of examples that belong to the training dataset validation: float, or list of int A probability value or a list of indices of examples that belong to the validation dataset testing: float, or list of int A probability value or a list of indices of examples that belong to the testing dataset subset_size: int or None If datasets are randomly partitioned, then the full dataset is first down-sampled to be `subset_size` before partitioning random_state: numpy.random.RandomState, default=None The random state is used only to perform the randomized split into when training/validation/testing are provided as probability values<|endoftext|>
520bc53bd1bc7007be5a33b26c22294848301d41407b5722319f769b49ccc760
def assign_examples_by_indices(self, training_dataset_indices, validation_dataset_indices, testing_dataset_indices): ' Specify which of the data should belong to training, validation,\n and testing datasets. Automatic randomization is possible: see keyword\n argument parameters.\n\n Parameters\n ----------\n training_dataset_indices: list of integers\n The list of indices of examples that belong to the training dataset\n\n validation_dataset_indices: list of integers\n The list of indices of examples that belong to the validation\n dataset\n\n testing_dataset_indices: list of integers\n The list of indices of examples that belong to the testing dataset\n\n ' if (not all([isinstance(index, int) for index in training_dataset_indices])): msg = 'Training data indices must be a list of integers' raise ValueError(msg) if (not all([isinstance(index, int) for index in validation_dataset_indices])): msg = 'Validation data indices must be a list of integers' raise ValueError(msg) if (not all([isinstance(index, int) for index in testing_dataset_indices])): msg = 'Training data indices must be a list of integers' raise ValueError(msg) self.datasets[TRAINING_DATASET_KEY] = [self._example_key_from_index(index) for index in training_dataset_indices] self.datasets[VALIDATION_DATASET_KEY] = [self._example_key_from_index(index) for index in validation_dataset_indices] self.datasets[TESTING_DATASET_KEY] = [self._example_key_from_index(index) for index in testing_dataset_indices]
Specify which of the data should belong to training, validation, and testing datasets. Automatic randomization is possible: see keyword argument parameters. Parameters ---------- training_dataset_indices: list of integers The list of indices of examples that belong to the training dataset validation_dataset_indices: list of integers The list of indices of examples that belong to the validation dataset testing_dataset_indices: list of integers The list of indices of examples that belong to the testing dataset
lsml/core/datasets_handler.py
assign_examples_by_indices
sandeepdas05/lsm-crack-width
24
python
def assign_examples_by_indices(self, training_dataset_indices, validation_dataset_indices, testing_dataset_indices): ' Specify which of the data should belong to training, validation,\n and testing datasets. Automatic randomization is possible: see keyword\n argument parameters.\n\n Parameters\n ----------\n training_dataset_indices: list of integers\n The list of indices of examples that belong to the training dataset\n\n validation_dataset_indices: list of integers\n The list of indices of examples that belong to the validation\n dataset\n\n testing_dataset_indices: list of integers\n The list of indices of examples that belong to the testing dataset\n\n ' if (not all([isinstance(index, int) for index in training_dataset_indices])): msg = 'Training data indices must be a list of integers' raise ValueError(msg) if (not all([isinstance(index, int) for index in validation_dataset_indices])): msg = 'Validation data indices must be a list of integers' raise ValueError(msg) if (not all([isinstance(index, int) for index in testing_dataset_indices])): msg = 'Training data indices must be a list of integers' raise ValueError(msg) self.datasets[TRAINING_DATASET_KEY] = [self._example_key_from_index(index) for index in training_dataset_indices] self.datasets[VALIDATION_DATASET_KEY] = [self._example_key_from_index(index) for index in validation_dataset_indices] self.datasets[TESTING_DATASET_KEY] = [self._example_key_from_index(index) for index in testing_dataset_indices]
def assign_examples_by_indices(self, training_dataset_indices, validation_dataset_indices, testing_dataset_indices): ' Specify which of the data should belong to training, validation,\n and testing datasets. Automatic randomization is possible: see keyword\n argument parameters.\n\n Parameters\n ----------\n training_dataset_indices: list of integers\n The list of indices of examples that belong to the training dataset\n\n validation_dataset_indices: list of integers\n The list of indices of examples that belong to the validation\n dataset\n\n testing_dataset_indices: list of integers\n The list of indices of examples that belong to the testing dataset\n\n ' if (not all([isinstance(index, int) for index in training_dataset_indices])): msg = 'Training data indices must be a list of integers' raise ValueError(msg) if (not all([isinstance(index, int) for index in validation_dataset_indices])): msg = 'Validation data indices must be a list of integers' raise ValueError(msg) if (not all([isinstance(index, int) for index in testing_dataset_indices])): msg = 'Training data indices must be a list of integers' raise ValueError(msg) self.datasets[TRAINING_DATASET_KEY] = [self._example_key_from_index(index) for index in training_dataset_indices] self.datasets[VALIDATION_DATASET_KEY] = [self._example_key_from_index(index) for index in validation_dataset_indices] self.datasets[TESTING_DATASET_KEY] = [self._example_key_from_index(index) for index in testing_dataset_indices]<|docstring|>Specify which of the data should belong to training, validation, and testing datasets. Automatic randomization is possible: see keyword argument parameters. 
Parameters ---------- training_dataset_indices: list of integers The list of indices of examples that belong to the training dataset validation_dataset_indices: list of integers The list of indices of examples that belong to the validation dataset testing_dataset_indices: list of integers The list of indices of examples that belong to the testing dataset<|endoftext|>
3ec59a8dbed826680bd61f90f8ee30e6331bf8d136e1ca150dfae4998c3cdd9a
def assign_examples_randomly(self, probabilities, subset_size, random_state): ' Assign examples randomly into training, validation, and testing\n\n Parameters\n ----------\n probabilities: 3-tuple of floats\n The probability of being placed in the training, validation\n or testing\n\n subset_size: int\n If provided, then should be less than or equal to\n :code:`len(keys)`. If given, then :code:`keys` is first\n sub-sampled by :code:`subset_size`\n before splitting.\n\n random_state: numpy.random.RandomState\n Provide for reproducible results\n\n ' with self.open_h5_file() as hf: keys = list(hf.keys()) if ((subset_size is not None) and (subset_size > len(keys))): raise ValueError('`subset_size` must be <= `len(keys)`') if (subset_size is None): subset_size = len(keys) sub_keys = random_state.choice(keys, replace=False, size=subset_size) n_keys = len(sub_keys) indicators = random_state.multinomial(n=1, pvals=probabilities, size=n_keys) dataset_keys = self._iterate_dataset_keys() for (idataset_key, dataset_key) in enumerate(dataset_keys): self.datasets[dataset_key] = [self._example_key_from_index(index) for (index, indicator) in enumerate(indicators) if (list(indicator).index(1) == idataset_key)]
Assign examples randomly into training, validation, and testing Parameters ---------- probabilities: 3-tuple of floats The probability of being placed in the training, validation or testing subset_size: int If provided, then should be less than or equal to :code:`len(keys)`. If given, then :code:`keys` is first sub-sampled by :code:`subset_size` before splitting. random_state: numpy.random.RandomState Provide for reproducible results
lsml/core/datasets_handler.py
assign_examples_randomly
sandeepdas05/lsm-crack-width
24
python
def assign_examples_randomly(self, probabilities, subset_size, random_state): ' Assign examples randomly into training, validation, and testing\n\n Parameters\n ----------\n probabilities: 3-tuple of floats\n The probability of being placed in the training, validation\n or testing\n\n subset_size: int\n If provided, then should be less than or equal to\n :code:`len(keys)`. If given, then :code:`keys` is first\n sub-sampled by :code:`subset_size`\n before splitting.\n\n random_state: numpy.random.RandomState\n Provide for reproducible results\n\n ' with self.open_h5_file() as hf: keys = list(hf.keys()) if ((subset_size is not None) and (subset_size > len(keys))): raise ValueError('`subset_size` must be <= `len(keys)`') if (subset_size is None): subset_size = len(keys) sub_keys = random_state.choice(keys, replace=False, size=subset_size) n_keys = len(sub_keys) indicators = random_state.multinomial(n=1, pvals=probabilities, size=n_keys) dataset_keys = self._iterate_dataset_keys() for (idataset_key, dataset_key) in enumerate(dataset_keys): self.datasets[dataset_key] = [self._example_key_from_index(index) for (index, indicator) in enumerate(indicators) if (list(indicator).index(1) == idataset_key)]
def assign_examples_randomly(self, probabilities, subset_size, random_state): ' Assign examples randomly into training, validation, and testing\n\n Parameters\n ----------\n probabilities: 3-tuple of floats\n The probability of being placed in the training, validation\n or testing\n\n subset_size: int\n If provided, then should be less than or equal to\n :code:`len(keys)`. If given, then :code:`keys` is first\n sub-sampled by :code:`subset_size`\n before splitting.\n\n random_state: numpy.random.RandomState\n Provide for reproducible results\n\n ' with self.open_h5_file() as hf: keys = list(hf.keys()) if ((subset_size is not None) and (subset_size > len(keys))): raise ValueError('`subset_size` must be <= `len(keys)`') if (subset_size is None): subset_size = len(keys) sub_keys = random_state.choice(keys, replace=False, size=subset_size) n_keys = len(sub_keys) indicators = random_state.multinomial(n=1, pvals=probabilities, size=n_keys) dataset_keys = self._iterate_dataset_keys() for (idataset_key, dataset_key) in enumerate(dataset_keys): self.datasets[dataset_key] = [self._example_key_from_index(index) for (index, indicator) in enumerate(indicators) if (list(indicator).index(1) == idataset_key)]<|docstring|>Assign examples randomly into training, validation, and testing Parameters ---------- probabilities: 3-tuple of floats The probability of being placed in the training, validation or testing subset_size: int If provided, then should be less than or equal to :code:`len(keys)`. If given, then :code:`keys` is first sub-sampled by :code:`subset_size` before splitting. random_state: numpy.random.RandomState Provide for reproducible results<|endoftext|>
1a54f94c6d8e68e3ef86901afcb44e7e68caf49838d507f89310d21677d53158
@contextlib.contextmanager def open_h5_file(self): ' Opens the data file\n ' h5 = None try: h5 = h5py.File(self.h5_file, mode='r') (yield h5) finally: if h5: h5.close()
Opens the data file
lsml/core/datasets_handler.py
open_h5_file
sandeepdas05/lsm-crack-width
24
python
@contextlib.contextmanager def open_h5_file(self): ' \n ' h5 = None try: h5 = h5py.File(self.h5_file, mode='r') (yield h5) finally: if h5: h5.close()
@contextlib.contextmanager def open_h5_file(self): ' \n ' h5 = None try: h5 = h5py.File(self.h5_file, mode='r') (yield h5) finally: if h5: h5.close()<|docstring|>Opens the data file<|endoftext|>
8c05c33324c8931f650f6516a212c5bd488d209d41905afcd46500987b063154
def _iterate_dataset_keys(self): ' Iterates through the dataset keys\n ' for dataset_key in DATASET_KEYS: (yield dataset_key)
Iterates through the dataset keys
lsml/core/datasets_handler.py
_iterate_dataset_keys
sandeepdas05/lsm-crack-width
24
python
def _iterate_dataset_keys(self): ' \n ' for dataset_key in DATASET_KEYS: (yield dataset_key)
def _iterate_dataset_keys(self): ' \n ' for dataset_key in DATASET_KEYS: (yield dataset_key)<|docstring|>Iterates through the dataset keys<|endoftext|>
d8330114bf500c2de39fe7895d61fafadcee72228758a48ef2c4eb8755381672
def _example_key_from_index(self, index): ' Get the example key for the corresponding index\n ' return EXAMPLE_KEY.format(index)
Get the example key for the corresponding index
lsml/core/datasets_handler.py
_example_key_from_index
sandeepdas05/lsm-crack-width
24
python
def _example_key_from_index(self, index): ' \n ' return EXAMPLE_KEY.format(index)
def _example_key_from_index(self, index): ' \n ' return EXAMPLE_KEY.format(index)<|docstring|>Get the example key for the corresponding index<|endoftext|>
f5c5de946fc24d6292d7cb40da78097f3fbe008114ee8d910a01458c3b006c1e
def get_dataset_for_example_key(self, example_key): ' Get the dataset for the corresponding example key\n\n Returns\n -------\n dataset_key: str or None\n One of TRAINING_DATASET_KEY, VALIDATION_DATASET_KEY, or\n TESTING_DATASET_KEY if found; otherwise, returns None.\n\n ' if self.in_training_dataset(example_key): return TRAINING_DATASET_KEY elif self.in_validation_dataset(example_key): return VALIDATION_DATASET_KEY elif self.in_testing_dataset(example_key): return TESTING_DATASET_KEY else: return None
Get the dataset for the corresponding example key Returns ------- dataset_key: str or None One of TRAINING_DATASET_KEY, VALIDATION_DATASET_KEY, or TESTING_DATASET_KEY if found; otherwise, returns None.
lsml/core/datasets_handler.py
get_dataset_for_example_key
sandeepdas05/lsm-crack-width
24
python
def get_dataset_for_example_key(self, example_key): ' Get the dataset for the corresponding example key\n\n Returns\n -------\n dataset_key: str or None\n One of TRAINING_DATASET_KEY, VALIDATION_DATASET_KEY, or\n TESTING_DATASET_KEY if found; otherwise, returns None.\n\n ' if self.in_training_dataset(example_key): return TRAINING_DATASET_KEY elif self.in_validation_dataset(example_key): return VALIDATION_DATASET_KEY elif self.in_testing_dataset(example_key): return TESTING_DATASET_KEY else: return None
def get_dataset_for_example_key(self, example_key): ' Get the dataset for the corresponding example key\n\n Returns\n -------\n dataset_key: str or None\n One of TRAINING_DATASET_KEY, VALIDATION_DATASET_KEY, or\n TESTING_DATASET_KEY if found; otherwise, returns None.\n\n ' if self.in_training_dataset(example_key): return TRAINING_DATASET_KEY elif self.in_validation_dataset(example_key): return VALIDATION_DATASET_KEY elif self.in_testing_dataset(example_key): return TESTING_DATASET_KEY else: return None<|docstring|>Get the dataset for the corresponding example key Returns ------- dataset_key: str or None One of TRAINING_DATASET_KEY, VALIDATION_DATASET_KEY, or TESTING_DATASET_KEY if found; otherwise, returns None.<|endoftext|>
58e9ffc72b1bafe2d74a1b4b8ab6e5d90af14ff9905351415e05fcea611b08e9
def get_example_by_index(self, index): ' Get the `DatasetExample` corresponding to `index`\n ' with self.open_h5_file() as hf: example_key = self._example_key_from_index(index) example = DatasetExample(index=index, key=example_key, img=hf[example_key][IMAGE_KEY][...], seg=hf[example_key][SEGMENTATION_KEY][...], dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...], dx=hf[example_key].attrs['dx']) return example
Get the `DatasetExample` corresponding to `index`
lsml/core/datasets_handler.py
get_example_by_index
sandeepdas05/lsm-crack-width
24
python
def get_example_by_index(self, index): ' \n ' with self.open_h5_file() as hf: example_key = self._example_key_from_index(index) example = DatasetExample(index=index, key=example_key, img=hf[example_key][IMAGE_KEY][...], seg=hf[example_key][SEGMENTATION_KEY][...], dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...], dx=hf[example_key].attrs['dx']) return example
def get_example_by_index(self, index): ' \n ' with self.open_h5_file() as hf: example_key = self._example_key_from_index(index) example = DatasetExample(index=index, key=example_key, img=hf[example_key][IMAGE_KEY][...], seg=hf[example_key][SEGMENTATION_KEY][...], dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...], dx=hf[example_key].attrs['dx']) return example<|docstring|>Get the `DatasetExample` corresponding to `index`<|endoftext|>
3f412c6ed6695cc84f3dfe1f059602232bf01484a7308f08ff19d0c05ae559f0
def iterate_examples(self, dataset_key=None): ' Iterates through the hdf5 dataset\n\n Parameters\n ----------\n dataset_key: str, default=None\n Limit the iterations to the given dataset; None yields all examples\n\n Returns\n -------\n dataset: generator\n The return generator returns\n `(i, key, img[i], seg[i], dist[i], dx[i])`\n at each iteration, where i is the index and key is the\n key into the hdf5 dataset for the respective index\n\n ' with self.open_h5_file() as hf: for i in range(self.n_examples): example_key = self._example_key_from_index(i) if (dataset_key and (not self._in_dataset(example_key, dataset_key))): continue (yield DatasetExample(index=i, key=example_key, img=hf[example_key][IMAGE_KEY][...], seg=hf[example_key][SEGMENTATION_KEY][...], dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...], dx=hf[example_key].attrs['dx']))
Iterates through the hdf5 dataset Parameters ---------- dataset_key: str, default=None Limit the iterations to the given dataset; None yields all examples Returns ------- dataset: generator The return generator returns `(i, key, img[i], seg[i], dist[i], dx[i])` at each iteration, where i is the index and key is the key into the hdf5 dataset for the respective index
lsml/core/datasets_handler.py
iterate_examples
sandeepdas05/lsm-crack-width
24
python
def iterate_examples(self, dataset_key=None): ' Iterates through the hdf5 dataset\n\n Parameters\n ----------\n dataset_key: str, default=None\n Limit the iterations to the given dataset; None yields all examples\n\n Returns\n -------\n dataset: generator\n The return generator returns\n `(i, key, img[i], seg[i], dist[i], dx[i])`\n at each iteration, where i is the index and key is the\n key into the hdf5 dataset for the respective index\n\n ' with self.open_h5_file() as hf: for i in range(self.n_examples): example_key = self._example_key_from_index(i) if (dataset_key and (not self._in_dataset(example_key, dataset_key))): continue (yield DatasetExample(index=i, key=example_key, img=hf[example_key][IMAGE_KEY][...], seg=hf[example_key][SEGMENTATION_KEY][...], dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...], dx=hf[example_key].attrs['dx']))
def iterate_examples(self, dataset_key=None): ' Iterates through the hdf5 dataset\n\n Parameters\n ----------\n dataset_key: str, default=None\n Limit the iterations to the given dataset; None yields all examples\n\n Returns\n -------\n dataset: generator\n The return generator returns\n `(i, key, img[i], seg[i], dist[i], dx[i])`\n at each iteration, where i is the index and key is the\n key into the hdf5 dataset for the respective index\n\n ' with self.open_h5_file() as hf: for i in range(self.n_examples): example_key = self._example_key_from_index(i) if (dataset_key and (not self._in_dataset(example_key, dataset_key))): continue (yield DatasetExample(index=i, key=example_key, img=hf[example_key][IMAGE_KEY][...], seg=hf[example_key][SEGMENTATION_KEY][...], dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...], dx=hf[example_key].attrs['dx']))<|docstring|>Iterates through the hdf5 dataset Parameters ---------- dataset_key: str, default=None Limit the iterations to the given dataset; None yields all examples Returns ------- dataset: generator The return generator returns `(i, key, img[i], seg[i], dist[i], dx[i])` at each iteration, where i is the index and key is the key into the hdf5 dataset for the respective index<|endoftext|>
94119bd4c351f25c7f212d7db770d9ee761b86696349198ca93f9f1a6e2908f1
def in_training_dataset(self, example_key): ' Returns True if example key is in the training dataset\n ' return self._in_dataset(example_key, TRAINING_DATASET_KEY)
Returns True if example key is in the training dataset
lsml/core/datasets_handler.py
in_training_dataset
sandeepdas05/lsm-crack-width
24
python
def in_training_dataset(self, example_key): ' \n ' return self._in_dataset(example_key, TRAINING_DATASET_KEY)
def in_training_dataset(self, example_key): ' \n ' return self._in_dataset(example_key, TRAINING_DATASET_KEY)<|docstring|>Returns True if example key is in the training dataset<|endoftext|>
ebd73804929f061fcb700f08cafd8c596aec437e90c880a7fc59f1bfaf3012a5
def in_validation_dataset(self, example_key): ' Returns True if example key is in the validation dataset\n ' return self._in_dataset(example_key, VALIDATION_DATASET_KEY)
Returns True if example key is in the validation dataset
lsml/core/datasets_handler.py
in_validation_dataset
sandeepdas05/lsm-crack-width
24
python
def in_validation_dataset(self, example_key): ' \n ' return self._in_dataset(example_key, VALIDATION_DATASET_KEY)
def in_validation_dataset(self, example_key): ' \n ' return self._in_dataset(example_key, VALIDATION_DATASET_KEY)<|docstring|>Returns True if example key is in the validation dataset<|endoftext|>
30eeb4371ab427c1f7865b3225a12d4ff282dc6ae51c1eac959306507f37fe25
def in_testing_dataset(self, example_key): ' Returns True if example key is in the testing dataset\n ' return self._in_dataset(example_key, TESTING_DATASET_KEY)
Returns True if example key is in the testing dataset
lsml/core/datasets_handler.py
in_testing_dataset
sandeepdas05/lsm-crack-width
24
python
def in_testing_dataset(self, example_key): ' \n ' return self._in_dataset(example_key, TESTING_DATASET_KEY)
def in_testing_dataset(self, example_key): ' \n ' return self._in_dataset(example_key, TESTING_DATASET_KEY)<|docstring|>Returns True if example key is in the testing dataset<|endoftext|>
1286b384d587f8fe6d080c3fc61a55f0d3efa5d622ded616ead04dd53a13dc35
def replace(src, dst): '\n Replace a file with another.\n\n :type src: str\n :param src: Source file path\n :type dst: str\n :param dst: Destination path, the file to replace\n ' _copy_or_init_permissions(target_file=src, source_file=dst) try: return os.rename(src, dst) except rename_exception_to_handle as error: if (error.errno != errno.EEXIST): raise import ctypes import ctypes.wintypes replace_file = ctypes.windll.kernel32.ReplaceFile replace_file.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.wintypes.DWORD, ctypes.wintypes.LPVOID, ctypes.wintypes.LPVOID] replace_succeeded = replace_file(ctypes.c_wchar_p(_path_to_unicode(dst)), ctypes.c_wchar_p(_path_to_unicode(src)), None, 0, None, None) if (not replace_succeeded): raise OSError(('Failed to replace %r with %r' % (dst, src)))
Replace a file with another. :type src: str :param src: Source file path :type dst: str :param dst: Destination path, the file to replace
prequ/file_replacer.py
replace
wgarlock/prelaunch
13
python
def replace(src, dst): '\n Replace a file with another.\n\n :type src: str\n :param src: Source file path\n :type dst: str\n :param dst: Destination path, the file to replace\n ' _copy_or_init_permissions(target_file=src, source_file=dst) try: return os.rename(src, dst) except rename_exception_to_handle as error: if (error.errno != errno.EEXIST): raise import ctypes import ctypes.wintypes replace_file = ctypes.windll.kernel32.ReplaceFile replace_file.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.wintypes.DWORD, ctypes.wintypes.LPVOID, ctypes.wintypes.LPVOID] replace_succeeded = replace_file(ctypes.c_wchar_p(_path_to_unicode(dst)), ctypes.c_wchar_p(_path_to_unicode(src)), None, 0, None, None) if (not replace_succeeded): raise OSError(('Failed to replace %r with %r' % (dst, src)))
def replace(src, dst): '\n Replace a file with another.\n\n :type src: str\n :param src: Source file path\n :type dst: str\n :param dst: Destination path, the file to replace\n ' _copy_or_init_permissions(target_file=src, source_file=dst) try: return os.rename(src, dst) except rename_exception_to_handle as error: if (error.errno != errno.EEXIST): raise import ctypes import ctypes.wintypes replace_file = ctypes.windll.kernel32.ReplaceFile replace_file.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.wintypes.DWORD, ctypes.wintypes.LPVOID, ctypes.wintypes.LPVOID] replace_succeeded = replace_file(ctypes.c_wchar_p(_path_to_unicode(dst)), ctypes.c_wchar_p(_path_to_unicode(src)), None, 0, None, None) if (not replace_succeeded): raise OSError(('Failed to replace %r with %r' % (dst, src)))<|docstring|>Replace a file with another. :type src: str :param src: Source file path :type dst: str :param dst: Destination path, the file to replace<|endoftext|>
84d9b2674f4e753f96ead386f98a8d30d194c311f6c59ad7b90723c386bb34c8
def _copy_or_init_permissions(target_file, source_file): '\n Set target file permissions from source file or from umask.\n\n If source file exists, copy its permissions. Otherwise set default\n permissions using current umask.\n ' try: shutil.copymode(source_file, target_file) except OSError: os.chmod(target_file, (438 & (~ _get_umask())))
Set target file permissions from source file or from umask. If source file exists, copy its permissions. Otherwise set default permissions using current umask.
prequ/file_replacer.py
_copy_or_init_permissions
wgarlock/prelaunch
13
python
def _copy_or_init_permissions(target_file, source_file): '\n Set target file permissions from source file or from umask.\n\n If source file exists, copy its permissions. Otherwise set default\n permissions using current umask.\n ' try: shutil.copymode(source_file, target_file) except OSError: os.chmod(target_file, (438 & (~ _get_umask())))
def _copy_or_init_permissions(target_file, source_file): '\n Set target file permissions from source file or from umask.\n\n If source file exists, copy its permissions. Otherwise set default\n permissions using current umask.\n ' try: shutil.copymode(source_file, target_file) except OSError: os.chmod(target_file, (438 & (~ _get_umask())))<|docstring|>Set target file permissions from source file or from umask. If source file exists, copy its permissions. Otherwise set default permissions using current umask.<|endoftext|>
9b11debeecaf34e3aa0a84ea5eee2cb4b040edd785bcab6cd4748784f52b5738
def _get_umask(): '\n Get current umask (without changing it as os.umask does).\n ' umask = os.umask(0) os.umask(umask) return umask
Get current umask (without changing it as os.umask does).
prequ/file_replacer.py
_get_umask
wgarlock/prelaunch
13
python
def _get_umask(): '\n \n ' umask = os.umask(0) os.umask(umask) return umask
def _get_umask(): '\n \n ' umask = os.umask(0) os.umask(umask) return umask<|docstring|>Get current umask (without changing it as os.umask does).<|endoftext|>
3a90a24c305414e45db3566c99f881195f8e1367a72305d4bb2c1dd0246467a2
def _path_to_unicode(path): "\n Convert filesystem path to unicode.\n\n >>> if sys.getfilesystemencoding().lower() in ['utf-8', 'utf-16']:\n ... encoded_path = u'X€Y'.encode(sys.getfilesystemencoding())\n ... assert _path_to_unicode(encoded_path) == u'X€Y'\n\n >>> assert _path_to_unicode(b'some ascii content') == u'some ascii content'\n >>> assert _path_to_unicode(u'some unicode') == u'some unicode'\n >>> assert type(_path_to_unicode(b'x')) == type(u'')\n >>> assert type(_path_to_unicode(u'x')) == type(u'')\n\n :type path: bytes|unicode\n :rtype: unicode\n " if isinstance(path, text_type): return path encoding = (sys.getfilesystemencoding() or sys.getdefaultencoding()) return path.decode(encoding)
Convert filesystem path to unicode. >>> if sys.getfilesystemencoding().lower() in ['utf-8', 'utf-16']: ... encoded_path = u'X€Y'.encode(sys.getfilesystemencoding()) ... assert _path_to_unicode(encoded_path) == u'X€Y' >>> assert _path_to_unicode(b'some ascii content') == u'some ascii content' >>> assert _path_to_unicode(u'some unicode') == u'some unicode' >>> assert type(_path_to_unicode(b'x')) == type(u'') >>> assert type(_path_to_unicode(u'x')) == type(u'') :type path: bytes|unicode :rtype: unicode
prequ/file_replacer.py
_path_to_unicode
wgarlock/prelaunch
13
python
def _path_to_unicode(path): "\n Convert filesystem path to unicode.\n\n >>> if sys.getfilesystemencoding().lower() in ['utf-8', 'utf-16']:\n ... encoded_path = u'X€Y'.encode(sys.getfilesystemencoding())\n ... assert _path_to_unicode(encoded_path) == u'X€Y'\n\n >>> assert _path_to_unicode(b'some ascii content') == u'some ascii content'\n >>> assert _path_to_unicode(u'some unicode') == u'some unicode'\n >>> assert type(_path_to_unicode(b'x')) == type(u)\n >>> assert type(_path_to_unicode(u'x')) == type(u)\n\n :type path: bytes|unicode\n :rtype: unicode\n " if isinstance(path, text_type): return path encoding = (sys.getfilesystemencoding() or sys.getdefaultencoding()) return path.decode(encoding)
def _path_to_unicode(path): "\n Convert filesystem path to unicode.\n\n >>> if sys.getfilesystemencoding().lower() in ['utf-8', 'utf-16']:\n ... encoded_path = u'X€Y'.encode(sys.getfilesystemencoding())\n ... assert _path_to_unicode(encoded_path) == u'X€Y'\n\n >>> assert _path_to_unicode(b'some ascii content') == u'some ascii content'\n >>> assert _path_to_unicode(u'some unicode') == u'some unicode'\n >>> assert type(_path_to_unicode(b'x')) == type(u)\n >>> assert type(_path_to_unicode(u'x')) == type(u)\n\n :type path: bytes|unicode\n :rtype: unicode\n " if isinstance(path, text_type): return path encoding = (sys.getfilesystemencoding() or sys.getdefaultencoding()) return path.decode(encoding)<|docstring|>Convert filesystem path to unicode. >>> if sys.getfilesystemencoding().lower() in ['utf-8', 'utf-16']: ... encoded_path = u'X€Y'.encode(sys.getfilesystemencoding()) ... assert _path_to_unicode(encoded_path) == u'X€Y' >>> assert _path_to_unicode(b'some ascii content') == u'some ascii content' >>> assert _path_to_unicode(u'some unicode') == u'some unicode' >>> assert type(_path_to_unicode(b'x')) == type(u'') >>> assert type(_path_to_unicode(u'x')) == type(u'') :type path: bytes|unicode :rtype: unicode<|endoftext|>
1878341ad4037c7adb4aeb9d5844e56f870397bfaf7472164900faf298565ee5
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=None, extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False, use_date_for: Optional[List[str]]=None, parent_path: Optional[str]='') -> Union[(StructType, DataType)]: '\n A patient\'s point-in-time set of recommendations (i.e. forecasting) according\n to a published schedule with optional supporting justification.\n\n\n resourceType: This is a ImmunizationRecommendation resource\n\n id: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n\n meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content might not always be associated with\n version changes to the resource.\n\n implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content. Often,\n this is a reference to an implementation guide that defines the special rules\n along with other profiles etc.\n\n language: The base language in which the resource is written.\n\n text: A human-readable narrative that contains a summary of the resource and can be\n used to represent the content of the resource to a human. 
The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n\n contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the resource. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the resource and that modifies the understanding of the element\n that contains it and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer is allowed to define an extension, there is a set of requirements\n that SHALL be met as part of the definition of the extension. 
Applications\n processing a resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n identifier: A unique identifier assigned to this particular recommendation record.\n\n patient: The patient the recommendation(s) are for.\n\n date: The date the immunization recommendation(s) were created.\n\n authority: Indicates the authority who published the protocol (e.g. ACIP).\n\n recommendation: Vaccine administration recommendations.\n\n ' if (extension_fields is None): extension_fields = ['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueReference', 'valueCodeableConcept', 'valueAddress'] from spark_fhir_schemas.r4.simple_types.id import idSchema from spark_fhir_schemas.r4.complex_types.meta import MetaSchema from spark_fhir_schemas.r4.simple_types.uri import uriSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema from spark_fhir_schemas.r4.simple_types.datetime import dateTimeSchema from spark_fhir_schemas.r4.complex_types.immunizationrecommendation_recommendation import ImmunizationRecommendation_RecommendationSchema if ((max_recursion_limit and (nesting_list.count('ImmunizationRecommendation') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: 
List[str] = (nesting_list + ['ImmunizationRecommendation']) my_parent_path = ((parent_path + '.immunizationrecommendation') if parent_path else 'immunizationrecommendation') schema = StructType([StructField('resourceType', StringType(), True), StructField('id', idSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.id')), True), StructField('meta', MetaSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('implicitRules', uriSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.implicitrules')), True), StructField('language', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, 
include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.language')), True), StructField('text', NarrativeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('contained', ArrayType(ResourceListSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), 
True), StructField('identifier', ArrayType(IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('patient', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('date', dateTimeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.date')), True), StructField('authority', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('recommendation', 
ArrayType(ImmunizationRecommendation_RecommendationSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
A patient's point-in-time set of recommendations (i.e. forecasting) according to a published schedule with optional supporting justification. resourceType: This is a ImmunizationRecommendation resource id: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. language: The base language in which the resource is written. text: A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. extension: May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. 
modifierExtension: May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). identifier: A unique identifier assigned to this particular recommendation record. patient: The patient the recommendation(s) are for. date: The date the immunization recommendation(s) were created. authority: Indicates the authority who published the protocol (e.g. ACIP). recommendation: Vaccine administration recommendations.
spark_fhir_schemas/r4/resources/immunizationrecommendation.py
get_schema
icanbwell/SparkFhirSchemas
0
python
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=None, extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False, use_date_for: Optional[List[str]]=None, parent_path: Optional[str]=) -> Union[(StructType, DataType)]: '\n A patient\'s point-in-time set of recommendations (i.e. forecasting) according\n to a published schedule with optional supporting justification.\n\n\n resourceType: This is a ImmunizationRecommendation resource\n\n id: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n\n meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content might not always be associated with\n version changes to the resource.\n\n implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content. Often,\n this is a reference to an implementation guide that defines the special rules\n along with other profiles etc.\n\n language: The base language in which the resource is written.\n\n text: A human-readable narrative that contains a summary of the resource and can be\n used to represent the content of the resource to a human. 
The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n\n contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the resource. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the resource and that modifies the understanding of the element\n that contains it and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer is allowed to define an extension, there is a set of requirements\n that SHALL be met as part of the definition of the extension. 
Applications\n processing a resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n identifier: A unique identifier assigned to this particular recommendation record.\n\n patient: The patient the recommendation(s) are for.\n\n date: The date the immunization recommendation(s) were created.\n\n authority: Indicates the authority who published the protocol (e.g. ACIP).\n\n recommendation: Vaccine administration recommendations.\n\n ' if (extension_fields is None): extension_fields = ['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueReference', 'valueCodeableConcept', 'valueAddress'] from spark_fhir_schemas.r4.simple_types.id import idSchema from spark_fhir_schemas.r4.complex_types.meta import MetaSchema from spark_fhir_schemas.r4.simple_types.uri import uriSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema from spark_fhir_schemas.r4.simple_types.datetime import dateTimeSchema from spark_fhir_schemas.r4.complex_types.immunizationrecommendation_recommendation import ImmunizationRecommendation_RecommendationSchema if ((max_recursion_limit and (nesting_list.count('ImmunizationRecommendation') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: 
List[str] = (nesting_list + ['ImmunizationRecommendation']) my_parent_path = ((parent_path + '.immunizationrecommendation') if parent_path else 'immunizationrecommendation') schema = StructType([StructField('resourceType', StringType(), True), StructField('id', idSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.id')), True), StructField('meta', MetaSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('implicitRules', uriSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.implicitrules')), True), StructField('language', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, 
include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.language')), True), StructField('text', NarrativeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('contained', ArrayType(ResourceListSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), 
True), StructField('identifier', ArrayType(IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('patient', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('date', dateTimeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.date')), True), StructField('authority', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('recommendation', 
ArrayType(ImmunizationRecommendation_RecommendationSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=None, extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False, use_date_for: Optional[List[str]]=None, parent_path: Optional[str]=) -> Union[(StructType, DataType)]: '\n A patient\'s point-in-time set of recommendations (i.e. forecasting) according\n to a published schedule with optional supporting justification.\n\n\n resourceType: This is a ImmunizationRecommendation resource\n\n id: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n\n meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content might not always be associated with\n version changes to the resource.\n\n implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content. Often,\n this is a reference to an implementation guide that defines the special rules\n along with other profiles etc.\n\n language: The base language in which the resource is written.\n\n text: A human-readable narrative that contains a summary of the resource and can be\n used to represent the content of the resource to a human. 
The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n\n contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the resource. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the resource and that modifies the understanding of the element\n that contains it and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer is allowed to define an extension, there is a set of requirements\n that SHALL be met as part of the definition of the extension. 
Applications\n processing a resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n identifier: A unique identifier assigned to this particular recommendation record.\n\n patient: The patient the recommendation(s) are for.\n\n date: The date the immunization recommendation(s) were created.\n\n authority: Indicates the authority who published the protocol (e.g. ACIP).\n\n recommendation: Vaccine administration recommendations.\n\n ' if (extension_fields is None): extension_fields = ['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueReference', 'valueCodeableConcept', 'valueAddress'] from spark_fhir_schemas.r4.simple_types.id import idSchema from spark_fhir_schemas.r4.complex_types.meta import MetaSchema from spark_fhir_schemas.r4.simple_types.uri import uriSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema from spark_fhir_schemas.r4.simple_types.datetime import dateTimeSchema from spark_fhir_schemas.r4.complex_types.immunizationrecommendation_recommendation import ImmunizationRecommendation_RecommendationSchema if ((max_recursion_limit and (nesting_list.count('ImmunizationRecommendation') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: 
List[str] = (nesting_list + ['ImmunizationRecommendation']) my_parent_path = ((parent_path + '.immunizationrecommendation') if parent_path else 'immunizationrecommendation') schema = StructType([StructField('resourceType', StringType(), True), StructField('id', idSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.id')), True), StructField('meta', MetaSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('implicitRules', uriSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.implicitrules')), True), StructField('language', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, 
include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.language')), True), StructField('text', NarrativeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('contained', ArrayType(ResourceListSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), 
True), StructField('identifier', ArrayType(IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('patient', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('date', dateTimeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=(my_parent_path + '.date')), True), StructField('authority', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('recommendation', 
ArrayType(ImmunizationRecommendation_RecommendationSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema<|docstring|>A patient's point-in-time set of recommendations (i.e. forecasting) according to a published schedule with optional supporting justification. resourceType: This is a ImmunizationRecommendation resource id: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. language: The base language in which the resource is written. text: A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. 
Resource definitions may define what content should be represented in the narrative to ensure clinical safety. contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. extension: May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). identifier: A unique identifier assigned to this particular recommendation record. patient: The patient the recommendation(s) are for. date: The date the immunization recommendation(s) were created. authority: Indicates the authority who published the protocol (e.g. ACIP). recommendation: Vaccine administration recommendations.<|endoftext|>
81025a733f29c88514879423f02ace69870e13c119f484a96c9d71e4f465770d
def heatmap(logdir: str, mode: str='single', skip_plot: bool=False, freq: int=100, framerate: int=6, gen: int=None): 'Plots the heatmaps for archives in a logdir.\n\n Args:\n logdir: Path to experiment logging directory.\n mode:\n - "single": plot the archive and save to logdir /\n `heatmap_archive_{gen}.{pdf,png,svg}`\n - "video": plot every `freq` generations and save to the directory\n logdir / `heatmap_archive`; logdir / `heatmap_archive.mp4` is also\n created from these images with ffmpeg.\n skip_plot: Skip plotting the heatmaps and just make the video. Only\n applies to "video" mode.\n freq: Frequency (in terms of generations) to plot heatmaps for video.\n Only applies to "video" mode.\n framerate: Framerate for the video. Only applies to "video" mode.\n gen: Generation to plot -- only applies to "single" mode.\n None indicates the final gen.\n ' logdir = load_experiment(logdir) analysis_id = get_analysis_id() if (len(gin.query_parameter('GridArchive.dims')) != 2): logger.error('Heatmaps not supported for non-2D archives') return plot_kwargs = {'square': True, 'cmap': 'viridis', 'pcm_kwargs': {'rasterized': True}} plot_kwargs.update({'vmin': ANALYSIS_INFO[analysis_id]['min_score'], 'vmax': ANALYSIS_INFO[analysis_id]['max_score']}) total_gens = load_metrics(logdir).total_itrs gen = (total_gens if (gen is None) else gen) if (mode == 'single'): plot_generation(mode, logdir, analysis_id, gen, plot_kwargs, [f'heatmap_archive_{gen}.pdf', f'heatmap_archive_{gen}.png', f'heatmap_archive_{gen}.svg']) elif (mode == 'video'): if (not skip_plot): shutil.rmtree(logdir.pdir('heatmap_archive/'), ignore_errors=True) digits = int(np.ceil(np.log10((total_gens + 1)))) for g in range((total_gens + 1)): try: if (((g % freq) == 0) or (g == total_gens)): plot_generation(mode, logdir, analysis_id, g, plot_kwargs, [f'heatmap_archive/{g:0{digits}}.png']) except ValueError as e: logger.error('ValueError caught. 
Have you tried setting the max objective in objectives/__init__.py ?') raise e os.system(f"""ffmpeg -an -r {framerate} -i "{logdir.file('heatmap_archive/%*.png')}" -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 "{logdir.file('heatmap_archive.mp4')}" -y """) else: raise ValueError(f"Unknown mode '{mode}'")
Plots the heatmaps for archives in a logdir. Args: logdir: Path to experiment logging directory. mode: - "single": plot the archive and save to logdir / `heatmap_archive_{gen}.{pdf,png,svg}` - "video": plot every `freq` generations and save to the directory logdir / `heatmap_archive`; logdir / `heatmap_archive.mp4` is also created from these images with ffmpeg. skip_plot: Skip plotting the heatmaps and just make the video. Only applies to "video" mode. freq: Frequency (in terms of generations) to plot heatmaps for video. Only applies to "video" mode. framerate: Framerate for the video. Only applies to "video" mode. gen: Generation to plot -- only applies to "single" mode. None indicates the final gen.
src/analysis/heatmap.py
heatmap
icaros-usc/dqd-rl
6
python
def heatmap(logdir: str, mode: str='single', skip_plot: bool=False, freq: int=100, framerate: int=6, gen: int=None): 'Plots the heatmaps for archives in a logdir.\n\n Args:\n logdir: Path to experiment logging directory.\n mode:\n - "single": plot the archive and save to logdir /\n `heatmap_archive_{gen}.{pdf,png,svg}`\n - "video": plot every `freq` generations and save to the directory\n logdir / `heatmap_archive`; logdir / `heatmap_archive.mp4` is also\n created from these images with ffmpeg.\n skip_plot: Skip plotting the heatmaps and just make the video. Only\n applies to "video" mode.\n freq: Frequency (in terms of generations) to plot heatmaps for video.\n Only applies to "video" mode.\n framerate: Framerate for the video. Only applies to "video" mode.\n gen: Generation to plot -- only applies to "single" mode.\n None indicates the final gen.\n ' logdir = load_experiment(logdir) analysis_id = get_analysis_id() if (len(gin.query_parameter('GridArchive.dims')) != 2): logger.error('Heatmaps not supported for non-2D archives') return plot_kwargs = {'square': True, 'cmap': 'viridis', 'pcm_kwargs': {'rasterized': True}} plot_kwargs.update({'vmin': ANALYSIS_INFO[analysis_id]['min_score'], 'vmax': ANALYSIS_INFO[analysis_id]['max_score']}) total_gens = load_metrics(logdir).total_itrs gen = (total_gens if (gen is None) else gen) if (mode == 'single'): plot_generation(mode, logdir, analysis_id, gen, plot_kwargs, [f'heatmap_archive_{gen}.pdf', f'heatmap_archive_{gen}.png', f'heatmap_archive_{gen}.svg']) elif (mode == 'video'): if (not skip_plot): shutil.rmtree(logdir.pdir('heatmap_archive/'), ignore_errors=True) digits = int(np.ceil(np.log10((total_gens + 1)))) for g in range((total_gens + 1)): try: if (((g % freq) == 0) or (g == total_gens)): plot_generation(mode, logdir, analysis_id, g, plot_kwargs, [f'heatmap_archive/{g:0{digits}}.png']) except ValueError as e: logger.error('ValueError caught. 
Have you tried setting the max objective in objectives/__init__.py ?') raise e os.system(f"ffmpeg -an -r {framerate} -i "{logdir.file('heatmap_archive/%*.png')}" -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 "{logdir.file('heatmap_archive.mp4')}" -y ") else: raise ValueError(f"Unknown mode '{mode}'")
def heatmap(logdir: str, mode: str='single', skip_plot: bool=False, freq: int=100, framerate: int=6, gen: int=None): 'Plots the heatmaps for archives in a logdir.\n\n Args:\n logdir: Path to experiment logging directory.\n mode:\n - "single": plot the archive and save to logdir /\n `heatmap_archive_{gen}.{pdf,png,svg}`\n - "video": plot every `freq` generations and save to the directory\n logdir / `heatmap_archive`; logdir / `heatmap_archive.mp4` is also\n created from these images with ffmpeg.\n skip_plot: Skip plotting the heatmaps and just make the video. Only\n applies to "video" mode.\n freq: Frequency (in terms of generations) to plot heatmaps for video.\n Only applies to "video" mode.\n framerate: Framerate for the video. Only applies to "video" mode.\n gen: Generation to plot -- only applies to "single" mode.\n None indicates the final gen.\n ' logdir = load_experiment(logdir) analysis_id = get_analysis_id() if (len(gin.query_parameter('GridArchive.dims')) != 2): logger.error('Heatmaps not supported for non-2D archives') return plot_kwargs = {'square': True, 'cmap': 'viridis', 'pcm_kwargs': {'rasterized': True}} plot_kwargs.update({'vmin': ANALYSIS_INFO[analysis_id]['min_score'], 'vmax': ANALYSIS_INFO[analysis_id]['max_score']}) total_gens = load_metrics(logdir).total_itrs gen = (total_gens if (gen is None) else gen) if (mode == 'single'): plot_generation(mode, logdir, analysis_id, gen, plot_kwargs, [f'heatmap_archive_{gen}.pdf', f'heatmap_archive_{gen}.png', f'heatmap_archive_{gen}.svg']) elif (mode == 'video'): if (not skip_plot): shutil.rmtree(logdir.pdir('heatmap_archive/'), ignore_errors=True) digits = int(np.ceil(np.log10((total_gens + 1)))) for g in range((total_gens + 1)): try: if (((g % freq) == 0) or (g == total_gens)): plot_generation(mode, logdir, analysis_id, g, plot_kwargs, [f'heatmap_archive/{g:0{digits}}.png']) except ValueError as e: logger.error('ValueError caught. 
Have you tried setting the max objective in objectives/__init__.py ?') raise e os.system(f"ffmpeg -an -r {framerate} -i "{logdir.file('heatmap_archive/%*.png')}" -vcodec libx264 -pix_fmt yuv420p -profile:v baseline -level 3 "{logdir.file('heatmap_archive.mp4')}" -y ") else: raise ValueError(f"Unknown mode '{mode}'")<|docstring|>Plots the heatmaps for archives in a logdir. Args: logdir: Path to experiment logging directory. mode: - "single": plot the archive and save to logdir / `heatmap_archive_{gen}.{pdf,png,svg}` - "video": plot every `freq` generations and save to the directory logdir / `heatmap_archive`; logdir / `heatmap_archive.mp4` is also created from these images with ffmpeg. skip_plot: Skip plotting the heatmaps and just make the video. Only applies to "video" mode. freq: Frequency (in terms of generations) to plot heatmaps for video. Only applies to "video" mode. framerate: Framerate for the video. Only applies to "video" mode. gen: Generation to plot -- only applies to "single" mode. None indicates the final gen.<|endoftext|>
7029c7ab14cda1c178b6ff63a23287dc1982a8ffcd75d9576683d604292a66f4
def parse_current_day(shadowserver_html): '\n Parse newest links from shadowserver web page.(newest day)\n :return: Parsed data of current day\n ' current_year = shadowserver_html.li for year in shadowserver_html.li.next_siblings: if (year != '\n'): current_year = year current_month = current_year.li for month in current_year.li.next_siblings: if (month != '\n'): current_month = month current_day = current_month.li for day in current_month.li.next_siblings: if (day != '\n'): current_day = day return current_day
Parse newest links from shadowserver web page.(newest day) :return: Parsed data of current day
crusoe_observe/vulnerability-component/shadowserver_module/Download.py
parse_current_day
CSIRT-MU/CRUSOE
3
python
def parse_current_day(shadowserver_html): '\n Parse newest links from shadowserver web page.(newest day)\n :return: Parsed data of current day\n ' current_year = shadowserver_html.li for year in shadowserver_html.li.next_siblings: if (year != '\n'): current_year = year current_month = current_year.li for month in current_year.li.next_siblings: if (month != '\n'): current_month = month current_day = current_month.li for day in current_month.li.next_siblings: if (day != '\n'): current_day = day return current_day
def parse_current_day(shadowserver_html): '\n Parse newest links from shadowserver web page.(newest day)\n :return: Parsed data of current day\n ' current_year = shadowserver_html.li for year in shadowserver_html.li.next_siblings: if (year != '\n'): current_year = year current_month = current_year.li for month in current_year.li.next_siblings: if (month != '\n'): current_month = month current_day = current_month.li for day in current_month.li.next_siblings: if (day != '\n'): current_day = day return current_day<|docstring|>Parse newest links from shadowserver web page.(newest day) :return: Parsed data of current day<|endoftext|>
24990c510b798112c0879cd1432fe533f6942a26d5107eb9c0e491d411a2f922
def current_day_scan(location, user, password, logger=structlog.get_logger()): '\n Connect to shadowserver, parse most actual files and download them to the current directory.\n :param location: where will be downloaded csv saved\n :param user: username login credential\n :param password: password for username specified in previous argument\n :param logger: logger for the method\n :return: True if task was successful, exit with error otherwise\n ' payload = {'user': user, 'password': password} logger.info('Signing into Shadowserver...') try: response = requests.post('https://dl.shadowserver.org/reports/index.php', data=payload) if ('Invalid username/password combination' in response.text): raise requests.HTTPError('Invalid credentials') logger.info('Getting list of files which will be downloaded...') soup = BeautifulSoup(response.text, 'xml') number_of_downloaded_files = 0 for section in parse_current_day(soup).find_all('a', href=True): url = section['href'] try: remote_file = urlopen(url, cafile=certifi.where()) content = remote_file.info()['Content-Disposition'] (_, params) = cgi.parse_header(content) filename = (location + params['filename']) file = open(filename, 'wb') file.write(remote_file.read()) file.close() logger.info(f"File {params['filename']} has been downloaded") number_of_downloaded_files += 1 except URLError as error: logger.warning(f"File {params['filename']} has't been downloaded and won't be processed", error_message=error) logger.info(('Files downloaded: %s' % number_of_downloaded_files)) remove_corrupted_files(location) except requests.RequestException as error: logger.error("Can't connect and sign into Shadowserver", error_message=error) sys.exit(1) return True
Connect to shadowserver, parse most actual files and download them to the current directory. :param location: where will be downloaded csv saved :param user: username login credential :param password: password for username specified in previous argument :param logger: logger for the method :return: True if task was successful, exit with error otherwise
crusoe_observe/vulnerability-component/shadowserver_module/Download.py
current_day_scan
CSIRT-MU/CRUSOE
3
python
def current_day_scan(location, user, password, logger=structlog.get_logger()): '\n Connect to shadowserver, parse most actual files and download them to the current directory.\n :param location: where will be downloaded csv saved\n :param user: username login credential\n :param password: password for username specified in previous argument\n :param logger: logger for the method\n :return: True if task was successful, exit with error otherwise\n ' payload = {'user': user, 'password': password} logger.info('Signing into Shadowserver...') try: response = requests.post('https://dl.shadowserver.org/reports/index.php', data=payload) if ('Invalid username/password combination' in response.text): raise requests.HTTPError('Invalid credentials') logger.info('Getting list of files which will be downloaded...') soup = BeautifulSoup(response.text, 'xml') number_of_downloaded_files = 0 for section in parse_current_day(soup).find_all('a', href=True): url = section['href'] try: remote_file = urlopen(url, cafile=certifi.where()) content = remote_file.info()['Content-Disposition'] (_, params) = cgi.parse_header(content) filename = (location + params['filename']) file = open(filename, 'wb') file.write(remote_file.read()) file.close() logger.info(f"File {params['filename']} has been downloaded") number_of_downloaded_files += 1 except URLError as error: logger.warning(f"File {params['filename']} has't been downloaded and won't be processed", error_message=error) logger.info(('Files downloaded: %s' % number_of_downloaded_files)) remove_corrupted_files(location) except requests.RequestException as error: logger.error("Can't connect and sign into Shadowserver", error_message=error) sys.exit(1) return True
def current_day_scan(location, user, password, logger=structlog.get_logger()): '\n Connect to shadowserver, parse most actual files and download them to the current directory.\n :param location: where will be downloaded csv saved\n :param user: username login credential\n :param password: password for username specified in previous argument\n :param logger: logger for the method\n :return: True if task was successful, exit with error otherwise\n ' payload = {'user': user, 'password': password} logger.info('Signing into Shadowserver...') try: response = requests.post('https://dl.shadowserver.org/reports/index.php', data=payload) if ('Invalid username/password combination' in response.text): raise requests.HTTPError('Invalid credentials') logger.info('Getting list of files which will be downloaded...') soup = BeautifulSoup(response.text, 'xml') number_of_downloaded_files = 0 for section in parse_current_day(soup).find_all('a', href=True): url = section['href'] try: remote_file = urlopen(url, cafile=certifi.where()) content = remote_file.info()['Content-Disposition'] (_, params) = cgi.parse_header(content) filename = (location + params['filename']) file = open(filename, 'wb') file.write(remote_file.read()) file.close() logger.info(f"File {params['filename']} has been downloaded") number_of_downloaded_files += 1 except URLError as error: logger.warning(f"File {params['filename']} has't been downloaded and won't be processed", error_message=error) logger.info(('Files downloaded: %s' % number_of_downloaded_files)) remove_corrupted_files(location) except requests.RequestException as error: logger.error("Can't connect and sign into Shadowserver", error_message=error) sys.exit(1) return True<|docstring|>Connect to shadowserver, parse most actual files and download them to the current directory. 
:param location: where will be downloaded csv saved :param user: username login credential :param password: password for username specified in previous argument :param logger: logger for the method :return: True if task was successful, exit with error otherwise<|endoftext|>
5c9c34268625dc1e36d9403082635747fa8d0f4ab82fb43576d0b6af1e58cab8
def parse_conllu_plus(file: Optional[str]=None, text: Optional[Union[(str, List[str])]]=None, encoding: str='utf8') -> pd.DataFrame: "\n Parses Conll-U Plus format into a MultiIndex Pandas Dataframe\n :param file: Path to the conll file\n :param text: Conll file's content\n :param encoding: Encoding of the text\n :return:\n MultiIndex Pandas Dataframe where the first index represents the sentence, and the second the token position\n " if ((file is None) and (text is None)): raise ValueError('file or text must have a value other than None!') if ((text is None) and (file is not None)): with open(file, mode='r', encoding=encoding) as f: text = f.readlines() if (type(text) is str): text = text.split('\n') sentence_idx = 0 token_position = 0 sent_ = [] idx_ = [] cols = None for row in text: row = row.rstrip('\n') if row.startswith(_COLUMNS_META): cols = row[len(_COLUMNS_META):].split(' ') continue elif row.startswith('# '): continue else: if (len(row) == 0): token_position = 0 sentence_idx += 1 continue values = row.split('\t') sent_.append(values) idx_.append((sentence_idx, token_position)) token_position += 1 if (cols is None): cols = [f'Col_{i}' for i in range(len(sent_[0]))] index = pd.MultiIndex.from_tuples(idx_, names=['sentence_idx_', 'token_idx_']) df = pd.DataFrame(data=sent_, index=index, columns=cols) return df
Parses Conll-U Plus format into a MultiIndex Pandas Dataframe :param file: Path to the conll file :param text: Conll file's content :param encoding: Encoding of the text :return: MultiIndex Pandas Dataframe where the first index represents the sentence, and the second the token position
paddle/datasets/utils/parser/conll.py
parse_conllu_plus
ficstamas/paddle
0
python
def parse_conllu_plus(file: Optional[str]=None, text: Optional[Union[(str, List[str])]]=None, encoding: str='utf8') -> pd.DataFrame: "\n Parses Conll-U Plus format into a MultiIndex Pandas Dataframe\n :param file: Path to the conll file\n :param text: Conll file's content\n :param encoding: Encoding of the text\n :return:\n MultiIndex Pandas Dataframe where the first index represents the sentence, and the second the token position\n " if ((file is None) and (text is None)): raise ValueError('file or text must have a value other than None!') if ((text is None) and (file is not None)): with open(file, mode='r', encoding=encoding) as f: text = f.readlines() if (type(text) is str): text = text.split('\n') sentence_idx = 0 token_position = 0 sent_ = [] idx_ = [] cols = None for row in text: row = row.rstrip('\n') if row.startswith(_COLUMNS_META): cols = row[len(_COLUMNS_META):].split(' ') continue elif row.startswith('# '): continue else: if (len(row) == 0): token_position = 0 sentence_idx += 1 continue values = row.split('\t') sent_.append(values) idx_.append((sentence_idx, token_position)) token_position += 1 if (cols is None): cols = [f'Col_{i}' for i in range(len(sent_[0]))] index = pd.MultiIndex.from_tuples(idx_, names=['sentence_idx_', 'token_idx_']) df = pd.DataFrame(data=sent_, index=index, columns=cols) return df
def parse_conllu_plus(file: Optional[str]=None, text: Optional[Union[(str, List[str])]]=None, encoding: str='utf8') -> pd.DataFrame: "\n Parses Conll-U Plus format into a MultiIndex Pandas Dataframe\n :param file: Path to the conll file\n :param text: Conll file's content\n :param encoding: Encoding of the text\n :return:\n MultiIndex Pandas Dataframe where the first index represents the sentence, and the second the token position\n " if ((file is None) and (text is None)): raise ValueError('file or text must have a value other than None!') if ((text is None) and (file is not None)): with open(file, mode='r', encoding=encoding) as f: text = f.readlines() if (type(text) is str): text = text.split('\n') sentence_idx = 0 token_position = 0 sent_ = [] idx_ = [] cols = None for row in text: row = row.rstrip('\n') if row.startswith(_COLUMNS_META): cols = row[len(_COLUMNS_META):].split(' ') continue elif row.startswith('# '): continue else: if (len(row) == 0): token_position = 0 sentence_idx += 1 continue values = row.split('\t') sent_.append(values) idx_.append((sentence_idx, token_position)) token_position += 1 if (cols is None): cols = [f'Col_{i}' for i in range(len(sent_[0]))] index = pd.MultiIndex.from_tuples(idx_, names=['sentence_idx_', 'token_idx_']) df = pd.DataFrame(data=sent_, index=index, columns=cols) return df<|docstring|>Parses Conll-U Plus format into a MultiIndex Pandas Dataframe :param file: Path to the conll file :param text: Conll file's content :param encoding: Encoding of the text :return: MultiIndex Pandas Dataframe where the first index represents the sentence, and the second the token position<|endoftext|>
af2c90548cc2f24a0b42fd9271a58455cbf5aa4f0b243a67521e594dde163806
async def test_ecobee_occupancy_setup(hass): 'Test that an Ecbobee occupancy sensor be correctly setup in HA.' accessories = (await setup_accessories_from_file(hass, 'ecobee_occupancy.json')) (await setup_test_accessories(hass, accessories)) (await assert_devices_and_entities_created(hass, DeviceTestInfo(unique_id=HUB_TEST_ACCESSORY_ID, name='Master Fan', model='ecobee Switch+', manufacturer='ecobee Inc.', sw_version='4.5.130201', hw_version='', serial_number='111111111111', devices=[], entities=[EntityTestInfo(entity_id='binary_sensor.master_fan', friendly_name='Master Fan', unique_id='homekit-111111111111-56', state='off')])))
Test that an Ecbobee occupancy sensor be correctly setup in HA.
tests/components/homekit_controller/specific_devices/test_ecobee_occupancy.py
test_ecobee_occupancy_setup
GrandMoff100/homeassistant-core
30,023
python
async def test_ecobee_occupancy_setup(hass): accessories = (await setup_accessories_from_file(hass, 'ecobee_occupancy.json')) (await setup_test_accessories(hass, accessories)) (await assert_devices_and_entities_created(hass, DeviceTestInfo(unique_id=HUB_TEST_ACCESSORY_ID, name='Master Fan', model='ecobee Switch+', manufacturer='ecobee Inc.', sw_version='4.5.130201', hw_version=, serial_number='111111111111', devices=[], entities=[EntityTestInfo(entity_id='binary_sensor.master_fan', friendly_name='Master Fan', unique_id='homekit-111111111111-56', state='off')])))
async def test_ecobee_occupancy_setup(hass): accessories = (await setup_accessories_from_file(hass, 'ecobee_occupancy.json')) (await setup_test_accessories(hass, accessories)) (await assert_devices_and_entities_created(hass, DeviceTestInfo(unique_id=HUB_TEST_ACCESSORY_ID, name='Master Fan', model='ecobee Switch+', manufacturer='ecobee Inc.', sw_version='4.5.130201', hw_version=, serial_number='111111111111', devices=[], entities=[EntityTestInfo(entity_id='binary_sensor.master_fan', friendly_name='Master Fan', unique_id='homekit-111111111111-56', state='off')])))<|docstring|>Test that an Ecbobee occupancy sensor be correctly setup in HA.<|endoftext|>
f6a767117c5ef263184adb7dfd9e183b16d79aac25342c626ab74757ec612a1f
def bipartiteMatch(graph): 'Find maximum cardinality matching of a bipartite graph (U,V,E).\n\tThe input format is a dictionary mapping members of U to a list\n\tof their neighbors in V. The output is a triple (M,A,B) where M is a\n\tdictionary mapping members of V to their matches in U, A is the part\n\tof the maximum independent set in U, and B is the part of the MIS in V.\n\tThe same object may occur in both U and V, and is treated as two\n\tdistinct vertices if this happens.' matching = {} for u in graph: for v in graph[u]: if (v not in matching): matching[v] = u break while 1: preds = {} unmatched = [] pred = dict([(u, unmatched) for u in graph]) for v in matching: del pred[matching[v]] layer = list(pred) while (layer and (not unmatched)): newLayer = {} for u in layer: for v in graph[u]: if (v not in preds): newLayer.setdefault(v, []).append(u) layer = [] for v in newLayer: preds[v] = newLayer[v] if (v in matching): layer.append(matching[v]) pred[matching[v]] = v else: unmatched.append(v) if (not unmatched): unlayered = {} for u in graph: for v in graph[u]: if (v not in preds): unlayered[v] = None return (matching, list(pred), list(unlayered)) def recurse(v): if (v in preds): L = preds[v] del preds[v] for u in L: if (u in pred): pu = pred[u] del pred[u] if ((pu is unmatched) or recurse(pu)): matching[v] = u return 1 return 0 for v in unmatched: recurse(v)
Find maximum cardinality matching of a bipartite graph (U,V,E). The input format is a dictionary mapping members of U to a list of their neighbors in V. The output is a triple (M,A,B) where M is a dictionary mapping members of V to their matches in U, A is the part of the maximum independent set in U, and B is the part of the MIS in V. The same object may occur in both U and V, and is treated as two distinct vertices if this happens.
src/Decomposition_Algorithms.py
bipartiteMatch
Berberer/BvN-Switch-Simulator
1
python
def bipartiteMatch(graph): 'Find maximum cardinality matching of a bipartite graph (U,V,E).\n\tThe input format is a dictionary mapping members of U to a list\n\tof their neighbors in V. The output is a triple (M,A,B) where M is a\n\tdictionary mapping members of V to their matches in U, A is the part\n\tof the maximum independent set in U, and B is the part of the MIS in V.\n\tThe same object may occur in both U and V, and is treated as two\n\tdistinct vertices if this happens.' matching = {} for u in graph: for v in graph[u]: if (v not in matching): matching[v] = u break while 1: preds = {} unmatched = [] pred = dict([(u, unmatched) for u in graph]) for v in matching: del pred[matching[v]] layer = list(pred) while (layer and (not unmatched)): newLayer = {} for u in layer: for v in graph[u]: if (v not in preds): newLayer.setdefault(v, []).append(u) layer = [] for v in newLayer: preds[v] = newLayer[v] if (v in matching): layer.append(matching[v]) pred[matching[v]] = v else: unmatched.append(v) if (not unmatched): unlayered = {} for u in graph: for v in graph[u]: if (v not in preds): unlayered[v] = None return (matching, list(pred), list(unlayered)) def recurse(v): if (v in preds): L = preds[v] del preds[v] for u in L: if (u in pred): pu = pred[u] del pred[u] if ((pu is unmatched) or recurse(pu)): matching[v] = u return 1 return 0 for v in unmatched: recurse(v)
def bipartiteMatch(graph): 'Find maximum cardinality matching of a bipartite graph (U,V,E).\n\tThe input format is a dictionary mapping members of U to a list\n\tof their neighbors in V. The output is a triple (M,A,B) where M is a\n\tdictionary mapping members of V to their matches in U, A is the part\n\tof the maximum independent set in U, and B is the part of the MIS in V.\n\tThe same object may occur in both U and V, and is treated as two\n\tdistinct vertices if this happens.' matching = {} for u in graph: for v in graph[u]: if (v not in matching): matching[v] = u break while 1: preds = {} unmatched = [] pred = dict([(u, unmatched) for u in graph]) for v in matching: del pred[matching[v]] layer = list(pred) while (layer and (not unmatched)): newLayer = {} for u in layer: for v in graph[u]: if (v not in preds): newLayer.setdefault(v, []).append(u) layer = [] for v in newLayer: preds[v] = newLayer[v] if (v in matching): layer.append(matching[v]) pred[matching[v]] = v else: unmatched.append(v) if (not unmatched): unlayered = {} for u in graph: for v in graph[u]: if (v not in preds): unlayered[v] = None return (matching, list(pred), list(unlayered)) def recurse(v): if (v in preds): L = preds[v] del preds[v] for u in L: if (u in pred): pu = pred[u] del pred[u] if ((pu is unmatched) or recurse(pu)): matching[v] = u return 1 return 0 for v in unmatched: recurse(v)<|docstring|>Find maximum cardinality matching of a bipartite graph (U,V,E). The input format is a dictionary mapping members of U to a list of their neighbors in V. The output is a triple (M,A,B) where M is a dictionary mapping members of V to their matches in U, A is the part of the maximum independent set in U, and B is the part of the MIS in V. The same object may occur in both U and V, and is treated as two distinct vertices if this happens.<|endoftext|>
cbfea04a709fa581b1aaac452bb97e30a00d1ec3c543c56ff58558fc0ef8d70d
def main(): '\n TODO:\n ' circle_size = w.width count = 0 while (circle_size >= 0): if ((count % 2) == 0): circle = GOval(circle_size, circle_size) circle.filled = True circle.fill_color = 'white' circle.color = 'white' w.add(circle, ((w.width - circle.width) / 2), ((w.height - circle.height) / 2)) circle_size -= CIRCLE_REDUCE count += 1 elif ((count % 2) == 1): circle = GOval(circle_size, circle_size) circle.filled = True w.add(circle, ((w.width - circle.width) / 2), ((w.height - circle.height) / 2)) circle_size -= CIRCLE_REDUCE count += 1 pause(500) img = GImage('winnie') start_point = (0 - img.height) w.add(img, x=((w.width - img.width) / 2), y=start_point) speed = 1 while True: img.move(0, speed) speed += 1 if ((img.y + img.height) >= w.height): speed = (- speed) elif (((img.y + img.height) < w.height) and (speed == (- 25))): break pause(20) body_move = (img.y + img.height) body = GRect((img.width / 2), (img.width / 2)) while ((body.y + body.height) < (w.height - (body.height * 2))): body = GRect((img.width / 2), (img.width / 2)) body.filled = True body.fill_color = 'red' body.color = 'red' w.add(body, (img.x + (body.width / 2)), body_move) body_move += 1 pause(1) hand_move_y = (img.y + img.height) hand_move_x = 0 leg_move_y = (body_move + body.height) leg_move_x = 0 leg = GRect((img.width / 3), (img.width / 3)) while ((leg_move_y + leg.height) < w.height): l_leg = GRect((img.width / 3), (img.width / 3)) l_leg.filled = True l_leg.fill_color = 'yellow' l_leg.color = 'yellow' r_leg = GRect((img.width / 3), (img.width / 3)) r_leg.filled = True r_leg.fill_color = 'yellow' r_leg.color = 'yellow' l_hand = GRect((img.width / 3), (img.width / 3)) l_hand.filled = True l_hand.fill_color = 'yellow' l_hand.color = 'yellow' r_hand = GRect((img.width / 3), (img.width / 3)) r_hand.filled = True r_hand.fill_color = 'yellow' r_hand.color = 'yellow' w.add(l_hand, (((img.x - l_hand.width) + (body.width / 2)) + hand_move_x), hand_move_y) w.add(r_hand, (((img.x + img.width) - 
(body.width / 2)) - hand_move_x), hand_move_y) w.add(l_leg, (body.x - leg_move_x), leg_move_y) w.add(r_leg, (((body.x + body.width) - r_leg.width) + leg_move_x), leg_move_y) leg_move_x += 1 leg_move_y += 1 hand_move_x -= 1 hand_move_y -= 1 pause(1) banner = GRect(200, 100) banner.filled = True banner.fill_color = 'magenta' w.add(banner, 0, 0) title = GLabel('StanCode') title.font = 'roboto-30-bold' w.add(title, ((banner.width - title.width) / 2), ((banner.height + banner.height) / 2))
TODO:
sam_python/my_drawing/my_drawing.py
main
samfang9527/SC101
0
python
def main(): '\n \n ' circle_size = w.width count = 0 while (circle_size >= 0): if ((count % 2) == 0): circle = GOval(circle_size, circle_size) circle.filled = True circle.fill_color = 'white' circle.color = 'white' w.add(circle, ((w.width - circle.width) / 2), ((w.height - circle.height) / 2)) circle_size -= CIRCLE_REDUCE count += 1 elif ((count % 2) == 1): circle = GOval(circle_size, circle_size) circle.filled = True w.add(circle, ((w.width - circle.width) / 2), ((w.height - circle.height) / 2)) circle_size -= CIRCLE_REDUCE count += 1 pause(500) img = GImage('winnie') start_point = (0 - img.height) w.add(img, x=((w.width - img.width) / 2), y=start_point) speed = 1 while True: img.move(0, speed) speed += 1 if ((img.y + img.height) >= w.height): speed = (- speed) elif (((img.y + img.height) < w.height) and (speed == (- 25))): break pause(20) body_move = (img.y + img.height) body = GRect((img.width / 2), (img.width / 2)) while ((body.y + body.height) < (w.height - (body.height * 2))): body = GRect((img.width / 2), (img.width / 2)) body.filled = True body.fill_color = 'red' body.color = 'red' w.add(body, (img.x + (body.width / 2)), body_move) body_move += 1 pause(1) hand_move_y = (img.y + img.height) hand_move_x = 0 leg_move_y = (body_move + body.height) leg_move_x = 0 leg = GRect((img.width / 3), (img.width / 3)) while ((leg_move_y + leg.height) < w.height): l_leg = GRect((img.width / 3), (img.width / 3)) l_leg.filled = True l_leg.fill_color = 'yellow' l_leg.color = 'yellow' r_leg = GRect((img.width / 3), (img.width / 3)) r_leg.filled = True r_leg.fill_color = 'yellow' r_leg.color = 'yellow' l_hand = GRect((img.width / 3), (img.width / 3)) l_hand.filled = True l_hand.fill_color = 'yellow' l_hand.color = 'yellow' r_hand = GRect((img.width / 3), (img.width / 3)) r_hand.filled = True r_hand.fill_color = 'yellow' r_hand.color = 'yellow' w.add(l_hand, (((img.x - l_hand.width) + (body.width / 2)) + hand_move_x), hand_move_y) w.add(r_hand, (((img.x + img.width) - 
(body.width / 2)) - hand_move_x), hand_move_y) w.add(l_leg, (body.x - leg_move_x), leg_move_y) w.add(r_leg, (((body.x + body.width) - r_leg.width) + leg_move_x), leg_move_y) leg_move_x += 1 leg_move_y += 1 hand_move_x -= 1 hand_move_y -= 1 pause(1) banner = GRect(200, 100) banner.filled = True banner.fill_color = 'magenta' w.add(banner, 0, 0) title = GLabel('StanCode') title.font = 'roboto-30-bold' w.add(title, ((banner.width - title.width) / 2), ((banner.height + banner.height) / 2))
def main(): '\n \n ' circle_size = w.width count = 0 while (circle_size >= 0): if ((count % 2) == 0): circle = GOval(circle_size, circle_size) circle.filled = True circle.fill_color = 'white' circle.color = 'white' w.add(circle, ((w.width - circle.width) / 2), ((w.height - circle.height) / 2)) circle_size -= CIRCLE_REDUCE count += 1 elif ((count % 2) == 1): circle = GOval(circle_size, circle_size) circle.filled = True w.add(circle, ((w.width - circle.width) / 2), ((w.height - circle.height) / 2)) circle_size -= CIRCLE_REDUCE count += 1 pause(500) img = GImage('winnie') start_point = (0 - img.height) w.add(img, x=((w.width - img.width) / 2), y=start_point) speed = 1 while True: img.move(0, speed) speed += 1 if ((img.y + img.height) >= w.height): speed = (- speed) elif (((img.y + img.height) < w.height) and (speed == (- 25))): break pause(20) body_move = (img.y + img.height) body = GRect((img.width / 2), (img.width / 2)) while ((body.y + body.height) < (w.height - (body.height * 2))): body = GRect((img.width / 2), (img.width / 2)) body.filled = True body.fill_color = 'red' body.color = 'red' w.add(body, (img.x + (body.width / 2)), body_move) body_move += 1 pause(1) hand_move_y = (img.y + img.height) hand_move_x = 0 leg_move_y = (body_move + body.height) leg_move_x = 0 leg = GRect((img.width / 3), (img.width / 3)) while ((leg_move_y + leg.height) < w.height): l_leg = GRect((img.width / 3), (img.width / 3)) l_leg.filled = True l_leg.fill_color = 'yellow' l_leg.color = 'yellow' r_leg = GRect((img.width / 3), (img.width / 3)) r_leg.filled = True r_leg.fill_color = 'yellow' r_leg.color = 'yellow' l_hand = GRect((img.width / 3), (img.width / 3)) l_hand.filled = True l_hand.fill_color = 'yellow' l_hand.color = 'yellow' r_hand = GRect((img.width / 3), (img.width / 3)) r_hand.filled = True r_hand.fill_color = 'yellow' r_hand.color = 'yellow' w.add(l_hand, (((img.x - l_hand.width) + (body.width / 2)) + hand_move_x), hand_move_y) w.add(r_hand, (((img.x + img.width) - 
(body.width / 2)) - hand_move_x), hand_move_y) w.add(l_leg, (body.x - leg_move_x), leg_move_y) w.add(r_leg, (((body.x + body.width) - r_leg.width) + leg_move_x), leg_move_y) leg_move_x += 1 leg_move_y += 1 hand_move_x -= 1 hand_move_y -= 1 pause(1) banner = GRect(200, 100) banner.filled = True banner.fill_color = 'magenta' w.add(banner, 0, 0) title = GLabel('StanCode') title.font = 'roboto-30-bold' w.add(title, ((banner.width - title.width) / 2), ((banner.height + banner.height) / 2))<|docstring|>TODO:<|endoftext|>
d4331f0823e922154b692b74ba8fa5428df6ae04cfe913c54a56e0393b5ef3eb
def __init__(self, pd): '\n Initializes analyzer with a PhaseDiagram.\n\n Args:\n pd: Phase Diagram to analyze.\n ' self._pd = pd
Initializes analyzer with a PhaseDiagram. Args: pd: Phase Diagram to analyze.
pymatgen/phasediagram/analyzer.py
__init__
adozier/pymatgen
0
python
def __init__(self, pd): '\n Initializes analyzer with a PhaseDiagram.\n\n Args:\n pd: Phase Diagram to analyze.\n ' self._pd = pd
def __init__(self, pd): '\n Initializes analyzer with a PhaseDiagram.\n\n Args:\n pd: Phase Diagram to analyze.\n ' self._pd = pd<|docstring|>Initializes analyzer with a PhaseDiagram. Args: pd: Phase Diagram to analyze.<|endoftext|>
834388c96c4c1337a80b6732c8c7359ebc93770bbee3cdec415f667efa37ba2e
def _make_comp_matrix(self, complist):
    """
    Build a normalized composition matrix from a list of compositions:
    one row per composition, one column per phase-diagram element,
    entries being atomic fractions.
    """
    elements = self._pd.elements
    rows = []
    for composition in complist:
        rows.append([composition.get_atomic_fraction(el) for el in elements])
    return np.array(rows)
1dcb31b0b74a52b7e49fa18273b484a871073bfb3204416d3a8903242e6a6ab1
@lru_cache(1)
def _get_facet(self, comp):
    """
    Return a facet (tuple of qhull-entry indices) whose simplex contains
    ``comp``. Cached (size 1) so successive calls at the same composition
    are fast.

    NOTE(review): ``lru_cache`` on an instance method keys on ``self`` and
    keeps the analyzer alive for the cache's lifetime (ruff B019) --
    consider a per-instance cache if that matters here.
    """
    # Reject compositions containing elements outside the diagram.
    if set(comp.elements).difference(self._pd.elements):
        raise ValueError('{} has elements not in the phase diagram {}'.format(comp, self._pd.elements))
    # Coordinates: atomic fractions of all elements except the first
    # (the first is implied by the others summing to 1).
    c = [comp.get_atomic_fraction(e) for e in self._pd.elements[1:]]
    # Scan facets until one simplex contains the point; tolerance is a
    # tenth of the class-wide numerical tolerance.
    for (f, s) in zip(self._pd.facets, self._pd.simplices):
        if Simplex(s).in_simplex(c, (PDAnalyzer.numerical_tol / 10)):
            return f
    raise RuntimeError('No facet found for comp = {}'.format(comp))
c74c05fc71107b4c41832d99db375dcf4f8e5922aade2fa517b3d3d35b1370fc
def get_decomposition(self, comp):
    """
    Provides the decomposition at a particular composition.

    Args:
        comp: A composition

    Returns:
        Decomposition as a dict of {Entry: amount}
    """
    facet = self._get_facet(comp)
    vertex_entries = [self._pd.qhull_entries[i] for i in facet]
    # Solve for the mixing fractions of the facet vertices that
    # reproduce the target composition.
    vertex_matrix = self._make_comp_matrix([e.composition for e in vertex_entries])
    target = self._make_comp_matrix([comp])
    amounts = np.linalg.solve(vertex_matrix.T, target.T)
    result = {}
    for entry, amount in zip(vertex_entries, amounts):
        # Keep only numerically significant contributions.
        if abs(amount[0]) > PDAnalyzer.numerical_tol:
            result[entry] = amount[0]
    return result
3e4c40a0715a0adccc2aca8cff54e08d873537f192c77b99ed1f6bc4ebb97598
def get_hull_energy(self, comp):
    """
    Args:
        comp (Composition): Input composition

    Returns:
        Energy of lowest energy equilibrium at desired composition. Not
        normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
    """
    # Fraction-weighted per-atom energy of the decomposition products,
    # scaled back up by the total atom count of the input composition.
    energy_per_atom = sum(entry.energy_per_atom * fraction
                          for entry, fraction in self.get_decomposition(comp).items())
    return energy_per_atom * comp.num_atoms
f78ae20188a017e962ff7fe228e07a175bd7986b5d41aa36e2f26800d1521666
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
    """
    Provides the decomposition and energy above convex hull for an entry.
    Due to caching, can be much faster if entries with the same composition
    are processed together.

    Args:
        entry: A PDEntry like object
        allow_negative: Whether to allow negative e_above_hulls. Used to
            calculate equilibrium reaction energies. Defaults to False.

    Returns:
        (decomp, energy above convex hull) Stable entries should have
        energy above hull of 0. The decomposition is provided as a dict of
        {Entry: amount}.
    """
    # Stable entries sit on the hull: they decompose trivially into
    # themselves with zero energy above hull.
    if (entry in self._pd.stable_entries):
        return ({entry: 1}, 0)
    # The facet containing this composition; its vertices are the
    # candidate decomposition products.
    facet = self._get_facet(entry.composition)
    comp_list = [self._pd.qhull_entries[i].composition for i in facet]
    m = self._make_comp_matrix(comp_list)
    compm = self._make_comp_matrix([entry.composition])
    # Solve m.T @ x = comp for the mixing fractions of the facet vertices.
    decomp_amts = np.linalg.solve(m.T, compm.T)[:, 0]
    # Drop numerically-zero amounts.
    decomp = {self._pd.qhull_entries[facet[i]]: decomp_amts[i] for i in range(len(decomp_amts)) if (abs(decomp_amts[i]) > PDAnalyzer.numerical_tol)}
    # Hull energy at this composition is the fraction-weighted vertex
    # energy; e_above_hull is the entry's excess over that.
    energies = [self._pd.qhull_entries[i].energy_per_atom for i in facet]
    ehull = (entry.energy_per_atom - np.dot(decomp_amts, energies))
    if (allow_negative or (ehull >= (- PDAnalyzer.numerical_tol))):
        return (decomp, ehull)
    raise ValueError('No valid decomp found!')
f200c6d27ed231b913099877f06a43cec159bf3c13f9242e384fce6151aa25eb
def get_e_above_hull(self, entry):
    """
    Provides the energy above convex hull for an entry

    Args:
        entry: A PDEntry like object

    Returns:
        Energy above convex hull of entry. Stable entries should have
        energy above hull of 0.
    """
    # Discard the decomposition; only the hull distance is wanted here.
    _, e_above_hull = self.get_decomp_and_e_above_hull(entry)
    return e_above_hull
2522ab7797b5353ac738d8a3dc14094f9f734ededfdfea437cbb06ec70736ced
def get_equilibrium_reaction_energy(self, entry):
    """
    Provides the reaction energy of a stable entry from the neighboring
    equilibrium stable entries (also known as the inverse distance to
    hull).

    Args:
        entry: A PDEntry like object

    Returns:
        Equilibrium reaction energy of entry. Stable entries should have
        equilibrium reaction energy <= 0.
    """
    # Only hull entries have a meaningful inverse distance to hull.
    if entry not in self._pd.stable_entries:
        raise ValueError('Equilibrium reaction energy is available only for stable entries.')
    # Elemental references define the zero of the energy scale.
    if entry.is_element:
        return 0
    # Rebuild the diagram without this entry, then measure how far below
    # that reduced hull the entry sits (negative e_above_hull).
    remaining = [e for e in self._pd.stable_entries if e != entry]
    reduced_pd = PhaseDiagram(remaining, self._pd.elements)
    reduced_analyzer = PDAnalyzer(reduced_pd)
    return reduced_analyzer.get_decomp_and_e_above_hull(entry, allow_negative=True)[1]
08f4624fea85c7797ae0a5db5c7428bd35da112a1e7aaf7e5f1e1a8c832d10b2
def get_facet_chempots(self, facet):
    """
    Calculates the chemical potentials for each element within a facet.

    Args:
        facet: Facet of the phase diagram.

    Returns:
        { element: chempot } for all elements in the phase diagram.
    """
    vertex_entries = [self._pd.qhull_entries[i] for i in facet]
    # Composition matrix (rows = vertices) and per-atom vertex energies;
    # the chemical potentials solve comp_matrix @ mu = energies.
    comp_matrix = self._make_comp_matrix([e.composition for e in vertex_entries])
    energies = [e.energy_per_atom for e in vertex_entries]
    chempots = np.linalg.solve(comp_matrix, energies)
    return {el: mu for el, mu in zip(self._pd.elements, chempots)}
233e08f0593d3b2e51f4fb5455c52be7a73d2e6b7cb8bb13edfc8aa1b6e556ba
def get_transition_chempots(self, element):
    """
    Get the critical chemical potentials for an element in the Phase
    Diagram.

    Args:
        element: An element. Has to be in the PD in the first place.

    Returns:
        A sorted sequence of critical chemical potentials, from less
        negative to more negative.
    """
    if element not in self._pd.elements:
        raise ValueError('get_transition_chempots can only be called with elements in the phase diagram.')
    # Chemical potential of this element on every facet of the diagram.
    critical_chempots = [self.get_facet_chempots(facet)[element]
                         for facet in self._pd.facets]
    # Deduplicate values that agree within the numerical tolerance.
    unique_pots = []
    for pot in sorted(critical_chempots):
        if not unique_pots or abs(pot - unique_pots[-1]) > PDAnalyzer.numerical_tol:
            unique_pots.append(pot)
    # Report from least to most negative.
    return tuple(reversed(unique_pots))
fc51ef197c900f982158f7b3758081c78a337da24713f717559df1be3b33ec06
def get_element_profile(self, element, comp, comp_tol=1e-05):
    """
    Provides the element evolution data for a composition.
    For example, can be used to analyze Li conversion voltages by varying
    uLi and looking at the phases formed. Also can be used to analyze O2
    evolution by varying uO2.

    Args:
        element: An element. Must be in the phase diagram.
        comp: A Composition
        comp_tol: The tolerance to use when calculating decompositions.
            Phases with amounts less than this tolerance are excluded.
            Defaults to 1e-5.

    Returns:
        Evolution data as a list of dictionaries of the following format:
        [ {'chempot': -10.487582010000001, 'evolution': -2.0,
        'reaction': Reaction Object], ...]
    """
    if (element not in self._pd.elements):
        # NOTE(review): message names get_transition_chempots -- looks
        # copy-pasted from that method; consider correcting upstream.
        raise ValueError('get_transition_chempots can only be called with elements in the phase diagram.')
    # Decompositions only change at the critical (transition) chempots.
    chempots = self.get_transition_chempots(element)
    stable_entries = self._pd.stable_entries
    # Composition with the open element stripped out -- the grand
    # potential diagram treats that element as a reservoir.
    gccomp = Composition({el: amt for (el, amt) in comp.items() if (el != element)})
    elref = self._pd.el_refs[element]
    elcomp = Composition(element.symbol)
    prev_decomp = []
    evolution = []

    def are_same_decomp(decomp1, decomp2):
        # True if every composition in decomp2 also appears in decomp1,
        # i.e. the decomposition has not changed since last step.
        for comp in decomp2:
            if (comp not in decomp1):
                return False
        return True

    for c in chempots:
        # Evaluate just below each transition chempot so the point lies
        # strictly inside a stability region.
        gcpd = GrandPotentialPhaseDiagram(stable_entries, {element: (c - 1e-05)}, self._pd.elements)
        analyzer = PDAnalyzer(gcpd)
        gcdecomp = analyzer.get_decomposition(gccomp)
        # Keep only products above the amount tolerance.
        decomp = [gcentry.original_entry.composition for (gcentry, amt) in gcdecomp.items() if (amt > comp_tol)]
        decomp_entries = [gcentry.original_entry for (gcentry, amt) in gcdecomp.items() if (amt > comp_tol)]
        if (not are_same_decomp(prev_decomp, decomp)):
            # Ensure the elemental phase appears so the reaction balances.
            if (elcomp not in decomp):
                decomp.insert(0, elcomp)
            rxn = Reaction([comp], decomp)
            rxn.normalize_to(comp)
            prev_decomp = decomp
            # Amount of the open element evolved per formula unit of comp.
            amt = (- rxn.coeffs[rxn.all_comp.index(elcomp)])
            evolution.append({'chempot': c, 'evolution': amt, 'element_reference': elref, 'reaction': rxn, 'entries': decomp_entries})
    return evolution
1783a1b2687ba230bf97288340e8c1d5de8e68d67eb889e4d8485d71b8fa48df
def get_chempot_range_map(self, elements, referenced=True, joggle=True, force_use_pyhull=False):
    """
    Returns a chemical potential range map for each stable entry.

    Args:
        elements: Sequence of elements to be considered as independent
            variables. E.g., if you want to show the stability ranges
            of all Li-Co-O phases wrt to uLi and uO, you will supply
            [Element("Li"), Element("O")]
        referenced: If True, gives the results with a reference being the
            energy of the elemental phase. If False, gives absolute values.
        joggle (boolean): Whether to joggle the input to avoid precision
            errors.
        force_use_pyhull (boolean): Whether the pyhull algorithm is always
            used, even when scipy is present.

    Returns:
        Returns a dict of the form {entry: [simplices]}. The list of
        simplices are the sides of the N-1 dim polytope bounding the
        allowable chemical potential range of each entry.
    """
    all_chempots = []
    pd = self._pd
    facets = pd.facets
    # Chemical potentials of every element on every facet of the diagram.
    for facet in facets:
        chempots = self.get_facet_chempots(facet)
        all_chempots.append([chempots[el] for el in pd.elements])
    # Column indices of the requested independent elements.
    inds = [pd.elements.index(el) for el in elements]
    el_energies = {el: 0.0 for el in elements}
    if referenced:
        # Shift chempots so the elemental phases sit at zero.
        el_energies = {el: pd.el_refs[el].energy_per_atom for el in elements}
    chempot_ranges = collections.defaultdict(list)
    # With exactly as many chempot points as elements there is a single
    # facet; otherwise take the convex hull of the chempot points.
    vertices = [list(range(len(self._pd.elements)))]
    if (len(all_chempots) > len(self._pd.elements)):
        vertices = get_facets(all_chempots, joggle=joggle, force_use_pyhull=force_use_pyhull)
    for ufacet in vertices:
        for combi in itertools.combinations(ufacet, 2):
            data1 = facets[combi[0]]
            data2 = facets[combi[1]]
            # Entries shared by the two PD facets; an edge of the chempot
            # polytope belongs to entries common to both endpoints.
            common_ent_ind = set(data1).intersection(set(data2))
            if (len(common_ent_ind) == len(elements)):
                common_entries = [pd.qhull_entries[i] for i in common_ent_ind]
                # Referenced chempot coordinates of the two endpoints,
                # restricted to the requested elements.
                data = np.array([[(all_chempots[i][j] - el_energies[pd.elements[j]]) for j in inds] for i in combi])
                sim = Simplex(data)
                for entry in common_entries:
                    chempot_ranges[entry].append(sim)
    return chempot_ranges
potential range map for each stable entry. Args: elements: Sequence of elements to be considered as independent variables. E.g., if you want to show the stability ranges of all Li-Co-O phases wrt to uLi and uO, you will supply [Element("Li"), Element("O")] referenced: If True, gives the results with a reference being the energy of the elemental phase. If False, gives absolute values. joggle (boolean): Whether to joggle the input to avoid precision errors. force_use_pyhull (boolean): Whether the pyhull algorithm is always used, even when scipy is present. Returns: Returns a dict of the form {entry: [simplices]}. The list of simplices are the sides of the N-1 dim polytope bounding the allowable chemical potential range of each entry.<|endoftext|>
649b904141c4469aaa38e5e06a9bc9f4fa92a3e79427340c26c15a4b4200e16c
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=0.01): '\n returns a set of chemical potentials corresponding to the vertices of the simplex\n in the chemical potential phase diagram.\n The simplex is built using all elements in the target_composition except dep_elt.\n The chemical potential of dep_elt is computed from the target composition energy.\n This method is useful to get the limiting conditions for\n defects computations for instance.\n\n Args:\n target_comp: A Composition object\n dep_elt: the element for which the chemical potential is computed from the energy of\n the stable phase at the target composition\n tol_en: a tolerance on the energy to set\n\n Returns:\n [{Element:mu}]: An array of conditions on simplex vertices for\n which each element has a chemical potential set to a given\n value. "absolute" values (i.e., not referenced to element energies)\n ' muref = np.array([self._pd.el_refs[e].energy_per_atom for e in self._pd.elements if (e != dep_elt)]) chempot_ranges = self.get_chempot_range_map([e for e in self._pd.elements if (e != dep_elt)]) for e in self._pd.elements: if (not (e in target_comp.elements)): target_comp = (target_comp + Composition({e: 0.0})) coeff = [(- target_comp[e]) for e in self._pd.elements if (e != dep_elt)] for e in chempot_ranges.keys(): if (e.composition.reduced_composition == target_comp.reduced_composition): multiplicator = (e.composition[dep_elt] / target_comp[dep_elt]) ef = (e.energy / multiplicator) all_coords = [] for s in chempot_ranges[e]: for v in s._coords: elts = [e for e in self._pd.elements if (e != dep_elt)] res = {} for i in range(len(elts)): res[elts[i]] = (v[i] + muref[i]) res[dep_elt] = ((np.dot((v + muref), coeff) + ef) / target_comp[dep_elt]) already_in = False for di in all_coords: dict_equals = True for k in di: if (abs((di[k] - res[k])) > tol_en): dict_equals = False break if dict_equals: already_in = True break if (not already_in): all_coords.append(res) return all_coords
returns a set of chemical potentials corresponding to the vertices of the simplex in the chemical potential phase diagram. The simplex is built using all elements in the target_composition except dep_elt. The chemical potential of dep_elt is computed from the target composition energy. This method is useful to get the limiting conditions for defects computations for instance. Args: target_comp: A Composition object dep_elt: the element for which the chemical potential is computed from the energy of the stable phase at the target composition tol_en: a tolerance on the energy to set Returns: [{Element:mu}]: An array of conditions on simplex vertices for which each element has a chemical potential set to a given value. "absolute" values (i.e., not referenced to element energies)
pymatgen/phasediagram/analyzer.py
getmu_vertices_stability_phase
adozier/pymatgen
0
python
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=0.01): '\n returns a set of chemical potentials corresponding to the vertices of the simplex\n in the chemical potential phase diagram.\n The simplex is built using all elements in the target_composition except dep_elt.\n The chemical potential of dep_elt is computed from the target composition energy.\n This method is useful to get the limiting conditions for\n defects computations for instance.\n\n Args:\n target_comp: A Composition object\n dep_elt: the element for which the chemical potential is computed from the energy of\n the stable phase at the target composition\n tol_en: a tolerance on the energy to set\n\n Returns:\n [{Element:mu}]: An array of conditions on simplex vertices for\n which each element has a chemical potential set to a given\n value. "absolute" values (i.e., not referenced to element energies)\n ' muref = np.array([self._pd.el_refs[e].energy_per_atom for e in self._pd.elements if (e != dep_elt)]) chempot_ranges = self.get_chempot_range_map([e for e in self._pd.elements if (e != dep_elt)]) for e in self._pd.elements: if (not (e in target_comp.elements)): target_comp = (target_comp + Composition({e: 0.0})) coeff = [(- target_comp[e]) for e in self._pd.elements if (e != dep_elt)] for e in chempot_ranges.keys(): if (e.composition.reduced_composition == target_comp.reduced_composition): multiplicator = (e.composition[dep_elt] / target_comp[dep_elt]) ef = (e.energy / multiplicator) all_coords = [] for s in chempot_ranges[e]: for v in s._coords: elts = [e for e in self._pd.elements if (e != dep_elt)] res = {} for i in range(len(elts)): res[elts[i]] = (v[i] + muref[i]) res[dep_elt] = ((np.dot((v + muref), coeff) + ef) / target_comp[dep_elt]) already_in = False for di in all_coords: dict_equals = True for k in di: if (abs((di[k] - res[k])) > tol_en): dict_equals = False break if dict_equals: already_in = True break if (not already_in): all_coords.append(res) return all_coords
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=0.01): '\n returns a set of chemical potentials corresponding to the vertices of the simplex\n in the chemical potential phase diagram.\n The simplex is built using all elements in the target_composition except dep_elt.\n The chemical potential of dep_elt is computed from the target composition energy.\n This method is useful to get the limiting conditions for\n defects computations for instance.\n\n Args:\n target_comp: A Composition object\n dep_elt: the element for which the chemical potential is computed from the energy of\n the stable phase at the target composition\n tol_en: a tolerance on the energy to set\n\n Returns:\n [{Element:mu}]: An array of conditions on simplex vertices for\n which each element has a chemical potential set to a given\n value. "absolute" values (i.e., not referenced to element energies)\n ' muref = np.array([self._pd.el_refs[e].energy_per_atom for e in self._pd.elements if (e != dep_elt)]) chempot_ranges = self.get_chempot_range_map([e for e in self._pd.elements if (e != dep_elt)]) for e in self._pd.elements: if (not (e in target_comp.elements)): target_comp = (target_comp + Composition({e: 0.0})) coeff = [(- target_comp[e]) for e in self._pd.elements if (e != dep_elt)] for e in chempot_ranges.keys(): if (e.composition.reduced_composition == target_comp.reduced_composition): multiplicator = (e.composition[dep_elt] / target_comp[dep_elt]) ef = (e.energy / multiplicator) all_coords = [] for s in chempot_ranges[e]: for v in s._coords: elts = [e for e in self._pd.elements if (e != dep_elt)] res = {} for i in range(len(elts)): res[elts[i]] = (v[i] + muref[i]) res[dep_elt] = ((np.dot((v + muref), coeff) + ef) / target_comp[dep_elt]) already_in = False for di in all_coords: dict_equals = True for k in di: if (abs((di[k] - res[k])) > tol_en): dict_equals = False break if dict_equals: already_in = True break if (not already_in): all_coords.append(res) return 
all_coords<|docstring|>returns a set of chemical potentials corresponding to the vertices of the simplex in the chemical potential phase diagram. The simplex is built using all elements in the target_composition except dep_elt. The chemical potential of dep_elt is computed from the target composition energy. This method is useful to get the limiting conditions for defects computations for instance. Args: target_comp: A Composition object dep_elt: the element for which the chemical potential is computed from the energy of the stable phase at the target composition tol_en: a tolerance on the energy to set Returns: [{Element:mu}]: An array of conditions on simplex vertices for which each element has a chemical potential set to a given value. "absolute" values (i.e., not referenced to element energies)<|endoftext|>
f75ca5c074ccf741fc70eceed028316c4f77963c656848a42b8865e96b623c31
def get_chempot_range_stability_phase(self, target_comp, open_elt): '\n returns a set of chemical potentials correspoding to the max and min\n chemical potential of the open element for a given composition. It is\n quite common to have for instance a ternary oxide (e.g., ABO3) for\n which you want to know what are the A and B chemical potential leading\n to the highest and lowest oxygen chemical potential (reducing and\n oxidizing conditions). This is useful for defect computations.\n\n Args:\n target_comp: A Composition object\n open_elt: Element that you want to constrain to be max or min\n\n Returns:\n {Element:(mu_min,mu_max)}: Chemical potentials are given in\n "absolute" values (i.e., not referenced to 0)\n ' muref = np.array([self._pd.el_refs[e].energy_per_atom for e in self._pd.elements if (e != open_elt)]) chempot_ranges = self.get_chempot_range_map([e for e in self._pd.elements if (e != open_elt)]) for e in self._pd.elements: if (not (e in target_comp.elements)): target_comp = (target_comp + Composition({e: 0.0})) coeff = [(- target_comp[e]) for e in self._pd.elements if (e != open_elt)] max_open = (- float('inf')) min_open = float('inf') max_mus = None min_mus = None for e in chempot_ranges.keys(): if (e.composition.reduced_composition == target_comp.reduced_composition): multiplicator = (e.composition[open_elt] / target_comp[open_elt]) ef = (e.energy / multiplicator) all_coords = [] for s in chempot_ranges[e]: for v in s._coords: all_coords.append(v) if (((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) > max_open): max_open = ((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) max_mus = v if (((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) < min_open): min_open = ((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) min_mus = v elts = [e for e in self._pd.elements if (e != open_elt)] res = {} for i in range(len(elts)): res[elts[i]] = ((min_mus[i] + muref[i]), (max_mus[i] + muref[i])) res[open_elt] = (min_open, 
max_open) return res
returns a set of chemical potentials correspoding to the max and min chemical potential of the open element for a given composition. It is quite common to have for instance a ternary oxide (e.g., ABO3) for which you want to know what are the A and B chemical potential leading to the highest and lowest oxygen chemical potential (reducing and oxidizing conditions). This is useful for defect computations. Args: target_comp: A Composition object open_elt: Element that you want to constrain to be max or min Returns: {Element:(mu_min,mu_max)}: Chemical potentials are given in "absolute" values (i.e., not referenced to 0)
pymatgen/phasediagram/analyzer.py
get_chempot_range_stability_phase
adozier/pymatgen
0
python
def get_chempot_range_stability_phase(self, target_comp, open_elt): '\n returns a set of chemical potentials correspoding to the max and min\n chemical potential of the open element for a given composition. It is\n quite common to have for instance a ternary oxide (e.g., ABO3) for\n which you want to know what are the A and B chemical potential leading\n to the highest and lowest oxygen chemical potential (reducing and\n oxidizing conditions). This is useful for defect computations.\n\n Args:\n target_comp: A Composition object\n open_elt: Element that you want to constrain to be max or min\n\n Returns:\n {Element:(mu_min,mu_max)}: Chemical potentials are given in\n "absolute" values (i.e., not referenced to 0)\n ' muref = np.array([self._pd.el_refs[e].energy_per_atom for e in self._pd.elements if (e != open_elt)]) chempot_ranges = self.get_chempot_range_map([e for e in self._pd.elements if (e != open_elt)]) for e in self._pd.elements: if (not (e in target_comp.elements)): target_comp = (target_comp + Composition({e: 0.0})) coeff = [(- target_comp[e]) for e in self._pd.elements if (e != open_elt)] max_open = (- float('inf')) min_open = float('inf') max_mus = None min_mus = None for e in chempot_ranges.keys(): if (e.composition.reduced_composition == target_comp.reduced_composition): multiplicator = (e.composition[open_elt] / target_comp[open_elt]) ef = (e.energy / multiplicator) all_coords = [] for s in chempot_ranges[e]: for v in s._coords: all_coords.append(v) if (((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) > max_open): max_open = ((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) max_mus = v if (((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) < min_open): min_open = ((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) min_mus = v elts = [e for e in self._pd.elements if (e != open_elt)] res = {} for i in range(len(elts)): res[elts[i]] = ((min_mus[i] + muref[i]), (max_mus[i] + muref[i])) res[open_elt] = (min_open, 
max_open) return res
def get_chempot_range_stability_phase(self, target_comp, open_elt): '\n returns a set of chemical potentials correspoding to the max and min\n chemical potential of the open element for a given composition. It is\n quite common to have for instance a ternary oxide (e.g., ABO3) for\n which you want to know what are the A and B chemical potential leading\n to the highest and lowest oxygen chemical potential (reducing and\n oxidizing conditions). This is useful for defect computations.\n\n Args:\n target_comp: A Composition object\n open_elt: Element that you want to constrain to be max or min\n\n Returns:\n {Element:(mu_min,mu_max)}: Chemical potentials are given in\n "absolute" values (i.e., not referenced to 0)\n ' muref = np.array([self._pd.el_refs[e].energy_per_atom for e in self._pd.elements if (e != open_elt)]) chempot_ranges = self.get_chempot_range_map([e for e in self._pd.elements if (e != open_elt)]) for e in self._pd.elements: if (not (e in target_comp.elements)): target_comp = (target_comp + Composition({e: 0.0})) coeff = [(- target_comp[e]) for e in self._pd.elements if (e != open_elt)] max_open = (- float('inf')) min_open = float('inf') max_mus = None min_mus = None for e in chempot_ranges.keys(): if (e.composition.reduced_composition == target_comp.reduced_composition): multiplicator = (e.composition[open_elt] / target_comp[open_elt]) ef = (e.energy / multiplicator) all_coords = [] for s in chempot_ranges[e]: for v in s._coords: all_coords.append(v) if (((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) > max_open): max_open = ((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) max_mus = v if (((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) < min_open): min_open = ((np.dot((v + muref), coeff) + ef) / target_comp[open_elt]) min_mus = v elts = [e for e in self._pd.elements if (e != open_elt)] res = {} for i in range(len(elts)): res[elts[i]] = ((min_mus[i] + muref[i]), (max_mus[i] + muref[i])) res[open_elt] = (min_open, 
max_open) return res<|docstring|>returns a set of chemical potentials correspoding to the max and min chemical potential of the open element for a given composition. It is quite common to have for instance a ternary oxide (e.g., ABO3) for which you want to know what are the A and B chemical potential leading to the highest and lowest oxygen chemical potential (reducing and oxidizing conditions). This is useful for defect computations. Args: target_comp: A Composition object open_elt: Element that you want to constrain to be max or min Returns: {Element:(mu_min,mu_max)}: Chemical potentials are given in "absolute" values (i.e., not referenced to 0)<|endoftext|>
2951735b466a55eb0cfde8e0a5452fa5c2eb42ae8c9fdf09df7273a1e535a7ec
def _raise_warnings(self, warning_headers): "If 'headers' contains a 'Warning' header raise\n the warnings to be seen by the user. Takes an iterable\n of string values from any number of 'Warning' headers.\n " if (not warning_headers): return warning_messages = [] for header in warning_headers: matches = _WARNING_RE.findall(header) if matches: warning_messages.extend(matches) else: warning_messages.append(header) for message in warning_messages: warnings.warn(message, category=ElasticsearchDeprecationWarning, stacklevel=6)
If 'headers' contains a 'Warning' header raise the warnings to be seen by the user. Takes an iterable of string values from any number of 'Warning' headers.
data/virtualenv_1/Lib/site-packages/elasticsearch/connection/base.py
_raise_warnings
gabrieldelorean/Tcc-ciencia-de-dados-e-big-data
46
python
def _raise_warnings(self, warning_headers): "If 'headers' contains a 'Warning' header raise\n the warnings to be seen by the user. Takes an iterable\n of string values from any number of 'Warning' headers.\n " if (not warning_headers): return warning_messages = [] for header in warning_headers: matches = _WARNING_RE.findall(header) if matches: warning_messages.extend(matches) else: warning_messages.append(header) for message in warning_messages: warnings.warn(message, category=ElasticsearchDeprecationWarning, stacklevel=6)
def _raise_warnings(self, warning_headers): "If 'headers' contains a 'Warning' header raise\n the warnings to be seen by the user. Takes an iterable\n of string values from any number of 'Warning' headers.\n " if (not warning_headers): return warning_messages = [] for header in warning_headers: matches = _WARNING_RE.findall(header) if matches: warning_messages.extend(matches) else: warning_messages.append(header) for message in warning_messages: warnings.warn(message, category=ElasticsearchDeprecationWarning, stacklevel=6)<|docstring|>If 'headers' contains a 'Warning' header raise the warnings to be seen by the user. Takes an iterable of string values from any number of 'Warning' headers.<|endoftext|>
94fe24fa0332bac58a6d5675cd41d1905d1ac1ceb50523890271fdd73edd85e9
def log_request_success(self, method, full_url, path, body, status_code, response, duration): ' Log a successful API call. ' if body: try: body = body.decode('utf-8', 'ignore') except AttributeError: pass logger.info('%s %s [status:%s request:%.3fs]', method, full_url, status_code, duration) logger.debug('> %s', body) logger.debug('< %s', response) self._log_trace(method, path, body, status_code, response, duration)
Log a successful API call.
data/virtualenv_1/Lib/site-packages/elasticsearch/connection/base.py
log_request_success
gabrieldelorean/Tcc-ciencia-de-dados-e-big-data
46
python
def log_request_success(self, method, full_url, path, body, status_code, response, duration): ' ' if body: try: body = body.decode('utf-8', 'ignore') except AttributeError: pass logger.info('%s %s [status:%s request:%.3fs]', method, full_url, status_code, duration) logger.debug('> %s', body) logger.debug('< %s', response) self._log_trace(method, path, body, status_code, response, duration)
def log_request_success(self, method, full_url, path, body, status_code, response, duration): ' ' if body: try: body = body.decode('utf-8', 'ignore') except AttributeError: pass logger.info('%s %s [status:%s request:%.3fs]', method, full_url, status_code, duration) logger.debug('> %s', body) logger.debug('< %s', response) self._log_trace(method, path, body, status_code, response, duration)<|docstring|>Log a successful API call.<|endoftext|>
def51fa6c5443e71ae393266eac2a965de8243bc05f06ba26fa39dff11bf66b1
def log_request_fail(self, method, full_url, path, body, duration, status_code=None, response=None, exception=None): ' Log an unsuccessful API call. ' if ((method == 'HEAD') and (status_code == 404)): return logger.warning('%s %s [status:%s request:%.3fs]', method, full_url, (status_code or 'N/A'), duration, exc_info=(exception is not None)) if body: try: body = body.decode('utf-8', 'ignore') except AttributeError: pass logger.debug('> %s', body) self._log_trace(method, path, body, status_code, response, duration) if (response is not None): logger.debug('< %s', response)
Log an unsuccessful API call.
data/virtualenv_1/Lib/site-packages/elasticsearch/connection/base.py
log_request_fail
gabrieldelorean/Tcc-ciencia-de-dados-e-big-data
46
python
def log_request_fail(self, method, full_url, path, body, duration, status_code=None, response=None, exception=None): ' ' if ((method == 'HEAD') and (status_code == 404)): return logger.warning('%s %s [status:%s request:%.3fs]', method, full_url, (status_code or 'N/A'), duration, exc_info=(exception is not None)) if body: try: body = body.decode('utf-8', 'ignore') except AttributeError: pass logger.debug('> %s', body) self._log_trace(method, path, body, status_code, response, duration) if (response is not None): logger.debug('< %s', response)
def log_request_fail(self, method, full_url, path, body, duration, status_code=None, response=None, exception=None): ' ' if ((method == 'HEAD') and (status_code == 404)): return logger.warning('%s %s [status:%s request:%.3fs]', method, full_url, (status_code or 'N/A'), duration, exc_info=(exception is not None)) if body: try: body = body.decode('utf-8', 'ignore') except AttributeError: pass logger.debug('> %s', body) self._log_trace(method, path, body, status_code, response, duration) if (response is not None): logger.debug('< %s', response)<|docstring|>Log an unsuccessful API call.<|endoftext|>
42febbf9c1c77d34ca40da51b3b323050e36589e4b7fa8dd837782d4dd3fd960
def _raise_error(self, status_code, raw_data): ' Locate appropriate exception and raise it. ' error_message = raw_data additional_info = None try: if raw_data: additional_info = json.loads(raw_data) error_message = additional_info.get('error', error_message) if (isinstance(error_message, dict) and ('type' in error_message)): error_message = error_message['type'] except (ValueError, TypeError) as err: logger.warning('Undecodable raw error response from server: %s', err) raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info)
Locate appropriate exception and raise it.
data/virtualenv_1/Lib/site-packages/elasticsearch/connection/base.py
_raise_error
gabrieldelorean/Tcc-ciencia-de-dados-e-big-data
46
python
def _raise_error(self, status_code, raw_data): ' ' error_message = raw_data additional_info = None try: if raw_data: additional_info = json.loads(raw_data) error_message = additional_info.get('error', error_message) if (isinstance(error_message, dict) and ('type' in error_message)): error_message = error_message['type'] except (ValueError, TypeError) as err: logger.warning('Undecodable raw error response from server: %s', err) raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info)
def _raise_error(self, status_code, raw_data): ' ' error_message = raw_data additional_info = None try: if raw_data: additional_info = json.loads(raw_data) error_message = additional_info.get('error', error_message) if (isinstance(error_message, dict) and ('type' in error_message)): error_message = error_message['type'] except (ValueError, TypeError) as err: logger.warning('Undecodable raw error response from server: %s', err) raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info)<|docstring|>Locate appropriate exception and raise it.<|endoftext|>
f6b9786259f1689c1aeb160bd5466804941a2026a71249cbcd9592014cc83aa3
def _get_api_key_header_val(self, api_key): '\n Check the type of the passed api_key and return the correct header value\n for the `API Key authentication <https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`\n :arg api_key, either a tuple or a base64 encoded string\n ' if isinstance(api_key, (tuple, list)): s = '{0}:{1}'.format(api_key[0], api_key[1]).encode('utf-8') return ('ApiKey ' + binascii.b2a_base64(s).rstrip(b'\r\n').decode('utf-8')) return ('ApiKey ' + api_key)
Check the type of the passed api_key and return the correct header value for the `API Key authentication <https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>` :arg api_key, either a tuple or a base64 encoded string
data/virtualenv_1/Lib/site-packages/elasticsearch/connection/base.py
_get_api_key_header_val
gabrieldelorean/Tcc-ciencia-de-dados-e-big-data
46
python
def _get_api_key_header_val(self, api_key): '\n Check the type of the passed api_key and return the correct header value\n for the `API Key authentication <https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`\n :arg api_key, either a tuple or a base64 encoded string\n ' if isinstance(api_key, (tuple, list)): s = '{0}:{1}'.format(api_key[0], api_key[1]).encode('utf-8') return ('ApiKey ' + binascii.b2a_base64(s).rstrip(b'\r\n').decode('utf-8')) return ('ApiKey ' + api_key)
def _get_api_key_header_val(self, api_key): '\n Check the type of the passed api_key and return the correct header value\n for the `API Key authentication <https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`\n :arg api_key, either a tuple or a base64 encoded string\n ' if isinstance(api_key, (tuple, list)): s = '{0}:{1}'.format(api_key[0], api_key[1]).encode('utf-8') return ('ApiKey ' + binascii.b2a_base64(s).rstrip(b'\r\n').decode('utf-8')) return ('ApiKey ' + api_key)<|docstring|>Check the type of the passed api_key and return the correct header value for the `API Key authentication <https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>` :arg api_key, either a tuple or a base64 encoded string<|endoftext|>
65c28f4fa56523df7786a36d2cc8dcfcb8eee273253592ea965c09485a4fd7ec
def __init__(self): 'Constructeur du paramètre.' Parametre.__init__(self, 'installer', 'install') self.schema = '<cle>' self.aide_courte = 'installe un décor' self.aide_longue = "Cette commande permet d'installer un décor dans la salle où vous vous trouvez."
Constructeur du paramètre.
src/primaires/salle/commandes/decor/installer.py
__init__
stormi/tsunami
0
python
def __init__(self): Parametre.__init__(self, 'installer', 'install') self.schema = '<cle>' self.aide_courte = 'installe un décor' self.aide_longue = "Cette commande permet d'installer un décor dans la salle où vous vous trouvez."
def __init__(self): Parametre.__init__(self, 'installer', 'install') self.schema = '<cle>' self.aide_courte = 'installe un décor' self.aide_longue = "Cette commande permet d'installer un décor dans la salle où vous vous trouvez."<|docstring|>Constructeur du paramètre.<|endoftext|>
aaca4d107ed517f1a0b5fad178201f56c9d41dc71eb289832c5c006cb0163a05
def interpreter(self, personnage, dic_masques): "Méthode d'interprétation de commande" cle = dic_masques['cle'].cle try: decor = importeur.salle.decors[cle] except KeyError: (personnage << '|err|Ce décor {} est inconnu.|ff|'.format(cle)) else: personnage.salle.ajouter_decor(decor) (personnage << '{} a bien été installé dans la salle.'.format(decor.nom_singulier.capitalize()))
Méthode d'interprétation de commande
src/primaires/salle/commandes/decor/installer.py
interpreter
stormi/tsunami
0
python
def interpreter(self, personnage, dic_masques): cle = dic_masques['cle'].cle try: decor = importeur.salle.decors[cle] except KeyError: (personnage << '|err|Ce décor {} est inconnu.|ff|'.format(cle)) else: personnage.salle.ajouter_decor(decor) (personnage << '{} a bien été installé dans la salle.'.format(decor.nom_singulier.capitalize()))
def interpreter(self, personnage, dic_masques): cle = dic_masques['cle'].cle try: decor = importeur.salle.decors[cle] except KeyError: (personnage << '|err|Ce décor {} est inconnu.|ff|'.format(cle)) else: personnage.salle.ajouter_decor(decor) (personnage << '{} a bien été installé dans la salle.'.format(decor.nom_singulier.capitalize()))<|docstring|>Méthode d'interprétation de commande<|endoftext|>
7993c7b9b27d12a34b749e896b72fe3ef8b744fd7aadb6925ab027dade90692d
def fuzzify(s, u): '\n Sentence fuzzifier.\n Computes membership vector for the sentence S with respect to the\n universe U\n :param s: list of word embeddings for the sentence\n :param u: the universe matrix U with shape (K, d)\n :return: membership vectors for the sentence\n ' f_s = np.dot(s, u.T) m_s = np.max(f_s, axis=0) m_s = np.maximum(m_s, 0, m_s) return m_s
Sentence fuzzifier. Computes membership vector for the sentence S with respect to the universe U :param s: list of word embeddings for the sentence :param u: the universe matrix U with shape (K, d) :return: membership vectors for the sentence
similarity/fuzzy.py
fuzzify
Babylonpartners/fuzzymax
25
python
def fuzzify(s, u): '\n Sentence fuzzifier.\n Computes membership vector for the sentence S with respect to the\n universe U\n :param s: list of word embeddings for the sentence\n :param u: the universe matrix U with shape (K, d)\n :return: membership vectors for the sentence\n ' f_s = np.dot(s, u.T) m_s = np.max(f_s, axis=0) m_s = np.maximum(m_s, 0, m_s) return m_s
def fuzzify(s, u): '\n Sentence fuzzifier.\n Computes membership vector for the sentence S with respect to the\n universe U\n :param s: list of word embeddings for the sentence\n :param u: the universe matrix U with shape (K, d)\n :return: membership vectors for the sentence\n ' f_s = np.dot(s, u.T) m_s = np.max(f_s, axis=0) m_s = np.maximum(m_s, 0, m_s) return m_s<|docstring|>Sentence fuzzifier. Computes membership vector for the sentence S with respect to the universe U :param s: list of word embeddings for the sentence :param u: the universe matrix U with shape (K, d) :return: membership vectors for the sentence<|endoftext|>
8e3ffe0cd0adc10f7c391a59fa47f2a9c233b7024b2f2462b2910e4232c7aeae
def dynamax_jaccard(x, y): '\n DynaMax-Jaccard similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union)
DynaMax-Jaccard similarity measure between two sentences :param x: list of word embeddings for the first sentence :param y: list of word embeddings for the second sentence :return: similarity score between the two sentences
similarity/fuzzy.py
dynamax_jaccard
Babylonpartners/fuzzymax
25
python
def dynamax_jaccard(x, y): '\n DynaMax-Jaccard similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union)
def dynamax_jaccard(x, y): '\n DynaMax-Jaccard similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union)<|docstring|>DynaMax-Jaccard similarity measure between two sentences :param x: list of word embeddings for the first sentence :param y: list of word embeddings for the second sentence :return: similarity score between the two sentences<|endoftext|>
5b9c2fc1564768df3bf53740815b758744f3865e725f6d776548ad22173819d5
def dynamax_otsuka(x, y): '\n DynaMax-Otsuka similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_x_card = np.sum(m_x) m_y_card = np.sum(m_y) return (m_inter / np.sqrt((m_x_card * m_y_card)))
DynaMax-Otsuka similarity measure between two sentences :param x: list of word embeddings for the first sentence :param y: list of word embeddings for the second sentence :return: similarity score between the two sentences
similarity/fuzzy.py
dynamax_otsuka
Babylonpartners/fuzzymax
25
python
def dynamax_otsuka(x, y): '\n DynaMax-Otsuka similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_x_card = np.sum(m_x) m_y_card = np.sum(m_y) return (m_inter / np.sqrt((m_x_card * m_y_card)))
def dynamax_otsuka(x, y): '\n DynaMax-Otsuka similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_x_card = np.sum(m_x) m_y_card = np.sum(m_y) return (m_inter / np.sqrt((m_x_card * m_y_card)))<|docstring|>DynaMax-Otsuka similarity measure between two sentences :param x: list of word embeddings for the first sentence :param y: list of word embeddings for the second sentence :return: similarity score between the two sentences<|endoftext|>
774682fb2bae23828a26dfc79473f7854d9e810301e6ad0e72964c23a25598fd
def dynamax_dice(x, y): '\n DynaMax-Dice similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) f_inter = np.sum(np.minimum(m_x, m_y)) m_x_card = np.sum(m_x) m_y_card = np.sum(m_y) return ((2 * f_inter) / (m_x_card + m_y_card))
DynaMax-Dice similarity measure between two sentences :param x: list of word embeddings for the first sentence :param y: list of word embeddings for the second sentence :return: similarity score between the two sentences
similarity/fuzzy.py
dynamax_dice
Babylonpartners/fuzzymax
25
python
def dynamax_dice(x, y): '\n DynaMax-Dice similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) f_inter = np.sum(np.minimum(m_x, m_y)) m_x_card = np.sum(m_x) m_y_card = np.sum(m_y) return ((2 * f_inter) / (m_x_card + m_y_card))
def dynamax_dice(x, y): '\n DynaMax-Dice similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' u = np.vstack((x, y)) m_x = fuzzify(x, u) m_y = fuzzify(y, u) f_inter = np.sum(np.minimum(m_x, m_y)) m_x_card = np.sum(m_x) m_y_card = np.sum(m_y) return ((2 * f_inter) / (m_x_card + m_y_card))<|docstring|>DynaMax-Dice similarity measure between two sentences :param x: list of word embeddings for the first sentence :param y: list of word embeddings for the second sentence :return: similarity score between the two sentences<|endoftext|>
e72ad81f438b2e43421a134cd91b3c5f0f3c1592e0f38bb2be2d15dac0798148
def max_jaccard(x, y): '\n MaxPool-Jaccard similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' m_x = np.max(x, axis=0) m_x = np.maximum(m_x, 0, m_x) m_y = np.max(y, axis=0) m_y = np.maximum(m_y, 0, m_y) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union)
MaxPool-Jaccard similarity measure between two sentences :param x: list of word embeddings for the first sentence :param y: list of word embeddings for the second sentence :return: similarity score between the two sentences
similarity/fuzzy.py
max_jaccard
Babylonpartners/fuzzymax
25
python
def max_jaccard(x, y): '\n MaxPool-Jaccard similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' m_x = np.max(x, axis=0) m_x = np.maximum(m_x, 0, m_x) m_y = np.max(y, axis=0) m_y = np.maximum(m_y, 0, m_y) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union)
def max_jaccard(x, y): '\n MaxPool-Jaccard similarity measure between two sentences\n :param x: list of word embeddings for the first sentence\n :param y: list of word embeddings for the second sentence\n :return: similarity score between the two sentences\n ' m_x = np.max(x, axis=0) m_x = np.maximum(m_x, 0, m_x) m_y = np.max(y, axis=0) m_y = np.maximum(m_y, 0, m_y) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union)<|docstring|>MaxPool-Jaccard similarity measure between two sentences :param x: list of word embeddings for the first sentence :param y: list of word embeddings for the second sentence :return: similarity score between the two sentences<|endoftext|>
76a2a0c3dc5d2a2bc87722e67ccf3fd9cd0b1c04d3371508f3a74dc43a103eeb
def fbow_jaccard_factory(u): '\n Factory for building FBoW-Jaccard similarity measures\n with the custom universe matrix U\n :param u: the universe matrix U\n :return: similarity function\n ' def u_jaccard(x, y): m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union) return u_jaccard
Factory for building FBoW-Jaccard similarity measures with the custom universe matrix U :param u: the universe matrix U :return: similarity function
similarity/fuzzy.py
fbow_jaccard_factory
Babylonpartners/fuzzymax
25
python
def fbow_jaccard_factory(u): '\n Factory for building FBoW-Jaccard similarity measures\n with the custom universe matrix U\n :param u: the universe matrix U\n :return: similarity function\n ' def u_jaccard(x, y): m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union) return u_jaccard
def fbow_jaccard_factory(u): '\n Factory for building FBoW-Jaccard similarity measures\n with the custom universe matrix U\n :param u: the universe matrix U\n :return: similarity function\n ' def u_jaccard(x, y): m_x = fuzzify(x, u) m_y = fuzzify(y, u) m_inter = np.sum(np.minimum(m_x, m_y)) m_union = np.sum(np.maximum(m_x, m_y)) return (m_inter / m_union) return u_jaccard<|docstring|>Factory for building FBoW-Jaccard similarity measures with the custom universe matrix U :param u: the universe matrix U :return: similarity function<|endoftext|>
94e35bd5226520806238d804e066f71529ad61937bc273bb5cb070fc4c3c4d95
def get_color(key: str, ctype: ColorType=None) -> str: '\n Returns color code (from colorama) by string shortcut or full string.\n\n :param key: color shortcut key or full string\n :type key: str\n :param ctype: color type (fore/back/style), defaults to None\n :param ctype: ColorType, optional\n :return: color code from colorama\n :rtype: str\n ' if (ctype is None): ctype = ColorType.FORE def _get_color(key: str, ctype: str, kmap: typing.Dict) -> str: if (key in kmap): return kmap[key] m = getattr(colorama, ctype) try: return getattr(m, key.upper()) except AttributeError: raise KeyError(f'unknown color {key} of type {ctype}') if (ctype == ColorType.FORE): return _get_color(key, 'Fore', COLORED_FORE_DESCRIPTION_MAP) elif (ctype == ColorType.BACK): return _get_color(key, 'Back', COLORED_BACK_DESCRIPTION_MAP) elif (ctype == ColorType.STYLE): return _get_color(key, 'Style', COLORED_STYLE_DESCRIPTION_MAP) else: raise ValueError(f'unknown color type {ctype}')
Returns color code (from colorama) by string shortcut or full string. :param key: color shortcut key or full string :type key: str :param ctype: color type (fore/back/style), defaults to None :param ctype: ColorType, optional :return: color code from colorama :rtype: str
pyshart/color.py
get_color
DanielSolomon/blot
0
python
def get_color(key: str, ctype: ColorType=None) -> str: '\n Returns color code (from colorama) by string shortcut or full string.\n\n :param key: color shortcut key or full string\n :type key: str\n :param ctype: color type (fore/back/style), defaults to None\n :param ctype: ColorType, optional\n :return: color code from colorama\n :rtype: str\n ' if (ctype is None): ctype = ColorType.FORE def _get_color(key: str, ctype: str, kmap: typing.Dict) -> str: if (key in kmap): return kmap[key] m = getattr(colorama, ctype) try: return getattr(m, key.upper()) except AttributeError: raise KeyError(f'unknown color {key} of type {ctype}') if (ctype == ColorType.FORE): return _get_color(key, 'Fore', COLORED_FORE_DESCRIPTION_MAP) elif (ctype == ColorType.BACK): return _get_color(key, 'Back', COLORED_BACK_DESCRIPTION_MAP) elif (ctype == ColorType.STYLE): return _get_color(key, 'Style', COLORED_STYLE_DESCRIPTION_MAP) else: raise ValueError(f'unknown color type {ctype}')
def get_color(key: str, ctype: ColorType=None) -> str: '\n Returns color code (from colorama) by string shortcut or full string.\n\n :param key: color shortcut key or full string\n :type key: str\n :param ctype: color type (fore/back/style), defaults to None\n :param ctype: ColorType, optional\n :return: color code from colorama\n :rtype: str\n ' if (ctype is None): ctype = ColorType.FORE def _get_color(key: str, ctype: str, kmap: typing.Dict) -> str: if (key in kmap): return kmap[key] m = getattr(colorama, ctype) try: return getattr(m, key.upper()) except AttributeError: raise KeyError(f'unknown color {key} of type {ctype}') if (ctype == ColorType.FORE): return _get_color(key, 'Fore', COLORED_FORE_DESCRIPTION_MAP) elif (ctype == ColorType.BACK): return _get_color(key, 'Back', COLORED_BACK_DESCRIPTION_MAP) elif (ctype == ColorType.STYLE): return _get_color(key, 'Style', COLORED_STYLE_DESCRIPTION_MAP) else: raise ValueError(f'unknown color type {ctype}')<|docstring|>Returns color code (from colorama) by string shortcut or full string. :param key: color shortcut key or full string :type key: str :param ctype: color type (fore/back/style), defaults to None :param ctype: ColorType, optional :return: color code from colorama :rtype: str<|endoftext|>
9fa8ff69b059fec4502c331f34e9ec70c665ea785efef10f5e0ccc1024f41e91
async def wait_until(predicate, success_description, timeout=10): "Copied from PyMongo's test.utils.wait_until.\n\n Wait up to 10 seconds (by default) for predicate to be true. The\n predicate must be an awaitable.\n\n Returns the predicate's first true value.\n " start = time.time() interval = min((float(timeout) / 100), 0.1) while True: retval = (await predicate()) if retval: return retval if ((time.time() - start) > timeout): raise AssertionError(("Didn't ever %s" % success_description)) time.sleep(interval)
Copied from PyMongo's test.utils.wait_until. Wait up to 10 seconds (by default) for predicate to be true. The predicate must be an awaitable. Returns the predicate's first true value.
test/py35utils.py
wait_until
kirantambe/motor
1,717
python
async def wait_until(predicate, success_description, timeout=10): "Copied from PyMongo's test.utils.wait_until.\n\n Wait up to 10 seconds (by default) for predicate to be true. The\n predicate must be an awaitable.\n\n Returns the predicate's first true value.\n " start = time.time() interval = min((float(timeout) / 100), 0.1) while True: retval = (await predicate()) if retval: return retval if ((time.time() - start) > timeout): raise AssertionError(("Didn't ever %s" % success_description)) time.sleep(interval)
async def wait_until(predicate, success_description, timeout=10): "Copied from PyMongo's test.utils.wait_until.\n\n Wait up to 10 seconds (by default) for predicate to be true. The\n predicate must be an awaitable.\n\n Returns the predicate's first true value.\n " start = time.time() interval = min((float(timeout) / 100), 0.1) while True: retval = (await predicate()) if retval: return retval if ((time.time() - start) > timeout): raise AssertionError(("Didn't ever %s" % success_description)) time.sleep(interval)<|docstring|>Copied from PyMongo's test.utils.wait_until. Wait up to 10 seconds (by default) for predicate to be true. The predicate must be an awaitable. Returns the predicate's first true value.<|endoftext|>
39e4a520e70254eb4f359f53741c18233de3046b1804a542f33d2d035deff833
def threadsafe_generator(f): '\n A decorator that takes a generator function and makes it thread-safe.\n ' @wraps(f) def g(*a, **kw): return ThreadSafeIter(f(*a, **kw)) return g
A decorator that takes a generator function and makes it thread-safe.
airtest/utils/threadsafe.py
threadsafe_generator
suijianming/airtest
6,140
python
def threadsafe_generator(f): '\n \n ' @wraps(f) def g(*a, **kw): return ThreadSafeIter(f(*a, **kw)) return g
def threadsafe_generator(f): '\n \n ' @wraps(f) def g(*a, **kw): return ThreadSafeIter(f(*a, **kw)) return g<|docstring|>A decorator that takes a generator function and makes it thread-safe.<|endoftext|>
edc9c1ae1d734c956ce9eb7723efe501cd7fba4b056aad7177ee4744a860f855
async def maybe_coroutine(function: Callable, *args, **kwargs) -> Any: '\n |coro|\n\n Returns the return value of the function.\n\n :param Callable function: The function to call.\n :param args: The arguments.\n :param kwargs: The key arguments:\n :return: The value.\n :rtype: Any\n ' value = function(*args, **kwargs) if inspect.isawaitable(value): return (await value) return value
|coro| Returns the return value of the function. :param Callable function: The function to call. :param args: The arguments. :param kwargs: The key arguments: :return: The value. :rtype: Any
discordSuperUtils/base.py
maybe_coroutine
Bainble0211/discord-super-utils
91
python
async def maybe_coroutine(function: Callable, *args, **kwargs) -> Any: '\n |coro|\n\n Returns the return value of the function.\n\n :param Callable function: The function to call.\n :param args: The arguments.\n :param kwargs: The key arguments:\n :return: The value.\n :rtype: Any\n ' value = function(*args, **kwargs) if inspect.isawaitable(value): return (await value) return value
async def maybe_coroutine(function: Callable, *args, **kwargs) -> Any: '\n |coro|\n\n Returns the return value of the function.\n\n :param Callable function: The function to call.\n :param args: The arguments.\n :param kwargs: The key arguments:\n :return: The value.\n :rtype: Any\n ' value = function(*args, **kwargs) if inspect.isawaitable(value): return (await value) return value<|docstring|>|coro| Returns the return value of the function. :param Callable function: The function to call. :param args: The arguments. :param kwargs: The key arguments: :return: The value. :rtype: Any<|endoftext|>
30a257552cfebb8e48a694205b9f9434e5896b0e5802b0b7ea3a6408c0798f3c
def get_generator_response(generator: Any, generator_type: Any, *args, **kwargs) -> Any: '\n Returns the generator response with the arguments.\n\n :param generator: The generator to get the response from.\n :type generator: Any\n :param generator_type: The generator type. (Should be same as the generator type.\n :type generator_type: Any\n :param args: The arguments of the generator.\n :param kwargs: The key arguments of the generator\n :return: The generator response.\n :rtype: Any\n ' if (inspect.isclass(generator) and issubclass(generator, generator_type)): if inspect.ismethod(generator.generate): return generator.generate(*args, **kwargs) return generator().generate(*args, **kwargs) if isinstance(generator, generator_type): return generator.generate(*args, **kwargs) raise InvalidGenerator(generator)
Returns the generator response with the arguments. :param generator: The generator to get the response from. :type generator: Any :param generator_type: The generator type. (Should be same as the generator type. :type generator_type: Any :param args: The arguments of the generator. :param kwargs: The key arguments of the generator :return: The generator response. :rtype: Any
discordSuperUtils/base.py
get_generator_response
Bainble0211/discord-super-utils
91
python
def get_generator_response(generator: Any, generator_type: Any, *args, **kwargs) -> Any: '\n Returns the generator response with the arguments.\n\n :param generator: The generator to get the response from.\n :type generator: Any\n :param generator_type: The generator type. (Should be same as the generator type.\n :type generator_type: Any\n :param args: The arguments of the generator.\n :param kwargs: The key arguments of the generator\n :return: The generator response.\n :rtype: Any\n ' if (inspect.isclass(generator) and issubclass(generator, generator_type)): if inspect.ismethod(generator.generate): return generator.generate(*args, **kwargs) return generator().generate(*args, **kwargs) if isinstance(generator, generator_type): return generator.generate(*args, **kwargs) raise InvalidGenerator(generator)
def get_generator_response(generator: Any, generator_type: Any, *args, **kwargs) -> Any: '\n Returns the generator response with the arguments.\n\n :param generator: The generator to get the response from.\n :type generator: Any\n :param generator_type: The generator type. (Should be same as the generator type.\n :type generator_type: Any\n :param args: The arguments of the generator.\n :param kwargs: The key arguments of the generator\n :return: The generator response.\n :rtype: Any\n ' if (inspect.isclass(generator) and issubclass(generator, generator_type)): if inspect.ismethod(generator.generate): return generator.generate(*args, **kwargs) return generator().generate(*args, **kwargs) if isinstance(generator, generator_type): return generator.generate(*args, **kwargs) raise InvalidGenerator(generator)<|docstring|>Returns the generator response with the arguments. :param generator: The generator to get the response from. :type generator: Any :param generator_type: The generator type. (Should be same as the generator type. :type generator_type: Any :param args: The arguments of the generator. :param kwargs: The key arguments of the generator :return: The generator response. :rtype: Any<|endoftext|>
4bdefbcd332bd3e60c6e544b5d1c2a86ff65a5556d194c1d6f5b5c785702cdca
def generate_column_types(types: Iterable[str], database_type: Any) -> Optional[List[str]]: '\n Generates the column type names that are suitable for the database type.\n\n :param types: The column types.\n :type types: Iterable[str]\n :param database_type: The database type.\n :type database_type: Any\n :return: The suitable column types for the database types.\n :rtype: Optional[List[str]]\n ' database_type_configuration = COLUMN_TYPES.get(database_type) if (database_type_configuration is None): return return [database_type_configuration[x] for x in types]
Generates the column type names that are suitable for the database type. :param types: The column types. :type types: Iterable[str] :param database_type: The database type. :type database_type: Any :return: The suitable column types for the database types. :rtype: Optional[List[str]]
discordSuperUtils/base.py
generate_column_types
Bainble0211/discord-super-utils
91
python
def generate_column_types(types: Iterable[str], database_type: Any) -> Optional[List[str]]: '\n Generates the column type names that are suitable for the database type.\n\n :param types: The column types.\n :type types: Iterable[str]\n :param database_type: The database type.\n :type database_type: Any\n :return: The suitable column types for the database types.\n :rtype: Optional[List[str]]\n ' database_type_configuration = COLUMN_TYPES.get(database_type) if (database_type_configuration is None): return return [database_type_configuration[x] for x in types]
def generate_column_types(types: Iterable[str], database_type: Any) -> Optional[List[str]]: '\n Generates the column type names that are suitable for the database type.\n\n :param types: The column types.\n :type types: Iterable[str]\n :param database_type: The database type.\n :type database_type: Any\n :return: The suitable column types for the database types.\n :rtype: Optional[List[str]]\n ' database_type_configuration = COLUMN_TYPES.get(database_type) if (database_type_configuration is None): return return [database_type_configuration[x] for x in types]<|docstring|>Generates the column type names that are suitable for the database type. :param types: The column types. :type types: Iterable[str] :param database_type: The database type. :type database_type: Any :return: The suitable column types for the database types. :rtype: Optional[List[str]]<|endoftext|>
e1f3715dd785c9444e512f4e5030b3914090f7ab0c00cec9d427e36b3abc1940
async def questionnaire(ctx: commands.Context, questions: Iterable[Union[(str, discord.Embed)]], public: bool=False, timeout: Union[(float, int)]=30, member: discord.Member=None) -> Tuple[(List[str], bool)]: '\n |coro|\n\n Questions the member using a "quiz" and returns the answers.\n The questionnaire can be used without a specific member and be public.\n If no member was passed and the questionnaire public argument is true, a ValueError will be raised.\n\n :raises: ValueError: The questionnaire is private and no member was provided.\n :param ctx: The context (where the questionnaire will ask the questions).\n :type ctx: commands.Context\n :param questions: The questions the questionnaire will ask.\n :type questions: Iterable[Union[str, discord.Embed]]\n :param public: A bool indicating if the questionnaire is public.\n :type public: bool\n :param timeout: The number of seconds until the questionnaire will stop and time out.\n :type timeout: Union[float, int]\n :param member: The member the questionnaire will get the answers from.\n :type member: discord.Member\n :return: The answers and a boolean indicating if the questionnaire timed out.\n :rtype: Tuple[List[str], bool]\n ' answers = [] timed_out = False if ((not public) and (not member)): raise ValueError('The questionnaire is private and no member was provided.') def checks(msg): return ((msg.channel == ctx.channel) if public else ((msg.channel == ctx.channel) and (msg.author == member))) for question in questions: if isinstance(question, str): (await ctx.send(question)) elif isinstance(question, discord.Embed): (await ctx.send(embed=question)) else: raise TypeError("Question must be of type 'str' or 'discord.Embed'.") try: message = (await ctx.bot.wait_for('message', check=checks, timeout=timeout)) except asyncio.TimeoutError: timed_out = True break answers.append(message.content) return (answers, timed_out)
|coro| Questions the member using a "quiz" and returns the answers. The questionnaire can be used without a specific member and be public. If no member was passed and the questionnaire public argument is true, a ValueError will be raised. :raises: ValueError: The questionnaire is private and no member was provided. :param ctx: The context (where the questionnaire will ask the questions). :type ctx: commands.Context :param questions: The questions the questionnaire will ask. :type questions: Iterable[Union[str, discord.Embed]] :param public: A bool indicating if the questionnaire is public. :type public: bool :param timeout: The number of seconds until the questionnaire will stop and time out. :type timeout: Union[float, int] :param member: The member the questionnaire will get the answers from. :type member: discord.Member :return: The answers and a boolean indicating if the questionnaire timed out. :rtype: Tuple[List[str], bool]
discordSuperUtils/base.py
questionnaire
Bainble0211/discord-super-utils
91
python
async def questionnaire(ctx: commands.Context, questions: Iterable[Union[(str, discord.Embed)]], public: bool=False, timeout: Union[(float, int)]=30, member: discord.Member=None) -> Tuple[(List[str], bool)]: '\n |coro|\n\n Questions the member using a "quiz" and returns the answers.\n The questionnaire can be used without a specific member and be public.\n If no member was passed and the questionnaire public argument is true, a ValueError will be raised.\n\n :raises: ValueError: The questionnaire is private and no member was provided.\n :param ctx: The context (where the questionnaire will ask the questions).\n :type ctx: commands.Context\n :param questions: The questions the questionnaire will ask.\n :type questions: Iterable[Union[str, discord.Embed]]\n :param public: A bool indicating if the questionnaire is public.\n :type public: bool\n :param timeout: The number of seconds until the questionnaire will stop and time out.\n :type timeout: Union[float, int]\n :param member: The member the questionnaire will get the answers from.\n :type member: discord.Member\n :return: The answers and a boolean indicating if the questionnaire timed out.\n :rtype: Tuple[List[str], bool]\n ' answers = [] timed_out = False if ((not public) and (not member)): raise ValueError('The questionnaire is private and no member was provided.') def checks(msg): return ((msg.channel == ctx.channel) if public else ((msg.channel == ctx.channel) and (msg.author == member))) for question in questions: if isinstance(question, str): (await ctx.send(question)) elif isinstance(question, discord.Embed): (await ctx.send(embed=question)) else: raise TypeError("Question must be of type 'str' or 'discord.Embed'.") try: message = (await ctx.bot.wait_for('message', check=checks, timeout=timeout)) except asyncio.TimeoutError: timed_out = True break answers.append(message.content) return (answers, timed_out)
async def questionnaire(ctx: commands.Context, questions: Iterable[Union[(str, discord.Embed)]], public: bool=False, timeout: Union[(float, int)]=30, member: discord.Member=None) -> Tuple[(List[str], bool)]: '\n |coro|\n\n Questions the member using a "quiz" and returns the answers.\n The questionnaire can be used without a specific member and be public.\n If no member was passed and the questionnaire public argument is true, a ValueError will be raised.\n\n :raises: ValueError: The questionnaire is private and no member was provided.\n :param ctx: The context (where the questionnaire will ask the questions).\n :type ctx: commands.Context\n :param questions: The questions the questionnaire will ask.\n :type questions: Iterable[Union[str, discord.Embed]]\n :param public: A bool indicating if the questionnaire is public.\n :type public: bool\n :param timeout: The number of seconds until the questionnaire will stop and time out.\n :type timeout: Union[float, int]\n :param member: The member the questionnaire will get the answers from.\n :type member: discord.Member\n :return: The answers and a boolean indicating if the questionnaire timed out.\n :rtype: Tuple[List[str], bool]\n ' answers = [] timed_out = False if ((not public) and (not member)): raise ValueError('The questionnaire is private and no member was provided.') def checks(msg): return ((msg.channel == ctx.channel) if public else ((msg.channel == ctx.channel) and (msg.author == member))) for question in questions: if isinstance(question, str): (await ctx.send(question)) elif isinstance(question, discord.Embed): (await ctx.send(embed=question)) else: raise TypeError("Question must be of type 'str' or 'discord.Embed'.") try: message = (await ctx.bot.wait_for('message', check=checks, timeout=timeout)) except asyncio.TimeoutError: timed_out = True break answers.append(message.content) return (answers, timed_out)<|docstring|>|coro| Questions the member using a "quiz" and returns the answers. 
The questionnaire can be used without a specific member and be public. If no member was passed and the questionnaire public argument is true, a ValueError will be raised. :raises: ValueError: The questionnaire is private and no member was provided. :param ctx: The context (where the questionnaire will ask the questions). :type ctx: commands.Context :param questions: The questions the questionnaire will ask. :type questions: Iterable[Union[str, discord.Embed]] :param public: A bool indicating if the questionnaire is public. :type public: bool :param timeout: The number of seconds until the questionnaire will stop and time out. :type timeout: Union[float, int] :param member: The member the questionnaire will get the answers from. :type member: discord.Member :return: The answers and a boolean indicating if the questionnaire timed out. :rtype: Tuple[List[str], bool]<|endoftext|>
66c3ce23800cadf1aff36666fbcca53a45f3618f7f4c6e1d80fc041149166d3a
def handle_task_exceptions(task: asyncio.Task) -> None: "\n Handles the task's exceptions.\n\n :param asyncio.Task task: The task.\n :return: None\n :rtype: None\n " try: task.result() except asyncio.CancelledError: pass except Exception as e: raise e
Handles the task's exceptions. :param asyncio.Task task: The task. :return: None :rtype: None
discordSuperUtils/base.py
handle_task_exceptions
Bainble0211/discord-super-utils
91
python
def handle_task_exceptions(task: asyncio.Task) -> None: "\n Handles the task's exceptions.\n\n :param asyncio.Task task: The task.\n :return: None\n :rtype: None\n " try: task.result() except asyncio.CancelledError: pass except Exception as e: raise e
def handle_task_exceptions(task: asyncio.Task) -> None: "\n Handles the task's exceptions.\n\n :param asyncio.Task task: The task.\n :return: None\n :rtype: None\n " try: task.result() except asyncio.CancelledError: pass except Exception as e: raise e<|docstring|>Handles the task's exceptions. :param asyncio.Task task: The task. :return: None :rtype: None<|endoftext|>
e7c599036cf5f197022518d87148a066428ae7b66a5766a421532c7e1fde5c54
def create_task(loop: asyncio.AbstractEventLoop, coroutine: Coroutine) -> None:
    """
    Creates a task and handles exceptions.

    :param asyncio.AbstractEventLoop loop: The loop to run the coroutine on.
    :param Coroutine coroutine: The coroutine.
    :return: None
    :rtype: None
    """
    try:
        scheduled = loop.create_task(coroutine)
        # Route any exception the task raises through the shared handler.
        scheduled.add_done_callback(handle_task_exceptions)
    except RuntimeError:
        # The loop may be closed or unable to schedule; best effort only.
        return
Creates a task and handles exceptions. :param asyncio.AbstractEventLoop loop: The loop to run the coroutine on. :param Coroutine coroutine: The coroutine. :return: None :rtype: None
discordSuperUtils/base.py
create_task
Bainble0211/discord-super-utils
91
python
def create_task(loop: asyncio.AbstractEventLoop, coroutine: Coroutine) -> None: '\n Creates a task and handles exceptions.\n\n :param asyncio.AbstractEventLoop loop: The loop to run the coroutine on.\n :param Coroutine coroutine: The coroutine.\n :return: None\n :rtype: None\n ' try: task = loop.create_task(coroutine) task.add_done_callback(handle_task_exceptions) except RuntimeError: pass
def create_task(loop: asyncio.AbstractEventLoop, coroutine: Coroutine) -> None: '\n Creates a task and handles exceptions.\n\n :param asyncio.AbstractEventLoop loop: The loop to run the coroutine on.\n :param Coroutine coroutine: The coroutine.\n :return: None\n :rtype: None\n ' try: task = loop.create_task(coroutine) task.add_done_callback(handle_task_exceptions) except RuntimeError: pass<|docstring|>Creates a task and handles exceptions. :param asyncio.AbstractEventLoop loop: The loop to run the coroutine on. :param Coroutine coroutine: The coroutine. :return: None :rtype: None<|endoftext|>
b055a6e011a5e2f3f2ee2566858655f86a585c21d6e7a0bc425f2553b597cfb0
async def __wipe_cache(self) -> None:
    """
    |coro|

    This function is responsible for wiping the member cache.

    :return: None
    :rtype: None
    """
    # Periodically reset the cache for as long as the bot is alive.
    while True:
        if self.bot.is_closed():
            break
        # Re-read the delay each cycle so configuration changes take effect.
        await asyncio.sleep(self.wipe_cache_delay.total_seconds())
        self._cache = {}
|coro| This function is responsible for wiping the member cache. :return: None :rtype: None
discordSuperUtils/base.py
__wipe_cache
Bainble0211/discord-super-utils
91
python
async def __wipe_cache(self) -> None: '\n |coro|\n\n This function is responsible for wiping the member cache.\n\n :return: None\n :rtype: None\n ' while (not self.bot.is_closed()): (await asyncio.sleep(self.wipe_cache_delay.total_seconds())) self._cache = {}
async def __wipe_cache(self) -> None: '\n |coro|\n\n This function is responsible for wiping the member cache.\n\n :return: None\n :rtype: None\n ' while (not self.bot.is_closed()): (await asyncio.sleep(self.wipe_cache_delay.total_seconds())) self._cache = {}<|docstring|>|coro| This function is responsible for wiping the member cache. :return: None :rtype: None<|endoftext|>
c148ad1ca7b61be8da07ced68e30307d8f27e1d9ef57e5b494b1e85fd4cbdbf2
async def call_event(self, name: str, *args, **kwargs) -> None:
    """
    Calls the event name with the arguments

    :param name: The event name.
    :type name: str
    :param args: The arguments.
    :param kwargs: The key arguments.
    :return: None
    :rtype: None
    """
    # Invoke every listener registered under this name, in order.
    # Unknown event names are silently ignored.
    for listener in self.events.get(name, ()):
        await listener(*args, **kwargs)
Calls the event name with the arguments :param name: The event name. :type name: str :param args: The arguments. :param kwargs: The key arguments. :return: None :rtype: None
discordSuperUtils/base.py
call_event
Bainble0211/discord-super-utils
91
python
async def call_event(self, name: str, *args, **kwargs) -> None: '\n Calls the event name with the arguments\n\n :param name: The event name.\n :type name: str\n :param args: The arguments.\n :param kwargs: The key arguments.\n :return: None\n :rtype: None\n ' if (name in self.events): for event in self.events[name]: (await event(*args, **kwargs))
async def call_event(self, name: str, *args, **kwargs) -> None: '\n Calls the event name with the arguments\n\n :param name: The event name.\n :type name: str\n :param args: The arguments.\n :param kwargs: The key arguments.\n :return: None\n :rtype: None\n ' if (name in self.events): for event in self.events[name]: (await event(*args, **kwargs))<|docstring|>Calls the event name with the arguments :param name: The event name. :type name: str :param args: The arguments. :param kwargs: The key arguments. :return: None :rtype: None<|endoftext|>
2cd1cca276f8faa778b41dffd53f8380f509ab4e6c9fe49cc236766de63d6c62
def event(self, name: str = None) -> Callable:
    """
    A decorator which adds an event listener.

    :param name: The event name.
    :type name: str
    :return: The inner function.
    :rtype: Callable
    """

    def register(func):
        # Register the callback and hand the function back unchanged so it
        # can still be called normally.
        self.add_event(func, name)
        return func

    return register
A decorator which adds an event listener. :param name: The event name. :type name: str :return: The inner function. :rtype: Callable
discordSuperUtils/base.py
event
Bainble0211/discord-super-utils
91
python
def event(self, name: str=None) -> Callable: '\n A decorator which adds an event listener.\n\n :param name: The event name.\n :type name: str\n :return: The inner function.\n :rtype: Callable\n ' def inner(func): self.add_event(func, name) return func return inner
def event(self, name: str=None) -> Callable: '\n A decorator which adds an event listener.\n\n :param name: The event name.\n :type name: str\n :return: The inner function.\n :rtype: Callable\n ' def inner(func): self.add_event(func, name) return func return inner<|docstring|>A decorator which adds an event listener. :param name: The event name. :type name: str :return: The inner function. :rtype: Callable<|endoftext|>
2c60a019bcc7fe18194ddec54630b8d21a2949da6e1c285ac09c336c4bfc0c9e
def add_event(self, func: Callable, name: str = None) -> None:
    """
    Adds an event to the event dictionary.

    :param func: The event callback.
    :type func: Callable
    :param name: The event name.
    :type name: str
    :return: None
    :rtype: None
    :raises: TypeError: The listener isn't async.
    """
    # Fall back to the function's own name when no event name was given.
    key = name if name else func.__name__

    # Only coroutine functions can be awaited by call_event.
    if not asyncio.iscoroutinefunction(func):
        raise TypeError('Listeners must be async.')

    # Append to the existing listener list, or start a new one.
    self.events.setdefault(key, []).append(func)
Adds an event to the event dictionary. :param func: The event callback. :type func: Callable :param name: The event name. :type name: str :return: None :rtype: None :raises: TypeError: The listener isn't async.
discordSuperUtils/base.py
add_event
Bainble0211/discord-super-utils
91
python
def add_event(self, func: Callable, name: str=None) -> None: "\n Adds an event to the event dictionary.\n\n :param func: The event callback.\n :type func: Callable\n :param name: The event name.\n :type name: str\n :return: None\n :rtype: None\n :raises: TypeError: The listener isn't async.\n " name = (func.__name__ if (not name) else name) if (not asyncio.iscoroutinefunction(func)): raise TypeError('Listeners must be async.') if (name in self.events): self.events[name].append(func) else: self.events[name] = [func]
def add_event(self, func: Callable, name: str=None) -> None: "\n Adds an event to the event dictionary.\n\n :param func: The event callback.\n :type func: Callable\n :param name: The event name.\n :type name: str\n :return: None\n :rtype: None\n :raises: TypeError: The listener isn't async.\n " name = (func.__name__ if (not name) else name) if (not asyncio.iscoroutinefunction(func)): raise TypeError('Listeners must be async.') if (name in self.events): self.events[name].append(func) else: self.events[name] = [func]<|docstring|>Adds an event to the event dictionary. :param func: The event callback. :type func: Callable :param name: The event name. :type name: str :return: None :rtype: None :raises: TypeError: The listener isn't async.<|endoftext|>
d73cc182573bd9c8e2f221887e13d71557692c2223908ba93bcf6d7d23acd514
def remove_event(self, func: Callable, name: str = None) -> None:
    """
    Removes an event from the event dictionary.

    :param func: The event callback.
    :type func: Callable
    :param name: The event name.
    :type name: str
    :return: None
    :rtype: None
    """
    # Fall back to the function's own name when no event name was given.
    key = name if name else func.__name__

    listeners = self.events.get(key)
    if listeners is not None:
        # Drop the first matching listener; unknown keys are a no-op.
        listeners.remove(func)
Removes an event from the event dictionary. :param func: The event callback. :type func: Callable :param name: The event name. :type name: str :return: None :rtype: None
discordSuperUtils/base.py
remove_event
Bainble0211/discord-super-utils
91
python
def remove_event(self, func: Callable, name: str=None) -> None: '\n Removes an event from the event dictionary.\n\n :param func: The event callback.\n :type func: Callable\n :param name: The event name.\n :type name: str\n :return: None\n :rtype: None\n ' name = (func.__name__ if (not name) else name) if (name in self.events): self.events[name].remove(func)
def remove_event(self, func: Callable, name: str=None) -> None: '\n Removes an event from the event dictionary.\n\n :param func: The event callback.\n :type func: Callable\n :param name: The event name.\n :type name: str\n :return: None\n :rtype: None\n ' name = (func.__name__ if (not name) else name) if (name in self.events): self.events[name].remove(func)<|docstring|>Removes an event from the event dictionary. :param func: The event callback. :type func: Callable :param name: The event name. :type name: str :return: None :rtype: None<|endoftext|>
e46773cef4c9a42cf7a78c8f8ab922d44835a43dc70bdcc45f3be1454c8ef81d
@staticmethod
def event(manager_type: Any) -> Callable:
    """
    Adds an event to the Cog event list.

    :param manager_type: The manager type of the event.
    :type manager_type: Any
    :rtype: Callable
    :return: The inner function.
    :raises: TypeError: The listener isn't async.
    """

    def tag(func):
        # Only coroutine functions can be dispatched by the managers.
        if not inspect.iscoroutinefunction(func):
            raise TypeError('Listeners must be async.')
        # Mark the function so the Cog machinery can route it later.
        func._listener_type = manager_type
        return func

    return tag
Adds an event to the Cog event list. :param manager_type: The manager type of the event. :type manager_type: Any :rtype: Callable :return: The inner function. :raises: TypeError: The listener isn't async.
discordSuperUtils/base.py
event
Bainble0211/discord-super-utils
91
python
@staticmethod def event(manager_type: Any) -> Callable: "\n Adds an event to the Cog event list.\n\n :param manager_type: The manager type of the event.\n :type manager_type: Any\n :rtype: Callable\n :return: The inner function.\n :raises: TypeError: The listener isn't async.\n " def decorator(func): if (not inspect.iscoroutinefunction(func)): raise TypeError('Listeners must be async.') func._listener_type = manager_type return func return decorator
@staticmethod def event(manager_type: Any) -> Callable: "\n Adds an event to the Cog event list.\n\n :param manager_type: The manager type of the event.\n :type manager_type: Any\n :rtype: Callable\n :return: The inner function.\n :raises: TypeError: The listener isn't async.\n " def decorator(func): if (not inspect.iscoroutinefunction(func)): raise TypeError('Listeners must be async.') func._listener_type = manager_type return func return decorator<|docstring|>Adds an event to the Cog event list. :param manager_type: The manager type of the event. :type manager_type: Any :rtype: Callable :return: The inner function. :raises: TypeError: The listener isn't async.<|endoftext|>
3c6e973ffff2e6f4cd51fcada1e9653d8564a2a6f0ac357ca17a431bcd5e07c1
def _check_database(self, raise_error: bool = True) -> bool:
    """
    A function which checks if the database is connected.

    :param raise_error: A bool indicating if the function should raise an error if the database is not connected.
    :type raise_error: bool
    :rtype: bool
    :return: If the database is connected.
    :raises: DatabaseNotConnected: The database is not connected.
    """
    # Connected: nothing to do.
    if self.database:
        return True

    if raise_error:
        raise DatabaseNotConnected(
            f"Database not connected. Connect this manager to a database using 'connect_to_database'"
        )

    return False
A function which checks if the database is connected. :param raise_error: A bool indicating if the function should raise an error if the database is not connected. :type raise_error: bool :rtype: bool :return: If the database is connected. :raises: DatabaseNotConnected: The database is not connected.
discordSuperUtils/base.py
_check_database
Bainble0211/discord-super-utils
91
python
def _check_database(self, raise_error: bool=True) -> bool: '\n A function which checks if the database is connected.\n\n :param raise_error: A bool indicating if the function should raise an error if the database is not connected.\n :type raise_error: bool\n :rtype: bool\n :return: If the database is connected.\n :raises: DatabaseNotConnected: The database is not connected.\n ' if (not self.database): if raise_error: raise DatabaseNotConnected(f"Database not connected. Connect this manager to a database using 'connect_to_database'") return False return True
def _check_database(self, raise_error: bool=True) -> bool: '\n A function which checks if the database is connected.\n\n :param raise_error: A bool indicating if the function should raise an error if the database is not connected.\n :type raise_error: bool\n :rtype: bool\n :return: If the database is connected.\n :raises: DatabaseNotConnected: The database is not connected.\n ' if (not self.database): if raise_error: raise DatabaseNotConnected(f"Database not connected. Connect this manager to a database using 'connect_to_database'") return False return True<|docstring|>A function which checks if the database is connected. :param raise_error: A bool indicating if the function should raise an error if the database is not connected. :type raise_error: bool :rtype: bool :return: If the database is connected. :raises: DatabaseNotConnected: The database is not connected.<|endoftext|>
743c1da30599bcec61ffc9df8b348fdaea1c277da3ee4c1034b82f0f6c4564d8
async def connect_to_database(self, database: Database, tables: List[str] = None) -> None:
    """
    Connects to the database.
    Calls on_database_connect when connected.

    :param database: The database to connect to.
    :type database: Database
    :param tables: The tables to create (incase they do not exist).
    :type tables: List[str]
    :rtype: None
    :return: None
    """
    # Fall back to the default identifiers when no (or a mismatched number
    # of) table names were supplied.
    if not tables or len(tables) != len(self.table_identifiers):
        tables = self.table_identifiers

    for table, column_data, identifier in zip(
        tables, self.tables_column_data, self.table_identifiers
    ):
        # Build column types suited to the concrete database backend.
        column_types = generate_column_types(column_data.values(), type(database.database))
        columns = dict(zip(list(column_data), column_types)) if column_types else None
        await database.create_table(table, columns, True)
        self.database = database
        self.tables[identifier] = table

    await self.call_event('on_database_connect')
Connects to the database. Calls on_database_connect when connected. :param database: The database to connect to. :type database: Database :param tables: The tables to create (incase they do not exist). :type tables: List[str] :rtype: None :return: None
discordSuperUtils/base.py
connect_to_database
Bainble0211/discord-super-utils
91
python
async def connect_to_database(self, database: Database, tables: List[str]=None) -> None: '\n Connects to the database.\n Calls on_database_connect when connected.\n\n :param database: The database to connect to.\n :type database: Database\n :param tables: The tables to create (incase they do not exist).\n :type tables: List[str]\n :rtype: None\n :return: None\n ' if ((not tables) or (len(tables) != len(self.table_identifiers))): tables = self.table_identifiers for (table, table_data, identifier) in zip(tables, self.tables_column_data, self.table_identifiers): types = generate_column_types(table_data.values(), type(database.database)) (await database.create_table(table, (dict(zip(list(table_data), types)) if types else None), True)) self.database = database self.tables[identifier] = table (await self.call_event('on_database_connect'))
async def connect_to_database(self, database: Database, tables: List[str]=None) -> None: '\n Connects to the database.\n Calls on_database_connect when connected.\n\n :param database: The database to connect to.\n :type database: Database\n :param tables: The tables to create (incase they do not exist).\n :type tables: List[str]\n :rtype: None\n :return: None\n ' if ((not tables) or (len(tables) != len(self.table_identifiers))): tables = self.table_identifiers for (table, table_data, identifier) in zip(tables, self.tables_column_data, self.table_identifiers): types = generate_column_types(table_data.values(), type(database.database)) (await database.create_table(table, (dict(zip(list(table_data), types)) if types else None), True)) self.database = database self.tables[identifier] = table (await self.call_event('on_database_connect'))<|docstring|>Connects to the database. Calls on_database_connect when connected. :param database: The database to connect to. :type database: Database :param tables: The tables to create (incase they do not exist). :type tables: List[str] :rtype: None :return: None<|endoftext|>
fad292730a501cf50ae900e4ec50e5e7dab27a8d02176314a743723f1e314814
def convert_scores_to_classes(scores, anomaly_ratio):
    """
    Converts list of scores to flags (0/1) - top anomalies are marked as 1.

    :param scores: iterable of anomaly scores (higher means more anomalous).
    :param anomaly_ratio: fraction of points to flag as anomalies (0..1).
    :return: numpy array of 0.0/1.0 flags, same length as ``scores``.
    """
    scores = np.asarray(scores)
    anomaly_cnt = int(len(scores) * anomaly_ratio)
    y_pred = np.zeros(len(scores))

    # Bug fix: with anomaly_cnt == 0 the original slice `argsort()[-0:]`
    # selected EVERY index (because -0 == 0), flagging all points as
    # anomalies instead of none.
    if anomaly_cnt > 0:
        anomaly_indices = scores.argsort()[-anomaly_cnt:][::-1]
        np.put(y_pred, anomaly_indices, 1)

    return y_pred
Converts list of scores to flags (0/1) - top anomalies are marked as 1.
docker/flat/test_loda.py
convert_scores_to_classes
bergloman/ad_examples
0
python
def convert_scores_to_classes(scores, anomaly_ratio): '\n \n ' anomaly_cnt = int((len(scores) * anomaly_ratio)) anomaly_indices = np.array(scores).argsort()[(- anomaly_cnt):][::(- 1)] y_pred = np.zeros(len(scores)) np.put(y_pred, anomaly_indices, 1) return y_pred
def convert_scores_to_classes(scores, anomaly_ratio): '\n \n ' anomaly_cnt = int((len(scores) * anomaly_ratio)) anomaly_indices = np.array(scores).argsort()[(- anomaly_cnt):][::(- 1)] y_pred = np.zeros(len(scores)) np.put(y_pred, anomaly_indices, 1) return y_pred<|docstring|>Converts list of scores to flags (0/1) - top anomalies are marked as 1.<|endoftext|>
4886298f6036e7646c8000981a1803c6c9a17274e1855534cb34846cd52c2aac
def adiciona_inicio_fim(self, frase):
    """
    Adds a randomly selected opening or closing phrase to the text.

    Picks a random template from ``self.lista_frases``, substitutes the
    ``[Bom dia]`` greeting placeholder according to the current hour, and
    attaches the template before the text (``[inicio]`` templates) or
    after it (all others).
    """
    template = random.choice(self.lista_frases)

    # Pick the greeting matching the time of day.
    hora_atual = int(datetime.datetime.now().hour)
    if hora_atual <= 11:
        saudacao = 'Bom dia'
    elif hora_atual >= 19:
        saudacao = 'Boa noite'
    else:
        saudacao = 'Boa tarde'
    template = template.replace('[Bom dia]', saudacao)

    corpo = template.replace('[inicio]', '').replace('[fim]', '')
    # '[inicio]' templates are prefixes; everything else is a suffix.
    if 'inicio' in template:
        return corpo + frase
    return frase + corpo
adiciona ao texto um início ou fim aleatoriamente selecionado
discourse_ordering.py
adiciona_inicio_fim
YanSym/Amazonia_Azul_Reporter
2
python
def adiciona_inicio_fim(self, frase): '\n \n ' valor = random.choice(self.lista_frases) hora_atual = int(datetime.datetime.now().hour) if (hora_atual <= 11): valor = valor.replace('[Bom dia]', 'Bom dia') elif (hora_atual >= 19): valor = valor.replace('[Bom dia]', 'Boa noite') else: valor = valor.replace('[Bom dia]', 'Boa tarde') if ('inicio' in valor): return (valor.replace('[inicio]', ).replace('[fim]', ) + frase) else: return (frase + valor.replace('[inicio]', ).replace('[fim]', ))
def adiciona_inicio_fim(self, frase): '\n \n ' valor = random.choice(self.lista_frases) hora_atual = int(datetime.datetime.now().hour) if (hora_atual <= 11): valor = valor.replace('[Bom dia]', 'Bom dia') elif (hora_atual >= 19): valor = valor.replace('[Bom dia]', 'Boa noite') else: valor = valor.replace('[Bom dia]', 'Boa tarde') if ('inicio' in valor): return (valor.replace('[inicio]', ).replace('[fim]', ) + frase) else: return (frase + valor.replace('[inicio]', ).replace('[fim]', ))<|docstring|>adiciona ao texto um início ou fim aleatoriamente selecionado<|endoftext|>
2da9195b78317c4fd59e169d200f3f7000b1939234c2d9b6ef65168d89c8516a
def discourse_ordering(self, intent, frase):
    """
    Applies discourse ordering to the text.

    When *frase* contains the ``[permuta]`` marker, the three segments
    delimited by ``[0]``/``[1]``/``[2]`` are shuffled and joined into one
    sentence; otherwise the text is passed through unchanged. Either way a
    random opening/closing phrase is attached via ``adiciona_inicio_fim``.
    """
    if '[permuta]' not in frase:
        return self.adiciona_inicio_fim(frase)

    # Split the template into a fixed head plus three permutable parts.
    cabeca = frase.split('[0]')[0]
    parte_1 = frase.split('[0]')[1].split('[1]')[0]
    parte_2 = frase.split('[1]')[1].split('[2]')[0]
    parte_3 = frase.split('[2]')[1]

    partes = [parte_1, parte_2, parte_3]
    random.shuffle(partes)

    montada = cabeca + partes[0] + ', a ' + partes[1] + ' e a ' + partes[2] + '.'
    # Strip all remaining markers from the assembled sentence.
    for marcador in ('[permuta]', '[0]', '[1]', '[2]'):
        montada = montada.replace(marcador, '')
    return self.adiciona_inicio_fim(montada)
aplica ordenação do discurso ao texto
discourse_ordering.py
discourse_ordering
YanSym/Amazonia_Azul_Reporter
2
python
def discourse_ordering(self, intent, frase): '\n \n ' if ('[permuta]' in frase): frase_0 = frase.split('[0]')[0] frase_1 = frase.split('[0]')[1].split('[1]')[0] frase_2 = frase.split('[1]')[1].split('[2]')[0] frase_3 = frase.split('[2]')[1] lista_frases = [frase_1, frase_2, frase_3] random.shuffle(lista_frases) frase_final = ((((((frase_0 + lista_frases[0]) + ', a ') + lista_frases[1]) + ' e a ') + lista_frases[2]) + '.') frase_final = frase_final.replace('[permuta]', ).replace('[0]', ).replace('[1]', ).replace('[2]', ) return self.adiciona_inicio_fim(frase_final) else: return self.adiciona_inicio_fim(frase)
def discourse_ordering(self, intent, frase): '\n \n ' if ('[permuta]' in frase): frase_0 = frase.split('[0]')[0] frase_1 = frase.split('[0]')[1].split('[1]')[0] frase_2 = frase.split('[1]')[1].split('[2]')[0] frase_3 = frase.split('[2]')[1] lista_frases = [frase_1, frase_2, frase_3] random.shuffle(lista_frases) frase_final = ((((((frase_0 + lista_frases[0]) + ', a ') + lista_frases[1]) + ' e a ') + lista_frases[2]) + '.') frase_final = frase_final.replace('[permuta]', ).replace('[0]', ).replace('[1]', ).replace('[2]', ) return self.adiciona_inicio_fim(frase_final) else: return self.adiciona_inicio_fim(frase)<|docstring|>aplica ordenação do discurso ao texto<|endoftext|>
b5881e33ed048adb7ec5395045b72bdc6e08ed679e209744e37a8681bff1183f
def run(self):
    """
    The executive part of the program.

    Abstract: subclasses must override this method.

    :raises NotImplementedError: always, when called on the abstract base.
    """
    message = 'Do not create instance of abstract class ' + self.__class__.__name__
    raise NotImplementedError(message)
Výkonná část programu
py/lib/cmdLine/processor.py
run
ivomarvan/ESP8266_RTC_memory_compression
1
python
def run(self): '\n \n ' raise NotImplementedError(('Do not create instance of abstract class ' + self.__class__.__name__))
def run(self): '\n \n ' raise NotImplementedError(('Do not create instance of abstract class ' + self.__class__.__name__))<|docstring|>Výkonná část programu<|endoftext|>
69a82dfd3b7192ceb1bf7ef210df56133807dd7ae41cfc8815b55405e0663672
def motor_on_then_off():
    """toggles the motor."""
    # Drive the motor for 2.5 s, then let it rest for 1 s.
    on_seconds = 2.5
    off_seconds = 1.0
    MOTOR.value = True
    time.sleep(on_seconds)
    MOTOR.value = False
    time.sleep(off_seconds)
toggles the motor.
MetroX_CircuitPython/motor/code.py
motor_on_then_off
Pikime/Adafruit_Learning_System_Guides
665
python
def motor_on_then_off(): on_time = 2.5 off_time = 1.0 MOTOR.value = True time.sleep(on_time) MOTOR.value = False time.sleep(off_time)
def motor_on_then_off(): on_time = 2.5 off_time = 1.0 MOTOR.value = True time.sleep(on_time) MOTOR.value = False time.sleep(off_time)<|docstring|>toggles the motor.<|endoftext|>
36ff1e4d31e90f205495d11a27e533cd80f82b163a4cee79d74023ac04077761
def get_user_info(request):
    """
    Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed
    and setup
    """
    # Bail out early when Flask-Login is unavailable or not configured.
    if not has_flask_login:
        return
    if not hasattr(current_app, 'login_manager'):
        return

    try:
        is_authenticated = current_user.is_authenticated()
    except AttributeError:
        # Outside a request context there is no current user.
        return {}

    if not is_authenticated:
        return {
            'is_authenticated': False,
            'is_anonymous': current_user.is_anonymous(),
        }

    user_info = {
        'is_authenticated': True,
        'is_anonymous': current_user.is_anonymous(),
        'id': current_user.get_id(),
    }
    # Copy over any extra attributes the app asked Sentry to record.
    if 'SENTRY_USER_ATTRS' in current_app.config:
        for attr in current_app.config['SENTRY_USER_ATTRS']:
            if hasattr(current_user, attr):
                user_info[attr] = getattr(current_user, attr)
    return user_info
Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed and setup
vendor-local/lib/python/raven/contrib/flask/utils.py
get_user_info
RAMilewski/airmozilla
1
python
def get_user_info(request): '\n Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed\n and setup\n ' if (not has_flask_login): return if (not hasattr(current_app, 'login_manager')): return try: is_authenticated = current_user.is_authenticated() except AttributeError: return {} if is_authenticated: user_info = {'is_authenticated': True, 'is_anonymous': current_user.is_anonymous(), 'id': current_user.get_id()} if ('SENTRY_USER_ATTRS' in current_app.config): for attr in current_app.config['SENTRY_USER_ATTRS']: if hasattr(current_user, attr): user_info[attr] = getattr(current_user, attr) else: user_info = {'is_authenticated': False, 'is_anonymous': current_user.is_anonymous()} return user_info
def get_user_info(request): '\n Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed\n and setup\n ' if (not has_flask_login): return if (not hasattr(current_app, 'login_manager')): return try: is_authenticated = current_user.is_authenticated() except AttributeError: return {} if is_authenticated: user_info = {'is_authenticated': True, 'is_anonymous': current_user.is_anonymous(), 'id': current_user.get_id()} if ('SENTRY_USER_ATTRS' in current_app.config): for attr in current_app.config['SENTRY_USER_ATTRS']: if hasattr(current_user, attr): user_info[attr] = getattr(current_user, attr) else: user_info = {'is_authenticated': False, 'is_anonymous': current_user.is_anonymous()} return user_info<|docstring|>Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed and setup<|endoftext|>
af4dc1235c18cf769f85a6f0b6e73c9df6eef762b85f4a7c4e4e2e7221a03d90
def _create_command():
    """

    Creates PycharmTestCommand that inherits real Command class.
    Wrapped to method to make it is not called when module loaded but only when django fully initialized (lazy)

    """

    # Subclass the real 'test' command class resolved at call time, so Django
    # is fully initialized before the base class is looked up.
    class PycharmTestCommand(ManagementUtility().fetch_command('test').__class__):

        def get_runner(self):
            # Resolve the dotted path to the PyCharm test runner callable.
            TEST_RUNNER = 'django_test_runner.run_tests'
            test_path = TEST_RUNNER.split('.')
            # Everything before the last dot is the module; the last
            # component is the attribute to fetch from it.
            if (len(test_path) > 1):
                test_module_name = '.'.join(test_path[:(- 1)])
            else:
                test_module_name = '.'
            test_module = __import__(test_module_name, {}, {}, test_path[(- 1)])
            test_runner = getattr(test_module, test_path[(- 1)])
            return test_runner

        def handle(self, *test_labels, **options):
            commands = management.get_commands()
            # Wire up South's syncdb replacement when South is installed,
            # unless migrations are explicitly disabled for tests.
            if (hasattr(settings, 'SOUTH_TESTS_MIGRATE') and (not settings.SOUTH_TESTS_MIGRATE)):
                commands['syncdb'] = 'django.core'
            elif ('south' in settings.INSTALLED_APPS):
                try:
                    from south.management.commands import MigrateAndSyncCommand
                    commands['syncdb'] = MigrateAndSyncCommand()
                    from south.hacks import hacks
                    # Older South versions lack this hook — apply only if present.
                    if hasattr(hacks, 'patch_flush_during_test_db_creation'):
                        hacks.patch_flush_during_test_db_creation()
                except ImportError:
                    # South import failed; fall back to the stock syncdb.
                    commands['syncdb'] = 'django.core'
            verbosity = int(options.get('verbosity', 1))
            interactive = options.get('interactive', True)
            failfast = options.get('failfast', False)
            TestRunner = self.get_runner()
            # Newer Django runners are classes (instantiated then run);
            # legacy runners are plain functions taking keyword options.
            if (not inspect.ismethod(TestRunner)):
                our_options = {'verbosity': int(verbosity), 'interactive': interactive, 'failfast': failfast}
                options.update(our_options)
                failures = TestRunner(test_labels, **options)
            else:
                test_runner = TestRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
                failures = test_runner.run_tests(test_labels)
            # Non-zero exit signals test failures to the IDE.
            if failures:
                sys.exit(bool(failures))
    return PycharmTestCommand()
Creates PycharmTestCommand that inherits real Command class. Wrapped to method to make it is not called when module loaded but only when django fully initialized (lazy)
python/helpers/pycharm/django_test_manage.py
_create_command
dunno99/intellij-community
2
python
def _create_command(): '\n\n Creates PycharmTestCommand that inherits real Command class.\n Wrapped to method to make it is not called when module loaded but only when django fully initialized (lazy)\n\n ' class PycharmTestCommand(ManagementUtility().fetch_command('test').__class__): def get_runner(self): TEST_RUNNER = 'django_test_runner.run_tests' test_path = TEST_RUNNER.split('.') if (len(test_path) > 1): test_module_name = '.'.join(test_path[:(- 1)]) else: test_module_name = '.' test_module = __import__(test_module_name, {}, {}, test_path[(- 1)]) test_runner = getattr(test_module, test_path[(- 1)]) return test_runner def handle(self, *test_labels, **options): commands = management.get_commands() if (hasattr(settings, 'SOUTH_TESTS_MIGRATE') and (not settings.SOUTH_TESTS_MIGRATE)): commands['syncdb'] = 'django.core' elif ('south' in settings.INSTALLED_APPS): try: from south.management.commands import MigrateAndSyncCommand commands['syncdb'] = MigrateAndSyncCommand() from south.hacks import hacks if hasattr(hacks, 'patch_flush_during_test_db_creation'): hacks.patch_flush_during_test_db_creation() except ImportError: commands['syncdb'] = 'django.core' verbosity = int(options.get('verbosity', 1)) interactive = options.get('interactive', True) failfast = options.get('failfast', False) TestRunner = self.get_runner() if (not inspect.ismethod(TestRunner)): our_options = {'verbosity': int(verbosity), 'interactive': interactive, 'failfast': failfast} options.update(our_options) failures = TestRunner(test_labels, **options) else: test_runner = TestRunner(verbosity=verbosity, interactive=interactive, failfast=failfast) failures = test_runner.run_tests(test_labels) if failures: sys.exit(bool(failures)) return PycharmTestCommand()
def _create_command(): '\n\n Creates PycharmTestCommand that inherits real Command class.\n Wrapped to method to make it is not called when module loaded but only when django fully initialized (lazy)\n\n ' class PycharmTestCommand(ManagementUtility().fetch_command('test').__class__): def get_runner(self): TEST_RUNNER = 'django_test_runner.run_tests' test_path = TEST_RUNNER.split('.') if (len(test_path) > 1): test_module_name = '.'.join(test_path[:(- 1)]) else: test_module_name = '.' test_module = __import__(test_module_name, {}, {}, test_path[(- 1)]) test_runner = getattr(test_module, test_path[(- 1)]) return test_runner def handle(self, *test_labels, **options): commands = management.get_commands() if (hasattr(settings, 'SOUTH_TESTS_MIGRATE') and (not settings.SOUTH_TESTS_MIGRATE)): commands['syncdb'] = 'django.core' elif ('south' in settings.INSTALLED_APPS): try: from south.management.commands import MigrateAndSyncCommand commands['syncdb'] = MigrateAndSyncCommand() from south.hacks import hacks if hasattr(hacks, 'patch_flush_during_test_db_creation'): hacks.patch_flush_during_test_db_creation() except ImportError: commands['syncdb'] = 'django.core' verbosity = int(options.get('verbosity', 1)) interactive = options.get('interactive', True) failfast = options.get('failfast', False) TestRunner = self.get_runner() if (not inspect.ismethod(TestRunner)): our_options = {'verbosity': int(verbosity), 'interactive': interactive, 'failfast': failfast} options.update(our_options) failures = TestRunner(test_labels, **options) else: test_runner = TestRunner(verbosity=verbosity, interactive=interactive, failfast=failfast) failures = test_runner.run_tests(test_labels) if failures: sys.exit(bool(failures)) return PycharmTestCommand()<|docstring|>Creates PycharmTestCommand that inherits real Command class. Wrapped to method to make it is not called when module loaded but only when django fully initialized (lazy)<|endoftext|>
010aedf45724ed3197ea1491dfce37c53d10d0947d8e7c1a26aa42f96067a5b4
def rolling_window(array, window=(0,), asteps=None, wsteps=None, axes=None, toend=True): 'Create a view of `array` which for every point gives the n-dimensional\n neighbourhood of size window. New dimensions are added at the end of\n `array` or after the corresponding original dimension.\n \n Parameters\n ----------\n array : array_like\n Array to which the rolling window is applied.\n window : int or tuple\n Either a single integer to create a window of only the last axis or a\n tuple to create it for the last len(window) axes. 0 can be used as a\n to ignore a dimension in the window.\n asteps : tuple\n Aligned at the last axis, new steps for the original array, ie. for\n creation of non-overlapping windows. (Equivalent to slicing result)\n wsteps : int or tuple (same size as window)\n steps for the added window dimensions. These can be 0 to repeat values\n along the axis.\n axes: int or tuple\n If given, must have the same size as window. In this case window is\n interpreted as the size in the dimension given by axes. IE. a window\n of (2, 1) is equivalent to window=2 and axis=-2. \n toend : bool\n If False, the new dimensions are right after the corresponding original\n dimension, instead of at the end of the array. Adding the new axes at the\n end makes it easier to get the neighborhood, however toend=False will give\n a more intuitive result if you view the whole array.\n \n Returns\n -------\n A view on `array` which is smaller to fit the windows and has windows added\n dimensions (0s not counting), ie. 
every point of `array` is an array of size\n window.\n ' array = np.asarray(array) orig_shape = np.asarray(array.shape) window = np.atleast_1d(window).astype(int) if (axes is not None): axes = np.atleast_1d(axes) w = np.zeros(array.ndim, dtype=int) for (axis, size) in zip(axes, window): w[axis] = size window = w if (window.ndim > 1): raise ValueError('`window` must be one-dimensional.') if np.any((window < 0)): raise ValueError('All elements of `window` must be larger then 1.') if (len(array.shape) < len(window)): raise ValueError('`window` length must be less or equal `array` dimension.') _asteps = np.ones_like(orig_shape) if (asteps is not None): asteps = np.atleast_1d(asteps) if (asteps.ndim != 1): raise ValueError('`asteps` must be either a scalar or one dimensional.') if (len(asteps) > array.ndim): raise ValueError('`asteps` cannot be longer then the `array` dimension.') _asteps[(- len(asteps)):] = asteps if np.any((asteps < 1)): raise ValueError('All elements of `asteps` must be larger then 1.') asteps = _asteps _wsteps = np.ones_like(window) if (wsteps is not None): wsteps = np.atleast_1d(wsteps) if (wsteps.shape != window.shape): raise ValueError('`wsteps` must have the same shape as `window`.') if np.any((wsteps < 0)): raise ValueError('All elements of `wsteps` must be larger then 0.') _wsteps[:] = wsteps _wsteps[(window == 0)] = 1 wsteps = _wsteps if np.any((orig_shape[(- len(window)):] < (window * wsteps))): raise ValueError('`window` * `wsteps` larger then `array` in at least one dimension.') new_shape = orig_shape _window = window.copy() _window[(_window == 0)] = 1 new_shape[(- len(window)):] += (wsteps - (_window * wsteps)) new_shape = (((new_shape + asteps) - 1) // asteps) new_shape[(new_shape < 1)] = 1 shape = new_shape strides = np.asarray(array.strides) strides *= asteps new_strides = (array.strides[(- len(window)):] * wsteps) if toend: new_shape = np.concatenate((shape, window)) new_strides = np.concatenate((strides, new_strides)) else: _ = 
np.zeros_like(shape) _[(- len(window)):] = window _window = _.copy() _[(- len(window)):] = new_strides _new_strides = _ new_shape = np.zeros((len(shape) * 2), dtype=int) new_strides = np.zeros((len(shape) * 2), dtype=int) new_shape[::2] = shape new_strides[::2] = strides new_shape[1::2] = _window new_strides[1::2] = _new_strides new_strides = new_strides[(new_shape != 0)] new_shape = new_shape[(new_shape != 0)] return np.lib.stride_tricks.as_strided(array, shape=new_shape, strides=new_strides)
Create a view of `array` which for every point gives the n-dimensional neighbourhood of size window. New dimensions are added at the end of `array` or after the corresponding original dimension. Parameters ---------- array : array_like Array to which the rolling window is applied. window : int or tuple Either a single integer to create a window of only the last axis or a tuple to create it for the last len(window) axes. 0 can be used as a to ignore a dimension in the window. asteps : tuple Aligned at the last axis, new steps for the original array, ie. for creation of non-overlapping windows. (Equivalent to slicing result) wsteps : int or tuple (same size as window) steps for the added window dimensions. These can be 0 to repeat values along the axis. axes: int or tuple If given, must have the same size as window. In this case window is interpreted as the size in the dimension given by axes. IE. a window of (2, 1) is equivalent to window=2 and axis=-2. toend : bool If False, the new dimensions are right after the corresponding original dimension, instead of at the end of the array. Adding the new axes at the end makes it easier to get the neighborhood, however toend=False will give a more intuitive result if you view the whole array. Returns ------- A view on `array` which is smaller to fit the windows and has windows added dimensions (0s not counting), ie. every point of `array` is an array of size window.
src/analysis_tools.py
rolling_window
tonychang-cspinc/AIforEarth-Forest-Disturbance
0
python
def rolling_window(array, window=(0,), asteps=None, wsteps=None, axes=None, toend=True): 'Create a view of `array` which for every point gives the n-dimensional\n neighbourhood of size window. New dimensions are added at the end of\n `array` or after the corresponding original dimension.\n \n Parameters\n ----------\n array : array_like\n Array to which the rolling window is applied.\n window : int or tuple\n Either a single integer to create a window of only the last axis or a\n tuple to create it for the last len(window) axes. 0 can be used as a\n to ignore a dimension in the window.\n asteps : tuple\n Aligned at the last axis, new steps for the original array, ie. for\n creation of non-overlapping windows. (Equivalent to slicing result)\n wsteps : int or tuple (same size as window)\n steps for the added window dimensions. These can be 0 to repeat values\n along the axis.\n axes: int or tuple\n If given, must have the same size as window. In this case window is\n interpreted as the size in the dimension given by axes. IE. a window\n of (2, 1) is equivalent to window=2 and axis=-2. \n toend : bool\n If False, the new dimensions are right after the corresponding original\n dimension, instead of at the end of the array. Adding the new axes at the\n end makes it easier to get the neighborhood, however toend=False will give\n a more intuitive result if you view the whole array.\n \n Returns\n -------\n A view on `array` which is smaller to fit the windows and has windows added\n dimensions (0s not counting), ie. 
every point of `array` is an array of size\n window.\n ' array = np.asarray(array) orig_shape = np.asarray(array.shape) window = np.atleast_1d(window).astype(int) if (axes is not None): axes = np.atleast_1d(axes) w = np.zeros(array.ndim, dtype=int) for (axis, size) in zip(axes, window): w[axis] = size window = w if (window.ndim > 1): raise ValueError('`window` must be one-dimensional.') if np.any((window < 0)): raise ValueError('All elements of `window` must be larger then 1.') if (len(array.shape) < len(window)): raise ValueError('`window` length must be less or equal `array` dimension.') _asteps = np.ones_like(orig_shape) if (asteps is not None): asteps = np.atleast_1d(asteps) if (asteps.ndim != 1): raise ValueError('`asteps` must be either a scalar or one dimensional.') if (len(asteps) > array.ndim): raise ValueError('`asteps` cannot be longer then the `array` dimension.') _asteps[(- len(asteps)):] = asteps if np.any((asteps < 1)): raise ValueError('All elements of `asteps` must be larger then 1.') asteps = _asteps _wsteps = np.ones_like(window) if (wsteps is not None): wsteps = np.atleast_1d(wsteps) if (wsteps.shape != window.shape): raise ValueError('`wsteps` must have the same shape as `window`.') if np.any((wsteps < 0)): raise ValueError('All elements of `wsteps` must be larger then 0.') _wsteps[:] = wsteps _wsteps[(window == 0)] = 1 wsteps = _wsteps if np.any((orig_shape[(- len(window)):] < (window * wsteps))): raise ValueError('`window` * `wsteps` larger then `array` in at least one dimension.') new_shape = orig_shape _window = window.copy() _window[(_window == 0)] = 1 new_shape[(- len(window)):] += (wsteps - (_window * wsteps)) new_shape = (((new_shape + asteps) - 1) // asteps) new_shape[(new_shape < 1)] = 1 shape = new_shape strides = np.asarray(array.strides) strides *= asteps new_strides = (array.strides[(- len(window)):] * wsteps) if toend: new_shape = np.concatenate((shape, window)) new_strides = np.concatenate((strides, new_strides)) else: _ = 
np.zeros_like(shape) _[(- len(window)):] = window _window = _.copy() _[(- len(window)):] = new_strides _new_strides = _ new_shape = np.zeros((len(shape) * 2), dtype=int) new_strides = np.zeros((len(shape) * 2), dtype=int) new_shape[::2] = shape new_strides[::2] = strides new_shape[1::2] = _window new_strides[1::2] = _new_strides new_strides = new_strides[(new_shape != 0)] new_shape = new_shape[(new_shape != 0)] return np.lib.stride_tricks.as_strided(array, shape=new_shape, strides=new_strides)
def rolling_window(array, window=(0,), asteps=None, wsteps=None, axes=None, toend=True): 'Create a view of `array` which for every point gives the n-dimensional\n neighbourhood of size window. New dimensions are added at the end of\n `array` or after the corresponding original dimension.\n \n Parameters\n ----------\n array : array_like\n Array to which the rolling window is applied.\n window : int or tuple\n Either a single integer to create a window of only the last axis or a\n tuple to create it for the last len(window) axes. 0 can be used as a\n to ignore a dimension in the window.\n asteps : tuple\n Aligned at the last axis, new steps for the original array, ie. for\n creation of non-overlapping windows. (Equivalent to slicing result)\n wsteps : int or tuple (same size as window)\n steps for the added window dimensions. These can be 0 to repeat values\n along the axis.\n axes: int or tuple\n If given, must have the same size as window. In this case window is\n interpreted as the size in the dimension given by axes. IE. a window\n of (2, 1) is equivalent to window=2 and axis=-2. \n toend : bool\n If False, the new dimensions are right after the corresponding original\n dimension, instead of at the end of the array. Adding the new axes at the\n end makes it easier to get the neighborhood, however toend=False will give\n a more intuitive result if you view the whole array.\n \n Returns\n -------\n A view on `array` which is smaller to fit the windows and has windows added\n dimensions (0s not counting), ie. 
every point of `array` is an array of size\n window.\n ' array = np.asarray(array) orig_shape = np.asarray(array.shape) window = np.atleast_1d(window).astype(int) if (axes is not None): axes = np.atleast_1d(axes) w = np.zeros(array.ndim, dtype=int) for (axis, size) in zip(axes, window): w[axis] = size window = w if (window.ndim > 1): raise ValueError('`window` must be one-dimensional.') if np.any((window < 0)): raise ValueError('All elements of `window` must be larger then 1.') if (len(array.shape) < len(window)): raise ValueError('`window` length must be less or equal `array` dimension.') _asteps = np.ones_like(orig_shape) if (asteps is not None): asteps = np.atleast_1d(asteps) if (asteps.ndim != 1): raise ValueError('`asteps` must be either a scalar or one dimensional.') if (len(asteps) > array.ndim): raise ValueError('`asteps` cannot be longer then the `array` dimension.') _asteps[(- len(asteps)):] = asteps if np.any((asteps < 1)): raise ValueError('All elements of `asteps` must be larger then 1.') asteps = _asteps _wsteps = np.ones_like(window) if (wsteps is not None): wsteps = np.atleast_1d(wsteps) if (wsteps.shape != window.shape): raise ValueError('`wsteps` must have the same shape as `window`.') if np.any((wsteps < 0)): raise ValueError('All elements of `wsteps` must be larger then 0.') _wsteps[:] = wsteps _wsteps[(window == 0)] = 1 wsteps = _wsteps if np.any((orig_shape[(- len(window)):] < (window * wsteps))): raise ValueError('`window` * `wsteps` larger then `array` in at least one dimension.') new_shape = orig_shape _window = window.copy() _window[(_window == 0)] = 1 new_shape[(- len(window)):] += (wsteps - (_window * wsteps)) new_shape = (((new_shape + asteps) - 1) // asteps) new_shape[(new_shape < 1)] = 1 shape = new_shape strides = np.asarray(array.strides) strides *= asteps new_strides = (array.strides[(- len(window)):] * wsteps) if toend: new_shape = np.concatenate((shape, window)) new_strides = np.concatenate((strides, new_strides)) else: _ = 
np.zeros_like(shape) _[(- len(window)):] = window _window = _.copy() _[(- len(window)):] = new_strides _new_strides = _ new_shape = np.zeros((len(shape) * 2), dtype=int) new_strides = np.zeros((len(shape) * 2), dtype=int) new_shape[::2] = shape new_strides[::2] = strides new_shape[1::2] = _window new_strides[1::2] = _new_strides new_strides = new_strides[(new_shape != 0)] new_shape = new_shape[(new_shape != 0)] return np.lib.stride_tricks.as_strided(array, shape=new_shape, strides=new_strides)<|docstring|>Create a view of `array` which for every point gives the n-dimensional neighbourhood of size window. New dimensions are added at the end of `array` or after the corresponding original dimension. Parameters ---------- array : array_like Array to which the rolling window is applied. window : int or tuple Either a single integer to create a window of only the last axis or a tuple to create it for the last len(window) axes. 0 can be used as a to ignore a dimension in the window. asteps : tuple Aligned at the last axis, new steps for the original array, ie. for creation of non-overlapping windows. (Equivalent to slicing result) wsteps : int or tuple (same size as window) steps for the added window dimensions. These can be 0 to repeat values along the axis. axes: int or tuple If given, must have the same size as window. In this case window is interpreted as the size in the dimension given by axes. IE. a window of (2, 1) is equivalent to window=2 and axis=-2. toend : bool If False, the new dimensions are right after the corresponding original dimension, instead of at the end of the array. Adding the new axes at the end makes it easier to get the neighborhood, however toend=False will give a more intuitive result if you view the whole array. Returns ------- A view on `array` which is smaller to fit the windows and has windows added dimensions (0s not counting), ie. every point of `array` is an array of size window.<|endoftext|>
51d5668b6c798002467fcecce50c54f4b60d0c26fe860a47171909e10537b295
def rolling_window_multichannel(array, window=(0,), stride=None, channel_last=True, agg=True): '\n performs the rolling window function for many channels\n ' if (not channel_last): array = np.moveaxis(array, 0, (- 1)) (w, h, c) = array.shape out_shape = (np.floor((((w - window[0]) / stride[0]) + 1)).astype(int), np.floor((((h - window[1]) / stride[1]) + 1)).astype(int)) if (not agg): mc_array = np.empty((out_shape[0], out_shape[1], window[0], window[1], c)).astype('float32') for i in range(c): mc_array[(..., i)] = rolling_window(array[(..., i)], window=(window[0], window[1]), asteps=stride) else: mc_array = np.empty((out_shape[0], out_shape[1], c)).astype('float32') for i in range(c): mc_array[(..., i)] = np.mean(rolling_window(array[(..., i)], window=(window[0], window[1]), asteps=stride), axis=(2, 3)) return mc_array
performs the rolling window function for many channels
src/analysis_tools.py
rolling_window_multichannel
tonychang-cspinc/AIforEarth-Forest-Disturbance
0
python
def rolling_window_multichannel(array, window=(0,), stride=None, channel_last=True, agg=True): '\n \n ' if (not channel_last): array = np.moveaxis(array, 0, (- 1)) (w, h, c) = array.shape out_shape = (np.floor((((w - window[0]) / stride[0]) + 1)).astype(int), np.floor((((h - window[1]) / stride[1]) + 1)).astype(int)) if (not agg): mc_array = np.empty((out_shape[0], out_shape[1], window[0], window[1], c)).astype('float32') for i in range(c): mc_array[(..., i)] = rolling_window(array[(..., i)], window=(window[0], window[1]), asteps=stride) else: mc_array = np.empty((out_shape[0], out_shape[1], c)).astype('float32') for i in range(c): mc_array[(..., i)] = np.mean(rolling_window(array[(..., i)], window=(window[0], window[1]), asteps=stride), axis=(2, 3)) return mc_array
def rolling_window_multichannel(array, window=(0,), stride=None, channel_last=True, agg=True): '\n \n ' if (not channel_last): array = np.moveaxis(array, 0, (- 1)) (w, h, c) = array.shape out_shape = (np.floor((((w - window[0]) / stride[0]) + 1)).astype(int), np.floor((((h - window[1]) / stride[1]) + 1)).astype(int)) if (not agg): mc_array = np.empty((out_shape[0], out_shape[1], window[0], window[1], c)).astype('float32') for i in range(c): mc_array[(..., i)] = rolling_window(array[(..., i)], window=(window[0], window[1]), asteps=stride) else: mc_array = np.empty((out_shape[0], out_shape[1], c)).astype('float32') for i in range(c): mc_array[(..., i)] = np.mean(rolling_window(array[(..., i)], window=(window[0], window[1]), asteps=stride), axis=(2, 3)) return mc_array<|docstring|>performs the rolling window function for many channels<|endoftext|>
81845ab8f72ebd2a5b2feae1867ed2b29c22f81fe888b209f730d8bf8c834c7f
def timeseries_rwm(array, window=(0,), stride=None, channel_last=True, agg=True): '\n performs the rolling window function for many channels\n ' (t, w, h, c) = array.shape t_array = [] for i in range(t): t_array.append(rolling_window_multichannel(array[i], window=window, stride=stride, channel_last=channel_last, agg=agg)) return np.moveaxis(np.array(t_array), 0, 2)
performs the rolling window function for many channels
src/analysis_tools.py
timeseries_rwm
tonychang-cspinc/AIforEarth-Forest-Disturbance
0
python
def timeseries_rwm(array, window=(0,), stride=None, channel_last=True, agg=True): '\n \n ' (t, w, h, c) = array.shape t_array = [] for i in range(t): t_array.append(rolling_window_multichannel(array[i], window=window, stride=stride, channel_last=channel_last, agg=agg)) return np.moveaxis(np.array(t_array), 0, 2)
def timeseries_rwm(array, window=(0,), stride=None, channel_last=True, agg=True): '\n \n ' (t, w, h, c) = array.shape t_array = [] for i in range(t): t_array.append(rolling_window_multichannel(array[i], window=window, stride=stride, channel_last=channel_last, agg=agg)) return np.moveaxis(np.array(t_array), 0, 2)<|docstring|>performs the rolling window function for many channels<|endoftext|>
cd1d3410d28ea6661a6967a79f84c32f83bae63f89740596ce365d9b42c4534e
def export_tif(image, ref_tif, outname, bands=None, dtype=gdal.GDT_Float32, metadata=None, bandmeta=None, verbose=True): '\n Input a numpy array image and a reference geotif \n to convert image to geotiff of same geotransform\n and projection. Note, if alpha_mask is not None,\n creates a 4 channel geotiff (alpha as last channel)\n\n Parameters:\n -----------\n image - 3D <numpy array>\n ref_tif - Geotiff reference <gdal object> of same dimensions\n outname - <str> file name to output to (use .tif extension)\n dtype - <str> denoting data type for GeoTiff. Defaults to 8 bit image,\n but can use gdal.GDT_Float32\n ' gt = ref_tif.GetGeoTransform() proj = ref_tif.GetProjection() xsize = np.shape(image)[1] ysize = np.shape(image)[0] if (bands is None): bands = ref_tif.RasterCount driver = gdal.GetDriverByName('GTiff') out = driver.Create(outname, xsize, ysize, bands, dtype) out.SetGeoTransform(gt) out.SetProjection(proj) if (metadata is not None): out.SetMetadata(metadata) if (bands == 1): band = out.GetRasterBand(1) band.WriteArray(image) if (bandmeta is not None): band.SetMetadata(bandmeta) else: for i in range(bands): band = out.GetRasterBand((i + 1)) band.WriteArray(image[(:, :, i)]) if (bandmeta is not None): band.SetMetadata(bandmeta[i]) out = None if verbose: return print(('created %s' % outname))
Input a numpy array image and a reference geotif to convert image to geotiff of same geotransform and projection. Note, if alpha_mask is not None, creates a 4 channel geotiff (alpha as last channel) Parameters: ----------- image - 3D <numpy array> ref_tif - Geotiff reference <gdal object> of same dimensions outname - <str> file name to output to (use .tif extension) dtype - <str> denoting data type for GeoTiff. Defaults to 8 bit image, but can use gdal.GDT_Float32
src/analysis_tools.py
export_tif
tonychang-cspinc/AIforEarth-Forest-Disturbance
0
python
def export_tif(image, ref_tif, outname, bands=None, dtype=gdal.GDT_Float32, metadata=None, bandmeta=None, verbose=True): '\n Input a numpy array image and a reference geotif \n to convert image to geotiff of same geotransform\n and projection. Note, if alpha_mask is not None,\n creates a 4 channel geotiff (alpha as last channel)\n\n Parameters:\n -----------\n image - 3D <numpy array>\n ref_tif - Geotiff reference <gdal object> of same dimensions\n outname - <str> file name to output to (use .tif extension)\n dtype - <str> denoting data type for GeoTiff. Defaults to 8 bit image,\n but can use gdal.GDT_Float32\n ' gt = ref_tif.GetGeoTransform() proj = ref_tif.GetProjection() xsize = np.shape(image)[1] ysize = np.shape(image)[0] if (bands is None): bands = ref_tif.RasterCount driver = gdal.GetDriverByName('GTiff') out = driver.Create(outname, xsize, ysize, bands, dtype) out.SetGeoTransform(gt) out.SetProjection(proj) if (metadata is not None): out.SetMetadata(metadata) if (bands == 1): band = out.GetRasterBand(1) band.WriteArray(image) if (bandmeta is not None): band.SetMetadata(bandmeta) else: for i in range(bands): band = out.GetRasterBand((i + 1)) band.WriteArray(image[(:, :, i)]) if (bandmeta is not None): band.SetMetadata(bandmeta[i]) out = None if verbose: return print(('created %s' % outname))
def export_tif(image, ref_tif, outname, bands=None, dtype=gdal.GDT_Float32, metadata=None, bandmeta=None, verbose=True): '\n Input a numpy array image and a reference geotif \n to convert image to geotiff of same geotransform\n and projection. Note, if alpha_mask is not None,\n creates a 4 channel geotiff (alpha as last channel)\n\n Parameters:\n -----------\n image - 3D <numpy array>\n ref_tif - Geotiff reference <gdal object> of same dimensions\n outname - <str> file name to output to (use .tif extension)\n dtype - <str> denoting data type for GeoTiff. Defaults to 8 bit image,\n but can use gdal.GDT_Float32\n ' gt = ref_tif.GetGeoTransform() proj = ref_tif.GetProjection() xsize = np.shape(image)[1] ysize = np.shape(image)[0] if (bands is None): bands = ref_tif.RasterCount driver = gdal.GetDriverByName('GTiff') out = driver.Create(outname, xsize, ysize, bands, dtype) out.SetGeoTransform(gt) out.SetProjection(proj) if (metadata is not None): out.SetMetadata(metadata) if (bands == 1): band = out.GetRasterBand(1) band.WriteArray(image) if (bandmeta is not None): band.SetMetadata(bandmeta) else: for i in range(bands): band = out.GetRasterBand((i + 1)) band.WriteArray(image[(:, :, i)]) if (bandmeta is not None): band.SetMetadata(bandmeta[i]) out = None if verbose: return print(('created %s' % outname))<|docstring|>Input a numpy array image and a reference geotif to convert image to geotiff of same geotransform and projection. Note, if alpha_mask is not None, creates a 4 channel geotiff (alpha as last channel) Parameters: ----------- image - 3D <numpy array> ref_tif - Geotiff reference <gdal object> of same dimensions outname - <str> file name to output to (use .tif extension) dtype - <str> denoting data type for GeoTiff. Defaults to 8 bit image, but can use gdal.GDT_Float32<|endoftext|>
bb1dc6f80d8062f2c2916819779e7bdbc30972fcf9381e3806d830660bf7440a
def landsat_simplify(src, outname): '\n Takes in a 10 band landsat file and converts it to a uint8 file\n with only 7 bands. \n Bands: 1,2,3,4,5,7,QA \n ' df = gdal.Open(src) gt = df.GetGeoTransform() proj = df.GetProjection() xsize = df.RasterXSize() ysize = df.RasterYSize() bands = 7 im = df.ReadAsArray() out_im = im[:5]
Takes in a 10 band landsat file and converts it to a uint8 file with only 7 bands. Bands: 1,2,3,4,5,7,QA
src/analysis_tools.py
landsat_simplify
tonychang-cspinc/AIforEarth-Forest-Disturbance
0
python
def landsat_simplify(src, outname): '\n Takes in a 10 band landsat file and converts it to a uint8 file\n with only 7 bands. \n Bands: 1,2,3,4,5,7,QA \n ' df = gdal.Open(src) gt = df.GetGeoTransform() proj = df.GetProjection() xsize = df.RasterXSize() ysize = df.RasterYSize() bands = 7 im = df.ReadAsArray() out_im = im[:5]
def landsat_simplify(src, outname): '\n Takes in a 10 band landsat file and converts it to a uint8 file\n with only 7 bands. \n Bands: 1,2,3,4,5,7,QA \n ' df = gdal.Open(src) gt = df.GetGeoTransform() proj = df.GetProjection() xsize = df.RasterXSize() ysize = df.RasterYSize() bands = 7 im = df.ReadAsArray() out_im = im[:5]<|docstring|>Takes in a 10 band landsat file and converts it to a uint8 file with only 7 bands. Bands: 1,2,3,4,5,7,QA<|endoftext|>
c629bd54f91690e61cce1f65bbd992e098a8ca709904e3534f8429b620859dfa
def landsat7_destripe(src, outname=None): '\n performs a gdal_fillnodata.py on src file\n ' if (outname is not None): copyfile(src, outname) to_destripe = outname else: to_destripe = src bandcount = gdal.Open(to_destripe).RasterCount bands = [0, 1, 2, 3, 4, 6, 9] mask = (~ np.isnan(gdal.Open(to_destripe).ReadAsArray())) maskfile = './data/ex/tempmask.tif' for i in range(1, bandcount): export_tif(mask[(i - 1)], gdal.Open(to_destripe), outname=maskfile, bands=1, dtype=gdal.GDT_Byte) src_ds = gdal.Open(to_destripe, gdalconst.GA_Update) srcband = src_ds.GetRasterBand(i) mask_ds = gdal.Open(maskfile) maskband = mask_ds.GetRasterBand(1) gdal.FillNodata(srcband, maskband, maxSearchDist=5, smoothingIterations=0) srcband = None maskband = None del src_ds del mask_ds return print(('%s made!' % outname))
performs a gdal_fillnodata.py on src file
src/analysis_tools.py
landsat7_destripe
tonychang-cspinc/AIforEarth-Forest-Disturbance
0
python
def landsat7_destripe(src, outname=None): '\n \n ' if (outname is not None): copyfile(src, outname) to_destripe = outname else: to_destripe = src bandcount = gdal.Open(to_destripe).RasterCount bands = [0, 1, 2, 3, 4, 6, 9] mask = (~ np.isnan(gdal.Open(to_destripe).ReadAsArray())) maskfile = './data/ex/tempmask.tif' for i in range(1, bandcount): export_tif(mask[(i - 1)], gdal.Open(to_destripe), outname=maskfile, bands=1, dtype=gdal.GDT_Byte) src_ds = gdal.Open(to_destripe, gdalconst.GA_Update) srcband = src_ds.GetRasterBand(i) mask_ds = gdal.Open(maskfile) maskband = mask_ds.GetRasterBand(1) gdal.FillNodata(srcband, maskband, maxSearchDist=5, smoothingIterations=0) srcband = None maskband = None del src_ds del mask_ds return print(('%s made!' % outname))
def landsat7_destripe(src, outname=None): '\n \n ' if (outname is not None): copyfile(src, outname) to_destripe = outname else: to_destripe = src bandcount = gdal.Open(to_destripe).RasterCount bands = [0, 1, 2, 3, 4, 6, 9] mask = (~ np.isnan(gdal.Open(to_destripe).ReadAsArray())) maskfile = './data/ex/tempmask.tif' for i in range(1, bandcount): export_tif(mask[(i - 1)], gdal.Open(to_destripe), outname=maskfile, bands=1, dtype=gdal.GDT_Byte) src_ds = gdal.Open(to_destripe, gdalconst.GA_Update) srcband = src_ds.GetRasterBand(i) mask_ds = gdal.Open(maskfile) maskband = mask_ds.GetRasterBand(1) gdal.FillNodata(srcband, maskband, maxSearchDist=5, smoothingIterations=0) srcband = None maskband = None del src_ds del mask_ds return print(('%s made!' % outname))<|docstring|>performs a gdal_fillnodata.py on src file<|endoftext|>
bc5103edbf8339394bcfe6c3cc279f3a56954cd00caba05d77b15de5ca29b49b
def import_model(model_json, model_weights): '\n Imports a keras model architecture and \n associated weights.abs\n Parameters:\n -----------\n model_json : <str> of keras model in json\n format\n\n model_weights : <str> of keras model parameters weights\n ' json_file = open(model_json, 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = keras.models.model_from_json(loaded_model_json) loaded_model.load_weights(model_weights) return loaded_model
Imports a keras model architecture and associated weights.abs Parameters: ----------- model_json : <str> of keras model in json format model_weights : <str> of keras model parameters weights
src/analysis_tools.py
import_model
tonychang-cspinc/AIforEarth-Forest-Disturbance
0
python
def import_model(model_json, model_weights): '\n Imports a keras model architecture and \n associated weights.abs\n Parameters:\n -----------\n model_json : <str> of keras model in json\n format\n\n model_weights : <str> of keras model parameters weights\n ' json_file = open(model_json, 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = keras.models.model_from_json(loaded_model_json) loaded_model.load_weights(model_weights) return loaded_model
def import_model(model_json, model_weights): '\n Imports a keras model architecture and \n associated weights.abs\n Parameters:\n -----------\n model_json : <str> of keras model in json\n format\n\n model_weights : <str> of keras model parameters weights\n ' json_file = open(model_json, 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = keras.models.model_from_json(loaded_model_json) loaded_model.load_weights(model_weights) return loaded_model<|docstring|>Imports a keras model architecture and associated weights.abs Parameters: ----------- model_json : <str> of keras model in json format model_weights : <str> of keras model parameters weights<|endoftext|>
8a3644622d32b64a2a19c14de077211bd3f9b4ca7e23d98656b698fcc7b91b95
def plot_confusion_matrix(cm, classes, normalize=False, outname='conf_mat.png', cmap=plt.cm.Blues): '\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n ' if normalize: cm = (cm.astype('float') / cm.sum(axis=1)[(:, np.newaxis)]) title = 'Normalized confusion matrix' print(title) else: title = 'Confusion matrix' print(title) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) cb = plt.colorbar() if normalize: cb.set_label('Percent') else: cb.set_label('$n$') tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = ('.3f' if normalize else 'd') thresh = (cm.max() / 2.0) for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[(i, j)], fmt), horizontalalignment='center', color=('white' if (cm[(i, j)] > thresh) else 'black')) plt.tight_layout() plt.ylabel('Measured label') plt.xlabel('Predicted label') plt.savefig(outname, dpi=300, bbox_inches='tight', padding=False) plt.show() plt.close()
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
src/analysis_tools.py
plot_confusion_matrix
tonychang-cspinc/AIforEarth-Forest-Disturbance
0
python
def plot_confusion_matrix(cm, classes, normalize=False, outname='conf_mat.png', cmap=plt.cm.Blues): '\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n ' if normalize: cm = (cm.astype('float') / cm.sum(axis=1)[(:, np.newaxis)]) title = 'Normalized confusion matrix' print(title) else: title = 'Confusion matrix' print(title) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) cb = plt.colorbar() if normalize: cb.set_label('Percent') else: cb.set_label('$n$') tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = ('.3f' if normalize else 'd') thresh = (cm.max() / 2.0) for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[(i, j)], fmt), horizontalalignment='center', color=('white' if (cm[(i, j)] > thresh) else 'black')) plt.tight_layout() plt.ylabel('Measured label') plt.xlabel('Predicted label') plt.savefig(outname, dpi=300, bbox_inches='tight', padding=False) plt.show() plt.close()
def plot_confusion_matrix(cm, classes, normalize=False, outname='conf_mat.png', cmap=plt.cm.Blues): '\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n ' if normalize: cm = (cm.astype('float') / cm.sum(axis=1)[(:, np.newaxis)]) title = 'Normalized confusion matrix' print(title) else: title = 'Confusion matrix' print(title) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) cb = plt.colorbar() if normalize: cb.set_label('Percent') else: cb.set_label('$n$') tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = ('.3f' if normalize else 'd') thresh = (cm.max() / 2.0) for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[(i, j)], fmt), horizontalalignment='center', color=('white' if (cm[(i, j)] > thresh) else 'black')) plt.tight_layout() plt.ylabel('Measured label') plt.xlabel('Predicted label') plt.savefig(outname, dpi=300, bbox_inches='tight', padding=False) plt.show() plt.close()<|docstring|>This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.<|endoftext|>
557116b154e969874223bb5ab4e7a525580c883e69749dd5c085a8812cddab39
def filter(self, table, volumes, filter_string): 'Naive case-insensitive search.' q = filter_string.lower() return [volume for volume in volumes if (q in volume.name.lower())]
Naive case-insensitive search.
openstack_dashboard/dashboards/project/volumes/tables.py
filter
swapnil2725/horizon
930
python
def filter(self, table, volumes, filter_string): q = filter_string.lower() return [volume for volume in volumes if (q in volume.name.lower())]
def filter(self, table, volumes, filter_string): q = filter_string.lower() return [volume for volume in volumes if (q in volume.name.lower())]<|docstring|>Naive case-insensitive search.<|endoftext|>
879c953861b691715b82bb59113c82081875e76f40461b2890a1b24253739b40
def get_kmi_current_month(include_temperature_equivalent=True, include_heating_degree_days=True, heating_base_temperatures=[16.5], include_cooling_degree_days=True, cooling_base_temperatures=[18], solar_duration_as_minutes=False, include_wind_power=False): '\n Gets the current month table from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html\n and parse it into a Pandas DataFrame\n\n Parameters\n ----------\n include_temperature_equivalent : bool\n include_heating_degree_days : bool\n heating_base_temperatures : list of floats\n include_cooling_degree_days : bool\n cooling_base_temperatures : list of floats\n solar_duration_as_minutes : bool\n include_wind_power : bool\n\n Returns\n -------\n Pandas DataFrame\n ' html = fetch_website() df = parse(html=html, solar_duration_as_minutes=solar_duration_as_minutes) if (include_temperature_equivalent or include_heating_degree_days or include_cooling_degree_days): temp_equiv = calculate_temperature_equivalent(temperatures=df.temp_gem) if include_temperature_equivalent: df = df.join(temp_equiv) if include_heating_degree_days: for base_temperature in heating_base_temperatures: degree_days = calculate_degree_days(temperature_equivalent=temp_equiv, base_temperature=base_temperature) df = df.join(degree_days) if include_cooling_degree_days: for base_temperature in cooling_base_temperatures: degree_days = calculate_degree_days(temperature_equivalent=temp_equiv, base_temperature=base_temperature, cooling=True) df = df.join(degree_days) if include_wind_power: df['wind_power'] = (df.wind_snelh ** 3) return df
Gets the current month table from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html and parse it into a Pandas DataFrame Parameters ---------- include_temperature_equivalent : bool include_heating_degree_days : bool heating_base_temperatures : list of floats include_cooling_degree_days : bool cooling_base_temperatures : list of floats solar_duration_as_minutes : bool include_wind_power : bool Returns ------- Pandas DataFrame
opengrid_dev/library/kmi.py
get_kmi_current_month
opengridcc/opengrid_dev
8
python
def get_kmi_current_month(include_temperature_equivalent=True, include_heating_degree_days=True, heating_base_temperatures=[16.5], include_cooling_degree_days=True, cooling_base_temperatures=[18], solar_duration_as_minutes=False, include_wind_power=False): '\n Gets the current month table from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html\n and parse it into a Pandas DataFrame\n\n Parameters\n ----------\n include_temperature_equivalent : bool\n include_heating_degree_days : bool\n heating_base_temperatures : list of floats\n include_cooling_degree_days : bool\n cooling_base_temperatures : list of floats\n solar_duration_as_minutes : bool\n include_wind_power : bool\n\n Returns\n -------\n Pandas DataFrame\n ' html = fetch_website() df = parse(html=html, solar_duration_as_minutes=solar_duration_as_minutes) if (include_temperature_equivalent or include_heating_degree_days or include_cooling_degree_days): temp_equiv = calculate_temperature_equivalent(temperatures=df.temp_gem) if include_temperature_equivalent: df = df.join(temp_equiv) if include_heating_degree_days: for base_temperature in heating_base_temperatures: degree_days = calculate_degree_days(temperature_equivalent=temp_equiv, base_temperature=base_temperature) df = df.join(degree_days) if include_cooling_degree_days: for base_temperature in cooling_base_temperatures: degree_days = calculate_degree_days(temperature_equivalent=temp_equiv, base_temperature=base_temperature, cooling=True) df = df.join(degree_days) if include_wind_power: df['wind_power'] = (df.wind_snelh ** 3) return df
def get_kmi_current_month(include_temperature_equivalent=True, include_heating_degree_days=True, heating_base_temperatures=[16.5], include_cooling_degree_days=True, cooling_base_temperatures=[18], solar_duration_as_minutes=False, include_wind_power=False): '\n Gets the current month table from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html\n and parse it into a Pandas DataFrame\n\n Parameters\n ----------\n include_temperature_equivalent : bool\n include_heating_degree_days : bool\n heating_base_temperatures : list of floats\n include_cooling_degree_days : bool\n cooling_base_temperatures : list of floats\n solar_duration_as_minutes : bool\n include_wind_power : bool\n\n Returns\n -------\n Pandas DataFrame\n ' html = fetch_website() df = parse(html=html, solar_duration_as_minutes=solar_duration_as_minutes) if (include_temperature_equivalent or include_heating_degree_days or include_cooling_degree_days): temp_equiv = calculate_temperature_equivalent(temperatures=df.temp_gem) if include_temperature_equivalent: df = df.join(temp_equiv) if include_heating_degree_days: for base_temperature in heating_base_temperatures: degree_days = calculate_degree_days(temperature_equivalent=temp_equiv, base_temperature=base_temperature) df = df.join(degree_days) if include_cooling_degree_days: for base_temperature in cooling_base_temperatures: degree_days = calculate_degree_days(temperature_equivalent=temp_equiv, base_temperature=base_temperature, cooling=True) df = df.join(degree_days) if include_wind_power: df['wind_power'] = (df.wind_snelh ** 3) return df<|docstring|>Gets the current month table from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html and parse it into a Pandas DataFrame Parameters ---------- include_temperature_equivalent : bool include_heating_degree_days : bool heating_base_temperatures : list of floats include_cooling_degree_days : bool cooling_base_temperatures : list of floats solar_duration_as_minutes : bool include_wind_power : bool 
Returns ------- Pandas DataFrame<|endoftext|>
e7cd87dc93b015cf2b79f0628565e8ad5581f4e2060d45109cc7b681aee83acb
def fetch_website(url='http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html'): '\n Fetch the website containing the data from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html\n\n Returns\n -------\n str\n ' r = requests.get(url) if (r.status_code == 200): return r.text else: raise Exception('Seems like you got code {}'.format(r.status_code))
Fetch the website containing the data from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html Returns ------- str
opengrid_dev/library/kmi.py
fetch_website
opengridcc/opengrid_dev
8
python
def fetch_website(url='http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html'): '\n Fetch the website containing the data from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html\n\n Returns\n -------\n str\n ' r = requests.get(url) if (r.status_code == 200): return r.text else: raise Exception('Seems like you got code {}'.format(r.status_code))
def fetch_website(url='http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html'): '\n Fetch the website containing the data from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html\n\n Returns\n -------\n str\n ' r = requests.get(url) if (r.status_code == 200): return r.text else: raise Exception('Seems like you got code {}'.format(r.status_code))<|docstring|>Fetch the website containing the data from http://www.meteo.be/meteo/view/nl/123763-Huidige+maand.html Returns ------- str<|endoftext|>