Dataset columns:

- body: string, 26 to 98.2k characters
- body_hash: int64, observed values spanning roughly -9.22e18 to 9.22e18 (close to the full signed 64-bit range)
- docstring: string, 1 to 16.8k characters
- path: string, 5 to 230 characters
- name: string, 1 to 96 characters
- repository_name: string, 7 to 89 characters
- lang: string, a single class ('python' for every row shown)
- body_without_docstring: string, 20 to 98.2k characters

Records follow; each is shown as one metadata line (path, name, repository, lang, body_hash) and the function body with its docstring.
path: dcase_util/datasets/tut.py | name: process_meta_item | repository: ankitshah009/dcase_util | lang: python | body_hash: -3,948,935,948,919,123,000

def process_meta_item(self, item, absolute_path=True, **kwargs):
    """Process single meta data item

    Parameters
    ----------
    item : MetaDataItem
        Meta data item

    absolute_path : bool
        Convert file paths to be absolute
        Default value True

    """
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)

    raw_path, raw_filename = os.path.split(item.filename)
    item.identifier = os.path.splitext(raw_filename)[0]
    item.source_label = 'mixture'
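A minimal sketch of the identifier derivation in the tail of process_meta_item above: split off the directory, then drop the extension. The filename is a made-up example.

import os

# Hypothetical relative path, mirroring the audio layout used by these datasets:
filename = os.path.join('audio', 'residential_area', 'b020.wav')

raw_path, raw_filename = os.path.split(filename)    # ('audio/residential_area', 'b020.wav')
identifier = os.path.splitext(raw_filename)[0]      # 'b020'
print(raw_path, raw_filename, identifier)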
path: dcase_util/datasets/tut.py | name: prepare | repository: ankitshah009/dcase_util | lang: python | body_hash: -7,578,620,383,298,427,000

def prepare(self):
    """Prepare dataset for the usage.

    Returns
    -------
    self

    """
    if not self.meta_container.exists():
        meta_data = MetaDataContainer()
        annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
        for annotation_filename in annotation_files:
            scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]
            identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]
            audio_filename = os.path.join('audio', scene_label, identifier + '.wav')
            data = MetaDataContainer(filename=annotation_filename).load()
            for item in data:
                item.filename = audio_filename
                item.scene_label = scene_label
                self.process_meta_item(item=item, absolute_path=False)
            meta_data += data
        meta_data.save(filename=self.meta_file)

    self.load()

    return self
path: dcase_util/datasets/tut.py | name: __init__ | repository: ankitshah009/dcase_util | lang: python | body_hash: 5,616,250,338,590,877,000

def __init__(self, storage_name='TUT-sound-events-2016-evaluation',
             data_path=None, included_content_types=None, **kwargs):
    """
    Constructor

    Parameters
    ----------

    storage_name : str
        Name to be used when storing dataset on disk
        Default value 'TUT-sound-events-2016-evaluation'

    data_path : str
        Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
        is used.
        Default value None

    included_content_types : list of str or str
        Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
        'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
        Default value None

    """
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'event'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Sound Events 2016, evaluation dataset',
        'url': 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = None

    source_url = 'https://zenodo.org/record/996424/files/'
    kwargs['package_list'] = [
        {
            'content_type': 'documentation',
            'remote_file': source_url + 'TUT-sound-events-2016-evaluation.doc.zip',
            'remote_bytes': 69834,
            'remote_md5': '0644b54d96f4cefd0ecb2c7ea9161aa9',
            'filename': 'TUT-sound-events-2016-evaluation.doc.zip'
        },
        {
            'content_type': 'meta',
            'remote_file': source_url + 'TUT-sound-events-2016-evaluation.meta.zip',
            'remote_bytes': 41608,
            'remote_md5': '91c266b0780ac619a0d74298a3805e9e',
            'filename': 'TUT-sound-events-2016-evaluation.meta.zip'
        },
        {
            'content_type': 'audio',
            'remote_file': source_url + 'TUT-sound-events-2016-evaluation.audio.zip',
            'remote_bytes': 471072452,
            'remote_md5': '29434e8c53bd51206df0234e6cf2238c',
            'filename': 'TUT-sound-events-2016-evaluation.audio.zip'
        }
    ]
    kwargs['audio_paths'] = [
        os.path.join('audio', 'home'),
        os.path.join('audio', 'residential_area')
    ]
    super(TUTSoundEvents_2016_EvaluationSet, self).__init__(**kwargs)
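A hedged usage sketch for the class above, assuming the standard dcase_util Dataset API in which initialize() downloads the archives listed in package_list, extracts them, and runs prepare(). The data_path value is a made-up example.

from dcase_util.datasets import TUTSoundEvents_2016_EvaluationSet

db = TUTSoundEvents_2016_EvaluationSet(data_path='datasets')
db.initialize()  # assumption: download + extract + prepare, per the base Dataset API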
path: dcase_util/datasets/tut.py | name: prepare | repository: ankitshah009/dcase_util | lang: python | body_hash: 1,593,265,729,418,320,000

def prepare(self):
    """Prepare dataset for the usage.

    Returns
    -------
    self

    """
    if not self.meta_container.exists() and os.path.isdir(os.path.join(self.local_path, 'meta')):
        meta_data = MetaDataContainer()
        annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
        for annotation_filename in annotation_files:
            scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]
            identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]
            audio_filename = os.path.join('audio', scene_label, identifier + '.wav')
            data = MetaDataContainer(filename=annotation_filename).load(decimal='comma')
            for item in data:
                item.filename = audio_filename
                item.scene_label = scene_label
                self.process_meta_item(item=item, absolute_path=False)
            meta_data += data
        meta_data.save(filename=self.meta_file)

    self.load()

    return self
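This variant differs from the earlier prepare() in the os.path.isdir guard and in load(decimal='comma'). A tiny sketch (made-up value) of the locale issue that option presumably handles: annotation timestamps written with a decimal comma would be misread by default float parsing.

value = '1,234'                              # onset time meaning 1.234 seconds
seconds = float(value.replace(',', '.'))     # 1.234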
path: dcase_util/datasets/tut.py | name: __init__ | repository: ankitshah009/dcase_util | lang: python | body_hash: -5,004,241,706,890,039,000

def __init__(self, storage_name='TUT-SED-synthetic-2016',
             data_path=None, included_content_types=None, **kwargs):
    """
    Constructor

    Parameters
    ----------

    storage_name : str
        Name to be used when storing dataset on disk
        Default value 'TUT-SED-synthetic-2016'

    data_path : str
        Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
        is used.
        Default value None

    included_content_types : list of str or str
        Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
        'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
        Default value None

    """
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'event'
    kwargs['dataset_meta'] = {
        'authors': 'Emre Cakir',
        'title': 'TUT-SED Synthetic 2016',
        'url': 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/tut-sed-synthetic-2016',
        'audio_source': 'Field recording',
        'audio_type': 'Synthetic',
        'recording_device_model': 'Unknown',
        'microphone_model': 'Unknown'
    }
    kwargs['crossvalidation_folds'] = 1

    source_url = 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/datasets/TUT-SED-synthetic-2016/'
    kwargs['package_list'] = [
        {
            'content_type': 'meta',
            'remote_file': source_url + 'TUT-SED-synthetic-2016.meta.zip',
            'remote_bytes': 973618,
            'remote_md5': 'e2ae895bdf39f2a359a97bb0bcf76101',
            'filename': 'TUT-SED-synthetic-2016.meta.zip'
        },
        {
            'content_type': 'audio',
            'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.1.zip',
            'remote_bytes': 1026369647,
            'remote_md5': 'ede8b9c6d1b0d1d64bfc5791404f58fb',
            'filename': 'TUT-SED-synthetic-2016.audio.1.zip'
        },
        {
            'content_type': 'audio',
            'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.2.zip',
            'remote_bytes': 1018650039,
            'remote_md5': 'cde647a377a58fc74e3012139d65c447',
            'filename': 'TUT-SED-synthetic-2016.audio.2.zip'
        },
        {
            'content_type': 'audio',
            'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.3.zip',
            'remote_bytes': 1070239392,
            'remote_md5': '5fc2824dcce442f441f4c6a975881789',
            'filename': 'TUT-SED-synthetic-2016.audio.3.zip'
        },
        {
            'content_type': 'audio',
            'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.4.zip',
            'remote_bytes': 1040622610,
            'remote_md5': '4ba016d949171ccc8493d3d274009825',
            'filename': 'TUT-SED-synthetic-2016.audio.4.zip'
        },
        {
            'content_type': 'audio',
            'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.5.zip',
            'remote_bytes': 264812997,
            'remote_md5': '6a44578dd7738bd4ba044d5d2b9a5448',
            'filename': 'TUT-SED-synthetic-2016.audio.5.zip'
        },
        {
            'content_type': 'features',
            'remote_file': source_url + 'TUT-SED-synthetic-2016.features.zip',
            'remote_bytes': 480894082,
            'remote_md5': '66bc0abc19a276986964a6d4a2d2f6bc',
            'filename': 'TUT-SED-synthetic-2016.features.zip'
        }
    ]
    kwargs['audio_paths'] = ['audio']
    super(TUT_SED_Synthetic_2016, self).__init__(**kwargs)
path: dcase_util/datasets/tut.py | name: prepare | repository: ankitshah009/dcase_util | lang: python | body_hash: -6,557,052,325,882,180,000

def prepare(self):
    """Prepare dataset for the usage.

    Returns
    -------
    self

    """
    if not self.meta_container.exists():
        meta_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['txt'])
        meta_data = MetaDataContainer()
        for meta_filename in meta_files:
            audio_filename = os.path.join('audio', os.path.split(meta_filename)[1].replace('.txt', '.wav'))
            data = MetaDataContainer(filename=meta_filename).load()
            for item in data:
                item.filename = audio_filename
                item.scene_label = 'synthetic'
                item.source_label = 'm'
                self.process_meta_item(item=item, absolute_path=False)
            meta_data += data
        meta_data.save(filename=self.meta_file)

    self.load()

    return self
path: dcase_util/datasets/tut.py | name: file_features | repository: ankitshah009/dcase_util | lang: python | body_hash: 428,636,534,373,538,800

def file_features(self, filename):
    """Pre-calculated acoustic features for given file

    Parameters
    ----------
    filename : str
        File name

    Returns
    -------
    data : numpy.ndarray
        Matrix containing acoustic features

    """
    filename_ = self.absolute_to_relative_path(filename).replace('audio/', 'features/')
    filename_ = os.path.splitext(filename_)[0] + '.cpickle'
    if os.path.isfile(os.path.join(self.local_path, filename_)):
        feature_data = pickle.load(open(os.path.join(self.local_path, filename_), 'rb'))
        return feature_data['feat']
    else:
        return None
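The body above opens the pickle without closing it. A minimal stand-alone sketch of the same lookup (hypothetical helper name and arguments) using a context manager so the file handle is released deterministically:

import os
import pickle

def load_features(local_path, relative_filename):
    # Map audio path -> features path, swap the extension, then unpickle.
    filename_ = os.path.splitext(relative_filename.replace('audio/', 'features/'))[0] + '.cpickle'
    full_path = os.path.join(local_path, filename_)
    if os.path.isfile(full_path):
        with open(full_path, 'rb') as f:
            feature_data = pickle.load(f)
        return feature_data['feat']
    return None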
path: sgdr_callback.py | name: _reset | repository: Callidior/semantic-embeddings | lang: python | body_hash: -8,164,284,948,579,837,000

def _reset(self, new_min_lr=None, new_max_lr=None, new_base_epochs=None, new_mul_epochs=None):
    """Resets cycle iterations."""
    if new_min_lr != None:
        self.min_lr = new_min_lr
    if new_max_lr != None:
        self.max_lr = new_max_lr
    if new_base_epochs != None:
        self.base_epochs = new_base_epochs
    if new_mul_epochs != None:
        self.mul_epochs = new_mul_epochs
    self.cycles = 0.0
    self.cycle_iterations = 0.0
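The _reset above only clears the cycle counters of an SGDR callback. For context, a sketch of the cosine warm-restart schedule such a callback typically computes (Loshchilov & Hutter, 2017); this is an illustrative formula under that assumption, not the callback's actual code, and the default rates are made up.

import math

def sgdr_lr(cycle_iterations, iterations_in_cycle, min_lr=1e-5, max_lr=1e-1):
    # Cosine annealing from max_lr down to min_lr within one restart cycle.
    frac = cycle_iterations / iterations_in_cycle
    return min_lr + 0.5 * (max_lr - min_lr) * (1.0 + math.cos(math.pi * frac))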
path: pytest_selenium_enhancer/plugin.py | name: selenium_patcher | repository: popescunsergiu/pytest-selenium-enhancer | lang: python | body_hash: 1,832,878,463,908,379,400

@pytest.fixture(scope='session')
def selenium_patcher():
    """Add custom ."""
    add_custom_commands()
path: tests/cache_tests.py | name: _check_cache_entry | repository: Annie201/pywikibot-core | lang: python | body_hash: -5,294,106,942,538,693,000

def _check_cache_entry(self, entry):
    """Assert validity of the cache entry."""
    self.assertIsInstance(entry.site, BaseSite)
    self.assertIsInstance(entry.site._loginstatus, int)
    self.assertIsInstance(entry.site._username, list)
    if entry.site._loginstatus >= 1:
        self.assertIsNotNone(entry.site._username[0])
    self.assertIsInstance(entry._params, dict)
    self.assertIsNotNone(entry._params)
path: tests/cache_tests.py | name: test_cache | repository: Annie201/pywikibot-core | lang: python | body_hash: -1,055,730,274,172,869,800

def test_cache(self):
    """Test the apicache by doing _check_cache_entry over each entry."""
    cache.process_entries(_cache_dir, self._check_cache_entry)
path: varfish_cli/__main__.py | name: setup_argparse_only | repository: bihealth/varfish-cli | lang: python | body_hash: 1,517,469,603,038,058,800

def setup_argparse_only():
    """Wrapper for ``setup_argparse()`` that only returns the parser.

    Only used in sphinx documentation via ``sphinx-argparse``.
    """
    return setup_argparse()[0]
path: varfish_cli/__main__.py | name: setup_argparse | repository: bihealth/varfish-cli | lang: python | body_hash: -638,400,639,551,440,800

def setup_argparse():
    """Create argument parser."""
    parser = argparse.ArgumentParser(prog='varfish-cli')
    parser.add_argument('--verbose', action='store_true', default=False, help='Increase verbosity.')
    parser.add_argument('--version', action='version', version=('%%(prog)s %s' % __version__))

    group = parser.add_argument_group('Basic Configuration')
    group.add_argument('--no-verify-ssl', dest='verify_ssl', default=True, action='store_false',
                       help='Disable HTTPS SSL verification')
    group.add_argument('--config', default=os.environ.get('VARFISH_CONFIG_PATH', None),
                       help='Path to configuration file.')
    group.add_argument('--varfish-server-url', default=os.environ.get('VARFISH_SERVER_URL', None),
                       help='VarFish server URL key to use, defaults to env VARFISH_SERVER_URL.')
    group.add_argument('--varfish-api-token', default=os.environ.get('VARFISH_API_TOKEN', None),
                       help='VarFish API token to use, defaults to env VARFISH_API_TOKEN.')

    subparsers = parser.add_subparsers(dest='cmd')
    setup_argparse_case(subparsers.add_parser('case', help='Work with cases.'))

    return parser, subparsers
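A hedged usage sketch of the parser built above; it exercises only flags visible in this record, and the token value is made up. Whether bare 'case' parses without a further sub-subcommand depends on what setup_argparse_case registers.

parser, subparsers = setup_argparse()
args = parser.parse_args(['--verbose', '--varfish-api-token', 'XXXX', 'case'])
print(args.cmd, args.verbose)  # expected: 'case' True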
path: varfish_cli/__main__.py | name: main | repository: bihealth/varfish-cli | lang: python | body_hash: 3,123,301,595,510,910,000

def main(argv=None):
    """Main entry point before parsing command line arguments."""
    parser, subparsers = setup_argparse()
    args = parser.parse_args(argv)

    if args.verbose:
        level = logging.DEBUG
    else:
        formatter = logzero.LogFormatter(fmt='%(color)s[%(levelname)1.1s %(asctime)s]%(end_color)s %(message)s')
        logzero.formatter(formatter)
        level = logging.INFO
    logzero.loglevel(level=level)

    if args.config:
        config_paths = (args.config,)
    else:
        config_paths = GLOBAL_CONFIG_PATHS
    for config_path in config_paths:
        config_path = os.path.expanduser(os.path.expandvars(config_path))
        if os.path.exists(config_path):
            with open(config_path, 'rt') as tomlf:
                toml_config = toml.load(tomlf)
            break
    else:
        toml_config = None
        logger.info('Could not find any of the global configuration files %s.', config_paths)

    config = CommonConfig.create(args, toml_config)
    cmds = {None: run_nocmd, 'case': run_case}
    res = cmds[args.cmd](config, toml_config, args, parser,
                         subparsers.choices[args.cmd] if args.cmd else None)
    if not res:
        logger.info('All done. Have a nice day!')
    else:
        logger.error('Something did not work out correctly.')
    return res
path: official/utils/logs/hooks_helper.py | name: get_train_hooks | repository: Mithilesh1609/assembled-cnn | lang: python | body_hash: 8,321,067,302,129,089,000

def get_train_hooks(name_list, use_tpu=False, **kwargs):
    """Factory for getting a list of TensorFlow hooks for training by name.

    Args:
      name_list: a list of strings to name desired hook classes. Allowed:
        LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
        as keys in HOOKS
      use_tpu: Boolean of whether computation occurs on a TPU. This will disable
        hooks altogether.
      **kwargs: a dictionary of arguments to the hooks.

    Returns:
      list of instantiated hooks, ready to be used in a classifier.train call.

    Raises:
      ValueError: if an unrecognized name is passed.
    """
    if not name_list:
        return []

    if use_tpu:
        tf.logging.warning('hooks_helper received name_list `{}`, but a TPU is specified. No hooks will be used.'.format(name_list))
        return []

    train_hooks = []
    for name in name_list:
        hook_name = HOOKS.get(name.strip().lower())
        if hook_name is None:
            raise ValueError('Unrecognized training hook requested: {}'.format(name))
        else:
            train_hooks.append(hook_name(**kwargs))

    return train_hooks
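A hedged usage sketch: names are lowercased before the HOOKS lookup, so mixed-case names from a config file resolve fine, and the same **kwargs are forwarded to every selected hook factory. The estimator object and paths below are hypothetical.

train_hooks = get_train_hooks(
    ['LoggingTensorHook', 'ProfilerHook'],
    model_dir='/tmp/model',   # consumed by get_profiler_hook; ignored by others via **kwargs
    batch_size=128,
)
# classifier is a hypothetical tf.estimator.Estimator:
# classifier.train(input_fn=train_input_fn, hooks=train_hooks)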
path: official/utils/logs/hooks_helper.py | name: get_logging_tensor_hook | repository: Mithilesh1609/assembled-cnn | lang: python | body_hash: -2,488,998,155,569,033,700

def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):
    """Function to get LoggingTensorHook.

    Args:
      every_n_iter: `int`, print the values of `tensors` once every N local
        steps taken on the current worker.
      tensors_to_log: List of tensor names or dictionary mapping labels to tensor
        names. If not set, log _TENSORS_TO_LOG by default.
      **kwargs: a dictionary of arguments to LoggingTensorHook.

    Returns:
      Returns a LoggingTensorHook with a standard set of tensors that will be
      printed to stdout.
    """
    if tensors_to_log is None:
        tensors_to_log = _TENSORS_TO_LOG

    return tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=every_n_iter)
path: official/utils/logs/hooks_helper.py | name: get_profiler_hook | repository: Mithilesh1609/assembled-cnn | lang: python | body_hash: -3,053,697,511,662,991,400

def get_profiler_hook(model_dir, save_steps=1000, **kwargs):
    """Function to get ProfilerHook.

    Args:
      model_dir: The directory to save the profile traces to.
      save_steps: `int`, print profile traces every N steps.
      **kwargs: a dictionary of arguments to ProfilerHook.

    Returns:
      Returns a ProfilerHook that writes out timelines that can be loaded into
      profiling tools like chrome://tracing.
    """
    return tf.train.ProfilerHook(save_steps=save_steps, output_dir=model_dir)
path: official/utils/logs/hooks_helper.py | name: get_examples_per_second_hook | repository: Mithilesh1609/assembled-cnn | lang: python | body_hash: 683,162,254,790,537,200

def get_examples_per_second_hook(every_n_steps=100, batch_size=128, warm_steps=5, **kwargs):
    """Function to get ExamplesPerSecondHook.

    Args:
      every_n_steps: `int`, print current and average examples per second every
        N steps.
      batch_size: `int`, total batch size used to calculate examples/second from
        global time.
      warm_steps: skip this number of steps before logging and running average.
      **kwargs: a dictionary of arguments to ExamplesPerSecondHook.

    Returns:
      Returns a ProfilerHook that writes out timelines that can be loaded into
      profiling tools like chrome://tracing.
    """
    return hooks.ExamplesPerSecondHook(batch_size=batch_size, every_n_steps=every_n_steps,
                                       warm_steps=warm_steps,
                                       metric_logger=logger.get_benchmark_logger())
path: official/utils/logs/hooks_helper.py | name: get_logging_metric_hook | repository: Mithilesh1609/assembled-cnn | lang: python | body_hash: -5,095,878,965,021,634,000

def get_logging_metric_hook(tensors_to_log=None, every_n_secs=600, **kwargs):
    """Function to get LoggingMetricHook.

    Args:
      tensors_to_log: List of tensor names or dictionary mapping labels to tensor
        names. If not set, log _TENSORS_TO_LOG by default.
      every_n_secs: `int`, the frequency for logging the metric. Default to every
        10 mins.

    Returns:
      Returns a LoggingMetricHook that saves tensor values in a JSON format.
    """
    if tensors_to_log is None:
        tensors_to_log = _TENSORS_TO_LOG

    return metric_hook.LoggingMetricHook(tensors=tensors_to_log,
                                         metric_logger=logger.get_benchmark_logger(),
                                         every_n_secs=every_n_secs)
path: pandas/core/base.py | name: _constructor | repository: BryanRacic/pandas | lang: python | body_hash: 1,696,908,129,312,269,800

@property
def _constructor(self):
    """
    Class constructor (for this class it's just `__class__`.
    """
    return type(self)
path: pandas/core/base.py | name: __repr__ | repository: BryanRacic/pandas | lang: python | body_hash: 3,016,805,634,138,606,600

def __repr__(self) -> str:
    """
    Return a string representation for a particular object.
    """
    return object.__repr__(self)
path: pandas/core/base.py | name: _reset_cache | repository: BryanRacic/pandas | lang: python | body_hash: 8,692,063,033,057,934,000

def _reset_cache(self, key: str | None = None) -> None:
    """
    Reset cached properties. If ``key`` is passed, only clears that key.
    """
    if not hasattr(self, '_cache'):
        return
    if key is None:
        self._cache.clear()
    else:
        self._cache.pop(key, None)
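A sketch of the caching pattern _reset_cache supports: expensive properties memoize into a per-instance _cache dict and can be invalidated per key or wholesale. This is an illustrative stand-in, not pandas' actual cache_readonly implementation.

class Cached:
    @property
    def expensive(self):
        # Lazily create the per-instance cache dict on first access.
        cache = getattr(self, '_cache', None)
        if cache is None:
            cache = self._cache = {}
        if 'expensive' not in cache:
            cache['expensive'] = sum(range(10 ** 6))  # placeholder computation
        return cache['expensive']

    def _reset_cache(self, key=None):
        if not hasattr(self, '_cache'):
            return
        if key is None:
            self._cache.clear()
        else:
            self._cache.pop(key, None)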
path: pandas/core/base.py | name: __sizeof__ | repository: BryanRacic/pandas | lang: python | body_hash: -6,205,035,213,557,224,000

def __sizeof__(self) -> int:
    """
    Generates the total memory usage for an object that returns
    either a value or Series of values
    """
    memory_usage = getattr(self, 'memory_usage', None)
    if memory_usage:
        mem = memory_usage(deep=True)
        return int(mem if is_scalar(mem) else mem.sum())

    return super().__sizeof__()
path: pandas/core/base.py | name: _freeze | repository: BryanRacic/pandas | lang: python | body_hash: -7,029,115,408,159,700,000

def _freeze(self):
    """
    Prevents setting additional attributes.
    """
    object.__setattr__(self, '__frozen', True)
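For context, a sketch of how such a __frozen flag is typically enforced: a __setattr__ override rejects new attributes once _freeze() has run. This is an illustrative pattern, not pandas' exact implementation.

class Frozen:
    def _freeze(self):
        object.__setattr__(self, '__frozen', True)

    def __setattr__(self, key, value):
        # After freezing, only already-existing attributes may be rebound.
        if getattr(self, '__frozen', False) and not hasattr(self, key):
            raise AttributeError(f"You cannot add any new attribute '{key}'")
        object.__setattr__(self, key, value)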
path: pandas/core/base.py | name: _gotitem | repository: BryanRacic/pandas | lang: python | body_hash: -4,390,008,202,310,129,000

def _gotitem(self, key, ndim: int, subset=None):
    """
    sub-classes to define
    return a sliced object

    Parameters
    ----------
    key : str / list of selections
    ndim : {1, 2}
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    raise AbstractMethodError(self)
path: pandas/core/base.py | name: transpose | repository: BryanRacic/pandas | lang: python | body_hash: -278,207,969,909,020,400

def transpose(self: _T, *args, **kwargs) -> _T:
    """
    Return the transpose, which is by definition self.

    Returns
    -------
    %(klass)s
    """
    nv.validate_transpose(args, kwargs)
    return self
path: pandas/core/base.py | name: shape | repository: BryanRacic/pandas | lang: python | body_hash: -5,094,044,862,570,053,000

@property
def shape(self) -> Shape:
    """
    Return a tuple of the shape of the underlying data.
    """
    return self._values.shape
path: pandas/core/base.py | name: ndim | repository: BryanRacic/pandas | lang: python | body_hash: -6,934,603,568,630,411,000

@property
def ndim(self) -> int:
    """
    Number of dimensions of the underlying data, by definition 1.
    """
    return 1
path: pandas/core/base.py | name: item | repository: BryanRacic/pandas | lang: python | body_hash: 7,207,348,110,767,913,000

def item(self):
    """
    Return the first element of the underlying data as a Python scalar.

    Returns
    -------
    scalar
        The first element of %(klass)s.

    Raises
    ------
    ValueError
        If the data is not length-1.
    """
    if len(self) == 1:
        return next(iter(self))
    raise ValueError('can only convert an array of size 1 to a Python scalar')
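A short usage example of the length-1 contract above:

import pandas as pd

print(pd.Series([7]).item())      # 7
print(pd.Index(['x']).item())     # 'x'
# pd.Series([1, 2]).item()        # raises ValueError: length is not 1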
path: pandas/core/base.py | name: nbytes | repository: BryanRacic/pandas | lang: python | body_hash: -8,601,156,375,750,126,000

@property
def nbytes(self) -> int:
    """
    Return the number of bytes in the underlying data.
    """
    return self._values.nbytes
path: pandas/core/base.py | name: size | repository: BryanRacic/pandas | lang: python | body_hash: 5,262,765,579,070,325,000

@property
def size(self) -> int:
    """
    Return the number of elements in the underlying data.
    """
    return len(self._values)
path: pandas/core/base.py | name: array | repository: BryanRacic/pandas | lang: python | body_hash: 7,608,311,211,353,715,000

@property
def array(self) -> ExtensionArray:
    """
    The ExtensionArray of the data backing this Series or Index.

    Returns
    -------
    ExtensionArray
        An ExtensionArray of the values stored within. For extension
        types, this is the actual array. For NumPy native types, this
        is a thin (no copy) wrapper around :class:`numpy.ndarray`.

        ``.array`` differs ``.values`` which may require converting the
        data to a different form.

    See Also
    --------
    Index.to_numpy : Similar method that always returns a NumPy array.
    Series.to_numpy : Similar method that always returns a NumPy array.

    Notes
    -----
    This table lays out the different array types for each extension
    dtype within pandas.

    ================== =============================
    dtype              array type
    ================== =============================
    category           Categorical
    period             PeriodArray
    interval           IntervalArray
    IntegerNA          IntegerArray
    string             StringArray
    boolean            BooleanArray
    datetime64[ns, tz] DatetimeArray
    ================== =============================

    For any 3rd-party extension types, the array type will be an
    ExtensionArray.

    For all remaining dtypes ``.array`` will be a
    :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
    stored within. If you absolutely need a NumPy array (possibly with
    copying / coercing data), then use :meth:`Series.to_numpy` instead.

    Examples
    --------
    For regular NumPy types like int, and float, a PandasArray
    is returned.

    >>> pd.Series([1, 2, 3]).array
    <PandasArray>
    [1, 2, 3]
    Length: 3, dtype: int64

    For extension types, like Categorical, the actual ExtensionArray
    is returned

    >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
    >>> ser.array
    ['a', 'b', 'a']
    Categories (2, object): ['a', 'b']
    """
    raise AbstractMethodError(self)
def to_numpy(self, dtype: (npt.DTypeLike | None)=None, copy: bool=False, na_value=lib.no_default, **kwargs) -> np.ndarray: '\n A NumPy ndarray representing the values in this Series or Index.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`.\n copy : bool, default False\n Whether to ensure that the returned value is not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n na_value : Any, optional\n The value to use for missing values. The default value depends\n on `dtype` and the type of the array.\n\n .. versionadded:: 1.0.0\n\n **kwargs\n Additional keywords passed through to the ``to_numpy`` method\n of the underlying array (for extension arrays).\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.array : Get the actual data stored within.\n Index.array : Get the actual data stored within.\n DataFrame.to_numpy : Similar method for DataFrame.\n\n Notes\n -----\n The returned array will be the same up to equality (values equal\n in `self` will be equal in the returned array; likewise for values\n that are not equal). When `self` contains an ExtensionArray, the\n dtype may be different. For example, for a category-dtype Series,\n ``to_numpy()`` will return a NumPy array and the categorical dtype\n will be lost.\n\n For NumPy dtypes, this will be a reference to the actual data stored\n in this Series or Index (assuming ``copy=False``). Modifying the result\n in place will modify the data stored in the Series or Index (not that\n we recommend doing that).\n\n For extension types, ``to_numpy()`` *may* require copying data and\n coercing the result to a NumPy type (possibly object), which may be\n expensive. When you need a no-copy reference to the underlying data,\n :attr:`Series.array` should be used instead.\n\n This table lays out the different dtypes and default return types of\n ``to_numpy()`` for various dtypes within pandas.\n\n ================== ================================\n dtype array type\n ================== ================================\n category[T] ndarray[T] (same dtype as input)\n period ndarray[object] (Periods)\n interval ndarray[object] (Intervals)\n IntegerNA ndarray[object]\n datetime64[ns] datetime64[ns]\n datetime64[ns, tz] ndarray[object] (Timestamps)\n ================== ================================\n\n Examples\n --------\n >>> ser = pd.Series(pd.Categorical([\'a\', \'b\', \'a\']))\n >>> ser.to_numpy()\n array([\'a\', \'b\', \'a\'], dtype=object)\n\n Specify the `dtype` to control how datetime-aware data is represented.\n Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`\n objects, each with the correct ``tz``.\n\n >>> ser = pd.Series(pd.date_range(\'2000\', periods=2, tz="CET"))\n >>> ser.to_numpy(dtype=object)\n array([Timestamp(\'2000-01-01 00:00:00+0100\', tz=\'CET\'),\n Timestamp(\'2000-01-02 00:00:00+0100\', tz=\'CET\')],\n dtype=object)\n\n Or ``dtype=\'datetime64[ns]\'`` to return an ndarray of native\n datetime64 values. The values are converted to UTC and the timezone\n info is dropped.\n\n >>> ser.to_numpy(dtype="datetime64[ns]")\n ... 
# doctest: +ELLIPSIS\n array([\'1999-12-31T23:00:00.000000000\', \'2000-01-01T23:00:00...\'],\n dtype=\'datetime64[ns]\')\n ' if is_extension_array_dtype(self.dtype): return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs) elif kwargs: bad_keys = list(kwargs.keys())[0] raise TypeError(f"to_numpy() got an unexpected keyword argument '{bad_keys}'") result = np.asarray(self._values, dtype=dtype) if (copy or (na_value is not lib.no_default)): result = result.copy() if (na_value is not lib.no_default): result[self.isna()] = na_value return result
4,357,615,367,760,797,000
A NumPy ndarray representing the values in this Series or Index. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the type of the array. .. versionadded:: 1.0.0 **kwargs Additional keywords passed through to the ``to_numpy`` method of the underlying array (for extension arrays). .. versionadded:: 1.0.0 Returns ------- numpy.ndarray See Also -------- Series.array : Get the actual data stored within. Index.array : Get the actual data stored within. DataFrame.to_numpy : Similar method for DataFrame. Notes ----- The returned array will be the same up to equality (values equal in `self` will be equal in the returned array; likewise for values that are not equal). When `self` contains an ExtensionArray, the dtype may be different. For example, for a category-dtype Series, ``to_numpy()`` will return a NumPy array and the categorical dtype will be lost. For NumPy dtypes, this will be a reference to the actual data stored in this Series or Index (assuming ``copy=False``). Modifying the result in place will modify the data stored in the Series or Index (not that we recommend doing that). For extension types, ``to_numpy()`` *may* require copying data and coercing the result to a NumPy type (possibly object), which may be expensive. When you need a no-copy reference to the underlying data, :attr:`Series.array` should be used instead. This table lays out the different dtypes and default return types of ``to_numpy()`` for various dtypes within pandas. ================== ================================ dtype array type ================== ================================ category[T] ndarray[T] (same dtype as input) period ndarray[object] (Periods) interval ndarray[object] (Intervals) IntegerNA ndarray[object] datetime64[ns] datetime64[ns] datetime64[ns, tz] ndarray[object] (Timestamps) ================== ================================ Examples -------- >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) >>> ser.to_numpy() array(['a', 'b', 'a'], dtype=object) Specify the `dtype` to control how datetime-aware data is represented. Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp` objects, each with the correct ``tz``. >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> ser.to_numpy(dtype=object) array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or ``dtype='datetime64[ns]'`` to return an ndarray of native datetime64 values. The values are converted to UTC and the timezone info is dropped. >>> ser.to_numpy(dtype="datetime64[ns]") ... # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], dtype='datetime64[ns]')
pandas/core/base.py
to_numpy
BryanRacic/pandas
python
def to_numpy(self, dtype: (npt.DTypeLike | None)=None, copy: bool=False, na_value=lib.no_default, **kwargs) -> np.ndarray: '\n A NumPy ndarray representing the values in this Series or Index.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`.\n copy : bool, default False\n Whether to ensure that the returned value is not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that\n a copy is made, even if not strictly necessary.\n na_value : Any, optional\n The value to use for missing values. The default value depends\n on `dtype` and the type of the array.\n\n .. versionadded:: 1.0.0\n\n **kwargs\n Additional keywords passed through to the ``to_numpy`` method\n of the underlying array (for extension arrays).\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.array : Get the actual data stored within.\n Index.array : Get the actual data stored within.\n DataFrame.to_numpy : Similar method for DataFrame.\n\n Notes\n -----\n The returned array will be the same up to equality (values equal\n in `self` will be equal in the returned array; likewise for values\n that are not equal). When `self` contains an ExtensionArray, the\n dtype may be different. For example, for a category-dtype Series,\n ``to_numpy()`` will return a NumPy array and the categorical dtype\n will be lost.\n\n For NumPy dtypes, this will be a reference to the actual data stored\n in this Series or Index (assuming ``copy=False``). Modifying the result\n in place will modify the data stored in the Series or Index (not that\n we recommend doing that).\n\n For extension types, ``to_numpy()`` *may* require copying data and\n coercing the result to a NumPy type (possibly object), which may be\n expensive. When you need a no-copy reference to the underlying data,\n :attr:`Series.array` should be used instead.\n\n This table lays out the different dtypes and default return types of\n ``to_numpy()`` for various dtypes within pandas.\n\n ================== ================================\n dtype array type\n ================== ================================\n category[T] ndarray[T] (same dtype as input)\n period ndarray[object] (Periods)\n interval ndarray[object] (Intervals)\n IntegerNA ndarray[object]\n datetime64[ns] datetime64[ns]\n datetime64[ns, tz] ndarray[object] (Timestamps)\n ================== ================================\n\n Examples\n --------\n >>> ser = pd.Series(pd.Categorical([\'a\', \'b\', \'a\']))\n >>> ser.to_numpy()\n array([\'a\', \'b\', \'a\'], dtype=object)\n\n Specify the `dtype` to control how datetime-aware data is represented.\n Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`\n objects, each with the correct ``tz``.\n\n >>> ser = pd.Series(pd.date_range(\'2000\', periods=2, tz="CET"))\n >>> ser.to_numpy(dtype=object)\n array([Timestamp(\'2000-01-01 00:00:00+0100\', tz=\'CET\'),\n Timestamp(\'2000-01-02 00:00:00+0100\', tz=\'CET\')],\n dtype=object)\n\n Or ``dtype=\'datetime64[ns]\'`` to return an ndarray of native\n datetime64 values. The values are converted to UTC and the timezone\n info is dropped.\n\n >>> ser.to_numpy(dtype="datetime64[ns]")\n ... # doctest: +ELLIPSIS\n array([\'1999-12-31T23:00:00.000000000\', \'2000-01-01T23:00:00...\'],\n dtype=\'datetime64[ns]\')\n ' if is_extension_array_dtype(self.dtype): return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs) elif kwargs: bad_keys = list(kwargs.keys())[0] raise TypeError(f"to_numpy() got an unexpected keyword argument '{bad_keys}'") result = np.asarray(self._values, dtype=dtype) if (copy or (na_value is not lib.no_default)): result = result.copy() if (na_value is not lib.no_default): result[self.isna()] = na_value return result
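The copy semantics above are worth seeing concretely. A minimal sketch, assuming the pre-copy-on-write pandas shown in this snapshot (whether copy=False hands back a view is an implementation detail, not a guarantee):

import numpy as np
import pandas as pd

ser = pd.Series([1.0, 2.0, 3.0])

# With the default copy=False, a NumPy-backed Series may return a view,
# so in-place edits can be visible on the Series.
arr = ser.to_numpy()
arr[0] = 99.0
print(ser.iloc[0])  # typically 99.0 under this snapshot

# copy=True guarantees an independent array.
arr2 = ser.to_numpy(copy=True)
arr2[1] = -1.0
print(ser.iloc[1])  # still 2.0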
def max(self, axis=None, skipna: bool=True, *args, **kwargs): "\n Return the maximum value of the Index.\n\n Parameters\n ----------\n axis : int, optional\n For compatibility with NumPy. Only 0 or None are allowed.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n scalar\n Maximum value.\n\n See Also\n --------\n Index.min : Return the minimum value in an Index.\n Series.max : Return the maximum value in a Series.\n DataFrame.max : Return the maximum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.max()\n 3\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.max()\n 'c'\n\n For a MultiIndex, the maximum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.max()\n ('b', 2)\n " nv.validate_minmax_axis(axis) nv.validate_max(args, kwargs) return nanops.nanmax(self._values, skipna=skipna)
4,268,672,646,825,609,700
Return the maximum value of the Index. Parameters ---------- axis : int, optional For compatibility with NumPy. Only 0 or None are allowed. skipna : bool, default True Exclude NA/null values when showing the result. *args, **kwargs Additional arguments and keywords for compatibility with NumPy. Returns ------- scalar Maximum value. See Also -------- Index.min : Return the minimum value in an Index. Series.max : Return the maximum value in a Series. DataFrame.max : Return the maximum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.max() 3 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.max() 'c' For a MultiIndex, the maximum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.max() ('b', 2)
pandas/core/base.py
max
BryanRacic/pandas
python
def max(self, axis=None, skipna: bool=True, *args, **kwargs): "\n Return the maximum value of the Index.\n\n Parameters\n ----------\n axis : int, optional\n For compatibility with NumPy. Only 0 or None are allowed.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n scalar\n Maximum value.\n\n See Also\n --------\n Index.min : Return the minimum value in an Index.\n Series.max : Return the maximum value in a Series.\n DataFrame.max : Return the maximum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.max()\n 3\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.max()\n 'c'\n\n For a MultiIndex, the maximum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.max()\n ('b', 2)\n " nv.validate_minmax_axis(axis) nv.validate_max(args, kwargs) return nanops.nanmax(self._values, skipna=skipna)
@doc(op='max', oppose='min', value='largest') def argmax(self, axis=None, skipna: bool=True, *args, **kwargs) -> int: "\n Return int position of the {value} value in the Series.\n\n If the {op}imum is achieved in multiple locations,\n the first row position is returned.\n\n Parameters\n ----------\n axis : {{None}}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n int\n Row position of the {op}imum value.\n\n See Also\n --------\n Series.arg{op} : Return position of the {op}imum value.\n Series.arg{oppose} : Return position of the {oppose}imum value.\n numpy.ndarray.arg{op} : Equivalent method for numpy arrays.\n Series.idxmax : Return index label of the maximum values.\n Series.idxmin : Return index label of the minimum values.\n\n Examples\n --------\n Consider dataset containing cereal calories\n\n >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,\n ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})\n >>> s\n Corn Flakes 100.0\n Almond Delight 110.0\n Cinnamon Toast Crunch 120.0\n Cocoa Puff 110.0\n dtype: float64\n\n >>> s.argmax()\n 2\n >>> s.argmin()\n 0\n\n The maximum cereal calories is the third element and\n the minimum cereal calories is the first element,\n since series is zero-indexed.\n " delegate = self._values nv.validate_minmax_axis(axis) skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) if isinstance(delegate, ExtensionArray): if ((not skipna) and delegate.isna().any()): return (- 1) else: return delegate.argmax() else: return nanops.nanargmax(delegate, skipna=skipna)
2,756,451,465,106,003,500
Return int position of the {value} value in the Series. If the {op}imum is achieved in multiple locations, the first row position is returned. Parameters ---------- axis : {{None}} Dummy argument for consistency with Series. skipna : bool, default True Exclude NA/null values when showing the result. *args, **kwargs Additional arguments and keywords for compatibility with NumPy. Returns ------- int Row position of the {op}imum value. See Also -------- Series.arg{op} : Return position of the {op}imum value. Series.arg{oppose} : Return position of the {oppose}imum value. numpy.ndarray.arg{op} : Equivalent method for numpy arrays. Series.idxmax : Return index label of the maximum values. Series.idxmin : Return index label of the minimum values. Examples -------- Consider dataset containing cereal calories >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0, ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}}) >>> s Corn Flakes 100.0 Almond Delight 110.0 Cinnamon Toast Crunch 120.0 Cocoa Puff 110.0 dtype: float64 >>> s.argmax() 2 >>> s.argmin() 0 The maximum cereal calories is the third element and the minimum cereal calories is the first element, since series is zero-indexed.
pandas/core/base.py
argmax
BryanRacic/pandas
python
@doc(op='max', oppose='min', value='largest') def argmax(self, axis=None, skipna: bool=True, *args, **kwargs) -> int: "\n Return int position of the {value} value in the Series.\n\n If the {op}imum is achieved in multiple locations,\n the first row position is returned.\n\n Parameters\n ----------\n axis : {{None}}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n int\n Row position of the {op}imum value.\n\n See Also\n --------\n Series.arg{op} : Return position of the {op}imum value.\n Series.arg{oppose} : Return position of the {oppose}imum value.\n numpy.ndarray.arg{op} : Equivalent method for numpy arrays.\n Series.idxmax : Return index label of the maximum values.\n Series.idxmin : Return index label of the minimum values.\n\n Examples\n --------\n Consider dataset containing cereal calories\n\n >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,\n ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})\n >>> s\n Corn Flakes 100.0\n Almond Delight 110.0\n Cinnamon Toast Crunch 120.0\n Cocoa Puff 110.0\n dtype: float64\n\n >>> s.argmax()\n 2\n >>> s.argmin()\n 0\n\n The maximum cereal calories is the third element and\n the minimum cereal calories is the first element,\n since series is zero-indexed.\n " delegate = self._values nv.validate_minmax_axis(axis) skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) if isinstance(delegate, ExtensionArray): if ((not skipna) and delegate.isna().any()): return (- 1) else: return delegate.argmax() else: return nanops.nanargmax(delegate, skipna=skipna)
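Note the ExtensionArray branch above: with skipna=False and a missing value present, argmax returns -1 rather than raising. A short illustration with a nullable integer Series, matching the code as implemented in this snapshot (newer pandas versions may warn or raise instead):

import pandas as pd

s = pd.Series([1, None, 3], dtype="Int64")  # nullable extension dtype
print(s.argmax())               # 2 -- the NA is skipped by default
print(s.argmax(skipna=False))   # -1 -- the ExtensionArray branch above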
def min(self, axis=None, skipna: bool=True, *args, **kwargs): "\n Return the minimum value of the Index.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n scalar\n Minimum value.\n\n See Also\n --------\n Index.max : Return the maximum value of the object.\n Series.min : Return the minimum value in a Series.\n DataFrame.min : Return the minimum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.min()\n 1\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.min()\n 'a'\n\n For a MultiIndex, the minimum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.min()\n ('a', 1)\n " nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return nanops.nanmin(self._values, skipna=skipna)
4,895,894,055,981,468,000
Return the minimum value of the Index. Parameters ---------- axis : {None} Dummy argument for consistency with Series. skipna : bool, default True Exclude NA/null values when showing the result. *args, **kwargs Additional arguments and keywords for compatibility with NumPy. Returns ------- scalar Minimum value. See Also -------- Index.max : Return the maximum value of the object. Series.min : Return the minimum value in a Series. DataFrame.min : Return the minimum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.min() 1 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.min() 'a' For a MultiIndex, the minimum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.min() ('a', 1)
pandas/core/base.py
min
BryanRacic/pandas
python
def min(self, axis=None, skipna: bool=True, *args, **kwargs): "\n Return the minimum value of the Index.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n scalar\n Minimum value.\n\n See Also\n --------\n Index.max : Return the maximum value of the object.\n Series.min : Return the minimum value in a Series.\n DataFrame.min : Return the minimum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.min()\n 1\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.min()\n 'a'\n\n For a MultiIndex, the minimum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.min()\n ('a', 1)\n " nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return nanops.nanmin(self._values, skipna=skipna)
def tolist(self): '\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n list\n\n See Also\n --------\n numpy.ndarray.tolist : Return the array as an a.ndim-levels deep\n nested list of Python scalars.\n ' if (not isinstance(self._values, np.ndarray)): return list(self._values) return self._values.tolist()
4,623,667,165,696,130,000
Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list See Also -------- numpy.ndarray.tolist : Return the array as an a.ndim-levels deep nested list of Python scalars.
pandas/core/base.py
tolist
BryanRacic/pandas
python
def tolist(self): '\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n list\n\n See Also\n --------\n numpy.ndarray.tolist : Return the array as an a.ndim-levels deep\n nested list of Python scalars.\n ' if (not isinstance(self._values, np.ndarray)): return list(self._values) return self._values.tolist()
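A quick sketch of the unboxing rule in tolist: NumPy-backed values come back as plain Python scalars via ndarray.tolist(), while extension-backed values (such as datetimes) come back as pandas scalars via list():

import pandas as pd

s = pd.Series([1, 2, 3])
print(type(s.tolist()[0]))   # <class 'int'> -- numpy int64 unboxed

ts = pd.Series(pd.date_range("2021-01-01", periods=2))
print(type(ts.tolist()[0]))  # pandas Timestamp, a pandas scalar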
def __iter__(self): '\n Return an iterator of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n iterator\n ' if (not isinstance(self._values, np.ndarray)): return iter(self._values) else: return map(self._values.item, range(self._values.size))
6,172,453,236,682,056,000
Return an iterator of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- iterator
pandas/core/base.py
__iter__
BryanRacic/pandas
python
def __iter__(self): '\n Return an iterator of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n iterator\n ' if (not isinstance(self._values, np.ndarray)): return iter(self._values) else: return map(self._values.item, range(self._values.size))
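Iteration follows the same unboxing rule, mapping ndarray.item over the positions for NumPy-backed data. A quick check:

import pandas as pd

s = pd.Series([0.5, 1.5])
for v in s:
    print(type(v))  # <class 'float'> -- ndarray.item() yields Python scalars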
@cache_readonly def hasnans(self) -> bool: '\n Return if I have any nans; enables various perf speedups.\n ' return bool(isna(self).any())
-3,629,634,497,472,234,000
Return if I have any nans; enables various perf speedups.
pandas/core/base.py
hasnans
BryanRacic/pandas
python
@cache_readonly def hasnans(self) -> bool: '\n \n ' return bool(isna(self).any())
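Because hasnans is a cache_readonly property, the isna scan runs once and is then cached on the instance. A tiny usage sketch:

import numpy as np
import pandas as pd

idx = pd.Index([1.0, np.nan])
print(idx.hasnans)                   # True (computed once, then cached)
print(pd.Index([1.0, 2.0]).hasnans)  # False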
def _reduce(self, op, name: str, *, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): '\n Perform the reduction type operation if we can.\n ' func = getattr(self, name, None) if (func is None): raise TypeError(f'{type(self).__name__} cannot perform the operation {name}') return func(skipna=skipna, **kwds)
-6,248,915,611,062,733,000
Perform the reduction type operation if we can.
pandas/core/base.py
_reduce
BryanRacic/pandas
python
def _reduce(self, op, name: str, *, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): '\n \n ' func = getattr(self, name, None) if (func is None): raise TypeError(f'{type(self).__name__} cannot perform the operation {name}') return func(skipna=skipna, **kwds)
@final def _map_values(self, mapper, na_action=None): "\n An internal function that maps values using the input\n correspondence (which can be a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n The input correspondence object\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping function\n\n Returns\n -------\n Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n " if is_dict_like(mapper): if (isinstance(mapper, dict) and hasattr(mapper, '__missing__')): dict_with_default = mapper mapper = (lambda x: dict_with_default[x]) else: mapper = create_series_with_explicit_dtype(mapper, dtype_if_empty=np.float64) if isinstance(mapper, ABCSeries): if is_categorical_dtype(self.dtype): cat = cast('Categorical', self._values) return cat.map(mapper) values = self._values indexer = mapper.index.get_indexer(values) new_values = algorithms.take_nd(mapper._values, indexer) return new_values if (is_extension_array_dtype(self.dtype) and hasattr(self._values, 'map')): values = self._values if (na_action is not None): raise NotImplementedError map_f = (lambda values, f: values.map(f)) else: values = self._values.astype(object) if (na_action == 'ignore'): map_f = (lambda values, f: lib.map_infer_mask(values, f, isna(values).view(np.uint8))) elif (na_action is None): map_f = lib.map_infer else: msg = f"na_action must either be 'ignore' or None, {na_action} was passed" raise ValueError(msg) new_values = map_f(values, mapper) return new_values
4,348,222,714,382,488,000
An internal function that maps values using the input correspondence (which can be a dict, Series, or function). Parameters ---------- mapper : function, dict, or Series The input correspondence object na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping function Returns ------- Union[Index, MultiIndex], inferred The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned.
pandas/core/base.py
_map_values
BryanRacic/pandas
python
@final def _map_values(self, mapper, na_action=None): "\n An internal function that maps values using the input\n correspondence (which can be a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n The input correspondence object\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping function\n\n Returns\n -------\n Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n " if is_dict_like(mapper): if (isinstance(mapper, dict) and hasattr(mapper, '__missing__')): dict_with_default = mapper mapper = (lambda x: dict_with_default[x]) else: mapper = create_series_with_explicit_dtype(mapper, dtype_if_empty=np.float64) if isinstance(mapper, ABCSeries): if is_categorical_dtype(self.dtype): cat = cast('Categorical', self._values) return cat.map(mapper) values = self._values indexer = mapper.index.get_indexer(values) new_values = algorithms.take_nd(mapper._values, indexer) return new_values if (is_extension_array_dtype(self.dtype) and hasattr(self._values, 'map')): values = self._values if (na_action is not None): raise NotImplementedError map_f = (lambda values, f: values.map(f)) else: values = self._values.astype(object) if (na_action == 'ignore'): map_f = (lambda values, f: lib.map_infer_mask(values, f, isna(values).view(np.uint8))) elif (na_action is None): map_f = lib.map_infer else: msg = f"na_action must either be 'ignore' or None, {na_action} was passed" raise ValueError(msg) new_values = map_f(values, mapper) return new_values
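Series.map funnels into _map_values, so the branches above are observable from the public API. A sketch of the dict/Series path and the na_action='ignore' path:

import numpy as np
import pandas as pd

s = pd.Series(["cat", "dog", np.nan])

# Dict mappers are converted to a Series and aligned with
# get_indexer/take_nd; keys that are missing map to NaN.
print(s.map({"cat": "kitten"}).tolist())  # ['kitten', nan, nan]

# na_action='ignore' propagates NaN without calling the function on it,
# so the lambda never sees the missing value.
print(s.map(lambda x: x.upper(), na_action="ignore").tolist())  # ['CAT', 'DOG', nan]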
def value_counts(self, normalize: bool=False, sort: bool=True, ascending: bool=False, bins=None, dropna: bool=True): "\n Return a Series containing counts of unique values.\n\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : bool, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n bins : int, optional\n Rather than count values, group them into half-open bins,\n a convenience for ``pd.cut``, only works with numeric data.\n dropna : bool, default True\n Don't include counts of NaN.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.count: Number of non-NA elements in a DataFrame.\n DataFrame.value_counts: Equivalent method on DataFrames.\n\n Examples\n --------\n >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])\n >>> index.value_counts()\n 3.0 2\n 1.0 1\n 2.0 1\n 4.0 1\n dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])\n >>> s.value_counts(normalize=True)\n 3.0 0.4\n 1.0 0.2\n 2.0 0.2\n 4.0 0.2\n dtype: float64\n\n **bins**\n\n Bins can be useful for going from a continuous variable to a\n categorical variable; instead of counting unique\n apparitions of values, divide the index in the specified\n number of half-open bins.\n\n >>> s.value_counts(bins=3)\n (0.996, 2.0] 2\n (2.0, 3.0] 2\n (3.0, 4.0] 1\n dtype: int64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> s.value_counts(dropna=False)\n 3.0 2\n 1.0 1\n 2.0 1\n 4.0 1\n NaN 1\n dtype: int64\n " return value_counts(self, sort=sort, ascending=ascending, normalize=normalize, bins=bins, dropna=dropna)
2,223,745,085,199,082,000
Return a Series containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occurring element. Excludes NA values by default. Parameters ---------- normalize : bool, default False If True then the object returned will contain the relative frequencies of the unique values. sort : bool, default True Sort by frequencies. ascending : bool, default False Sort in ascending order. bins : int, optional Rather than count values, group them into half-open bins, a convenience for ``pd.cut``, only works with numeric data. dropna : bool, default True Don't include counts of NaN. Returns ------- Series See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.count: Number of non-NA elements in a DataFrame. DataFrame.value_counts: Equivalent method on DataFrames. Examples -------- >>> index = pd.Index([3, 1, 2, 3, 4, np.nan]) >>> index.value_counts() 3.0 2 1.0 1 2.0 1 4.0 1 dtype: int64 With `normalize` set to `True`, returns the relative frequency by dividing all values by the sum of values. >>> s = pd.Series([3, 1, 2, 3, 4, np.nan]) >>> s.value_counts(normalize=True) 3.0 0.4 1.0 0.2 2.0 0.2 4.0 0.2 dtype: float64 **bins** Bins can be useful for going from a continuous variable to a categorical variable; instead of counting unique apparitions of values, divide the index in the specified number of half-open bins. >>> s.value_counts(bins=3) (0.996, 2.0] 2 (2.0, 3.0] 2 (3.0, 4.0] 1 dtype: int64 **dropna** With `dropna` set to `False` we can also see NaN index values. >>> s.value_counts(dropna=False) 3.0 2 1.0 1 2.0 1 4.0 1 NaN 1 dtype: int64
pandas/core/base.py
value_counts
BryanRacic/pandas
python
def value_counts(self, normalize: bool=False, sort: bool=True, ascending: bool=False, bins=None, dropna: bool=True): "\n Return a Series containing counts of unique values.\n\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : bool, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n bins : int, optional\n Rather than count values, group them into half-open bins,\n a convenience for ``pd.cut``, only works with numeric data.\n dropna : bool, default True\n Don't include counts of NaN.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.count: Number of non-NA elements in a DataFrame.\n DataFrame.value_counts: Equivalent method on DataFrames.\n\n Examples\n --------\n >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])\n >>> index.value_counts()\n 3.0 2\n 1.0 1\n 2.0 1\n 4.0 1\n dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])\n >>> s.value_counts(normalize=True)\n 3.0 0.4\n 1.0 0.2\n 2.0 0.2\n 4.0 0.2\n dtype: float64\n\n **bins**\n\n Bins can be useful for going from a continuous variable to a\n categorical variable; instead of counting unique\n apparitions of values, divide the index in the specified\n number of half-open bins.\n\n >>> s.value_counts(bins=3)\n (0.996, 2.0] 2\n (2.0, 3.0] 2\n (3.0, 4.0] 1\n dtype: int64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> s.value_counts(dropna=False)\n 3.0 2\n 1.0 1\n 2.0 1\n 4.0 1\n NaN 1\n dtype: int64\n " return value_counts(self, sort=sort, ascending=ascending, normalize=normalize, bins=bins, dropna=dropna)
def nunique(self, dropna: bool=True) -> int: "\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the count.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 3, 5, 7, 7])\n >>> s\n 0 1\n 1 3\n 2 5\n 3 7\n 4 7\n dtype: int64\n\n >>> s.nunique()\n 4\n " uniqs = self.unique() if dropna: uniqs = remove_na_arraylike(uniqs) return len(uniqs)
8,887,803,906,589,405,000
Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- dropna : bool, default True Don't include NaN in the count. Returns ------- int See Also -------- DataFrame.nunique: Method nunique for DataFrame. Series.count: Count non-NA/null observations in the Series. Examples -------- >>> s = pd.Series([1, 3, 5, 7, 7]) >>> s 0 1 1 3 2 5 3 7 4 7 dtype: int64 >>> s.nunique() 4
pandas/core/base.py
nunique
BryanRacic/pandas
python
def nunique(self, dropna: bool=True) -> int: "\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the count.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 3, 5, 7, 7])\n >>> s\n 0 1\n 1 3\n 2 5\n 3 7\n 4 7\n dtype: int64\n\n >>> s.nunique()\n 4\n " uniqs = self.unique() if dropna: uniqs = remove_na_arraylike(uniqs) return len(uniqs)
@property def is_unique(self) -> bool: '\n Return boolean if values in the object are unique.\n\n Returns\n -------\n bool\n ' return (self.nunique(dropna=False) == len(self))
-1,875,521,501,258,616,300
Return boolean if values in the object are unique. Returns ------- bool
pandas/core/base.py
is_unique
BryanRacic/pandas
python
@property def is_unique(self) -> bool: '\n Return boolean if values in the object are unique.\n\n Returns\n -------\n bool\n ' return (self.nunique(dropna=False) == len(self))
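Because is_unique compares nunique(dropna=False) against len(self), repeated missing values count as duplicates. A short illustration:

import numpy as np
import pandas as pd

print(pd.Series([1.0, np.nan]).is_unique)          # True
print(pd.Series([1.0, np.nan, np.nan]).is_unique)  # False -- two NaNs collide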
@property def is_monotonic(self) -> bool: '\n Return boolean if values in the object are\n monotonic_increasing.\n\n Returns\n -------\n bool\n ' from pandas import Index return Index(self).is_monotonic
-1,426,646,453,358,816,000
Return boolean if values in the object are monotonic_increasing. Returns ------- bool
pandas/core/base.py
is_monotonic
BryanRacic/pandas
python
@property def is_monotonic(self) -> bool: '\n Return boolean if values in the object are\n monotonic_increasing.\n\n Returns\n -------\n bool\n ' from pandas import Index return Index(self).is_monotonic
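Here is_monotonic means monotonically non-decreasing (ties allowed), delegating to Index. For example:

import pandas as pd

print(pd.Series([1, 2, 2, 3]).is_monotonic)  # True -- non-strict, ties allowed
print(pd.Series([3, 2, 1]).is_monotonic)     # False -- decreasing, not increasing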
@property def is_monotonic_increasing(self) -> bool: '\n Alias for is_monotonic.\n ' return self.is_monotonic
7,444,097,157,233,163,000
Alias for is_monotonic.
pandas/core/base.py
is_monotonic_increasing
BryanRacic/pandas
python
@property def is_monotonic_increasing(self) -> bool: '\n \n ' return self.is_monotonic
@property def is_monotonic_decreasing(self) -> bool: '\n Return boolean if values in the object are\n monotonic_decreasing.\n\n Returns\n -------\n bool\n ' from pandas import Index return Index(self).is_monotonic_decreasing
-4,775,814,088,260,608,000
Return boolean if values in the object are monotonic_decreasing. Returns ------- bool
pandas/core/base.py
is_monotonic_decreasing
BryanRacic/pandas
python
@property def is_monotonic_decreasing(self) -> bool: '\n Return boolean if values in the object are\n monotonic_decreasing.\n\n Returns\n -------\n bool\n ' from pandas import Index return Index(self).is_monotonic_decreasing
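The decreasing variant mirrors this, again non-strict:

import pandas as pd

print(pd.Index([3, 2, 2, 1]).is_monotonic_decreasing)  # True -- ties allowed
print(pd.Index([1, 2, 3]).is_monotonic_decreasing)     # False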
def _memory_usage(self, deep: bool=False) -> int: '\n Memory usage of the values.\n\n Parameters\n ----------\n deep : bool, default False\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption.\n\n Returns\n -------\n bytes used\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of the\n array.\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False or if used on PyPy\n ' if hasattr(self.array, 'memory_usage'): return self.array.memory_usage(deep=deep) v = self.array.nbytes if (deep and is_object_dtype(self) and (not PYPY)): values = cast(np.ndarray, self._values) v += lib.memory_usage_of_objects(values) return v
2,268,990,164,839,207,200
Memory usage of the values. Parameters ---------- deep : bool, default False Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption. Returns ------- bytes used See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of the array. Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False or if used on PyPy
pandas/core/base.py
_memory_usage
BryanRacic/pandas
python
def _memory_usage(self, deep: bool=False) -> int: '\n Memory usage of the values.\n\n Parameters\n ----------\n deep : bool, default False\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption.\n\n Returns\n -------\n bytes used\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of the\n array.\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False or if used on PyPy\n ' if hasattr(self.array, 'memory_usage'): return self.array.memory_usage(deep=deep) v = self.array.nbytes if (deep and is_object_dtype(self) and (not PYPY)): values = cast(np.ndarray, self._values) v += lib.memory_usage_of_objects(values) return v
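The public Series.memory_usage wrapper exposes this. On CPython, deep=True additionally sizes the Python objects behind an object-dtype column; exact byte counts vary by platform, so this is only a sketch of the relationship:

import pandas as pd

s = pd.Series(["a", "bb", "ccc"])
shallow = s.memory_usage(index=False)          # pointer-sized slots only
deep = s.memory_usage(index=False, deep=True)  # plus the str objects themselves
print(shallow < deep)  # True on CPython (the code above skips the deep scan on PyPy)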
def _construct_result(self, result, name): '\n Construct an appropriately-wrapped result from the ArrayLike result\n of an arithmetic-like operation.\n ' raise AbstractMethodError(self)
7,152,423,457,140,417,000
Construct an appropriately-wrapped result from the ArrayLike result of an arithmetic-like operation.
pandas/core/base.py
_construct_result
BryanRacic/pandas
python
def _construct_result(self, result, name): '\n Construct an appropriately-wrapped result from the ArrayLike result\n of an arithmetic-like operation.\n ' raise AbstractMethodError(self)
def test_random_grid_search_for_glm(): '\n Create and instantiate classes, call test methods to test randomize grid search for GLM Gaussian\n or Binomial families.\n\n :return: None\n ' test_glm_gaussian_random_grid = Test_glm_random_grid_search('gaussian') test_glm_gaussian_random_grid.test1_glm_random_grid_search_model_number('mse(xval=True)') test_glm_gaussian_random_grid.test2_glm_random_grid_search_max_model() test_glm_gaussian_random_grid.test3_glm_random_grid_search_max_runtime_secs() test_glm_gaussian_random_grid.test4_glm_random_grid_search_metric('MSE', False) if (test_glm_gaussian_random_grid.test_failed > 0): sys.exit(1) else: pyunit_utils.remove_files(os.path.join(test_glm_gaussian_random_grid.current_dir, test_glm_gaussian_random_grid.json_filename))
-2,897,046,621,495,929,300
Create and instantiate classes, and call test methods to test randomized grid search for GLM Gaussian
or Binomial families.

:return: None
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
test_random_grid_search_for_glm
13927729580/h2o-3
python
def test_random_grid_search_for_glm(): '\n Create and instantiate classes, call test methods to test randomize grid search for GLM Gaussian\n or Binomial families.\n\n :return: None\n ' test_glm_gaussian_random_grid = Test_glm_random_grid_search('gaussian') test_glm_gaussian_random_grid.test1_glm_random_grid_search_model_number('mse(xval=True)') test_glm_gaussian_random_grid.test2_glm_random_grid_search_max_model() test_glm_gaussian_random_grid.test3_glm_random_grid_search_max_runtime_secs() test_glm_gaussian_random_grid.test4_glm_random_grid_search_metric('MSE', False) if (test_glm_gaussian_random_grid.test_failed > 0): sys.exit(1) else: pyunit_utils.remove_files(os.path.join(test_glm_gaussian_random_grid.current_dir, test_glm_gaussian_random_grid.json_filename))
def __init__(self, family): '\n Constructor.\n\n :param family: distribution family for tests\n :return: None\n ' self.setup_data() self.setup_grid_params()
5,059,522,531,481,194,000
Constructor. :param family: distribution family for tests :return: None
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
__init__
13927729580/h2o-3
python
def __init__(self, family): '\n Constructor.\n\n :param family: distribution family for tests\n :return: None\n ' self.setup_data() self.setup_grid_params()
def setup_data(self): '\n This function performs all initializations necessary:\n load the data sets and set the training set indices and response column index\n ' self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True) self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename)) self.y_index = (self.training1_data.ncol - 1) self.x_indices = list(range(self.y_index)) pyunit_utils.remove_csv_files(self.current_dir, '.csv', action='copy', new_dir_path=self.sandbox_dir)
-6,597,030,469,623,763,000
This function performs all initializations necessary:
it loads the data sets and sets the training set indices and the response column index.
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
setup_data
13927729580/h2o-3
python
def setup_data(self): '\n This function performs all initializations necessary:\n load the data sets and set the training set indices and response column index\n ' self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True) self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename)) self.y_index = (self.training1_data.ncol - 1) self.x_indices = list(range(self.y_index)) pyunit_utils.remove_csv_files(self.current_dir, '.csv', action='copy', new_dir_path=self.sandbox_dir)
def setup_grid_params(self): '\n This function setup the randomized gridsearch parameters that will be used later on:\n\n 1. It will first try to grab all the parameters that are griddable and parameters used by GLM.\n 2. It will find the intersection of parameters that are both griddable and used by GLM.\n 3. There are several extra parameters that are used by GLM that are denoted as griddable but actually is not.\n These parameters have to be discovered manually and they These are captured in self.exclude_parameter_lists.\n 4. We generate the gridsearch hyper-parameter. For numerical parameters, we will generate those randomly.\n For enums, we will include all of them.\n\n :return: None\n ' model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds) model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) self.one_model_time = pyunit_utils.find_grid_runtime([model]) print('Time taken to build a base barebone model is {0}'.format(self.one_model_time)) (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = pyunit_utils.get_gridables(model._model_json['parameters']) self.hyper_params = {} self.hyper_params['fold_assignment'] = ['AUTO', 'Random', 'Modulo'] self.hyper_params['missing_values_handling'] = ['MeanImputation', 'Skip'] (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists, self.gridable_parameters, self.gridable_types, self.gridable_defaults, random.randint(1, self.max_int_number), self.max_int_val, self.min_int_val, random.randint(1, self.max_real_number), self.max_real_val, self.min_real_val) if ('lambda' in list(self.hyper_params)): self.hyper_params['lambda'] = [(self.lambda_scale * x) for x in self.hyper_params['lambda']] time_scale = (self.max_runtime_scale * self.one_model_time) if ('max_runtime_secs' in list(self.hyper_params)): self.hyper_params['max_runtime_secs'] = [(time_scale * x) for x in self.hyper_params['max_runtime_secs']] self.possible_number_models = pyunit_utils.count_models(self.hyper_params) pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename, self.hyper_params)
8,059,243,211,549,357,000
This function sets up the randomized gridsearch parameters that will be used later on:

1. It will first try to grab all the parameters that are griddable and the parameters used by GLM.
2. It will find the intersection of parameters that are both griddable and used by GLM.
3. There are several extra parameters used by GLM that are denoted as griddable but actually are not.
These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.
4. We generate the gridsearch hyper-parameters. For numerical parameters, we will generate those randomly.
For enums, we will include all of them.

:return: None
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
setup_grid_params
13927729580/h2o-3
python
def setup_grid_params(self): '\n This function sets up the randomized gridsearch parameters that will be used later on:\n\n 1. It will first try to grab all the parameters that are griddable and the parameters used by GLM.\n 2. It will find the intersection of parameters that are both griddable and used by GLM.\n 3. There are several extra parameters used by GLM that are denoted as griddable but actually are not.\n These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.\n 4. We generate the gridsearch hyper-parameters. For numerical parameters, we will generate those randomly.\n For enums, we will include all of them.\n\n :return: None\n ' model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds) model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) self.one_model_time = pyunit_utils.find_grid_runtime([model]) print('Time taken to build a base barebone model is {0}'.format(self.one_model_time)) (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = pyunit_utils.get_gridables(model._model_json['parameters']) self.hyper_params = {} self.hyper_params['fold_assignment'] = ['AUTO', 'Random', 'Modulo'] self.hyper_params['missing_values_handling'] = ['MeanImputation', 'Skip'] (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists, self.gridable_parameters, self.gridable_types, self.gridable_defaults, random.randint(1, self.max_int_number), self.max_int_val, self.min_int_val, random.randint(1, self.max_real_number), self.max_real_val, self.min_real_val) if ('lambda' in list(self.hyper_params)): self.hyper_params['lambda'] = [(self.lambda_scale * x) for x in self.hyper_params['lambda']] time_scale = (self.max_runtime_scale * self.one_model_time) if ('max_runtime_secs' in list(self.hyper_params)): self.hyper_params['max_runtime_secs'] = [(time_scale * x) for x in self.hyper_params['max_runtime_secs']] self.possible_number_models = pyunit_utils.count_models(self.hyper_params) pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename, self.hyper_params)
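pyunit_utils.count_models presumably returns the size of the full Cartesian product of the hyper-parameter lists. A sketch of that arithmetic under that assumption (the parameter values below are illustrative, not taken from the test):

from functools import reduce

hyper_params = {
    "fold_assignment": ["AUTO", "Random", "Modulo"],        # 3 choices
    "missing_values_handling": ["MeanImputation", "Skip"],  # 2 choices
    "lambda": [0.01, 0.1, 1.0],                             # 3 choices
}
# Full grid size = product of the hyper-parameter list lengths.
n_models = reduce(lambda acc, vals: acc * len(vals), hyper_params.values(), 1)
print(n_models)  # 3 * 2 * 3 = 18 models in the full grid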
def tear_down(self): '\n This function performs teardown after the dynamic test is completed. If all tests\n passed, it will delete all data sets generated since they can be quite large. It\n will move the training/validation/test data sets into a Rsandbox directory so that\n we can re-run the failed test.\n ' if self.test_failed: self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True) pyunit_utils.move_files(self.sandbox_dir, self.training1_data_file, self.training1_filename) json_file = os.path.join(self.sandbox_dir, self.json_filename) with open(json_file, 'w') as test_file: json.dump(self.hyper_params, test_file) else: pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
551,565,696,271,934,300
This function performs teardown after the dynamic test is completed. If all tests passed, it will delete all data sets generated since they can be quite large. It will move the training/validation/test data sets into a Rsandbox directory so that we can re-run the failed test.
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
tear_down
13927729580/h2o-3
python
def tear_down(self): '\n This function performs teardown after the dynamic test is completed. If all tests\n passed, it will delete all data sets generated since they can be quite large. It\n will move the training/validation/test data sets into a Rsandbox directory so that\n we can re-run the failed test.\n ' if self.test_failed: self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True) pyunit_utils.move_files(self.sandbox_dir, self.training1_data_file, self.training1_filename) json_file = os.path.join(self.sandbox_dir, self.json_filename) with open(json_file, 'w') as test_file: json.dump(self.hyper_params, test_file) else: pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
def test1_glm_random_grid_search_model_number(self, metric_name): '\n This test is used to make sure the randomized gridsearch will generate all models specified in the\n hyperparameters if no stopping condition is given in the search criterion.\n\n :param metric_name: string to denote what grid search model should be sort by\n\n :return: None\n ' print('*******************************************************************************************') print(('test1_glm_random_grid_search_model_number for GLM ' + self.family)) h2o.cluster_info() search_criteria = {'strategy': 'RandomDiscrete', 'stopping_rounds': 0, 'seed': round(time.time())} print('GLM Gaussian grid search_criteria: {0}'.format(search_criteria)) random_grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params, search_criteria=search_criteria) random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) if (not (len(random_grid_model) == self.possible_number_models)): self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print('test1_glm_random_grid_search_model_number for GLM: failed, number of models generatedpossible model number {0} and randomized gridsearch model number {1} are not equal.'.format(self.possible_number_models, len(random_grid_model))) else: self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model) if (self.test_failed_array[self.test_num] == 0): print('test1_glm_random_grid_search_model_number for GLM: passed!') self.test_num += 1 sys.stdout.flush()
1,333,334,887,265,819,400
This test is used to make sure the randomized gridsearch will generate all models specified in the
hyperparameters if no stopping condition is given in the search criterion.

:param metric_name: string to denote what the grid search models should be sorted by

:return: None
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
test1_glm_random_grid_search_model_number
13927729580/h2o-3
python
def test1_glm_random_grid_search_model_number(self, metric_name): '\n This test is used to make sure the randomized gridsearch will generate all models specified in the\n hyperparameters if no stopping condition is given in the search criterion.\n\n :param metric_name: string to denote what the grid search models should be sorted by\n\n :return: None\n ' print('*******************************************************************************************') print(('test1_glm_random_grid_search_model_number for GLM ' + self.family)) h2o.cluster_info() search_criteria = {'strategy': 'RandomDiscrete', 'stopping_rounds': 0, 'seed': round(time.time())} print('GLM Gaussian grid search_criteria: {0}'.format(search_criteria)) random_grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params, search_criteria=search_criteria) random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) if (not (len(random_grid_model) == self.possible_number_models)): self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print('test1_glm_random_grid_search_model_number for GLM: failed: possible model number {0} and randomized gridsearch model number {1} are not equal.'.format(self.possible_number_models, len(random_grid_model))) else: self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model) if (self.test_failed_array[self.test_num] == 0): print('test1_glm_random_grid_search_model_number for GLM: passed!') self.test_num += 1 sys.stdout.flush()
def test2_glm_random_grid_search_max_model(self): '\n This test is used to test the stopping condition max_model_number in the randomized gridsearch. The\n max_models parameter is randomly generated. If it is higher than the actual possible number of models\n that can be generated with the current hyper-space parameters, randomized grid search should generate\n all the models. Otherwise, grid search shall return a model that equals to the max_model setting.\n ' print('*******************************************************************************************') print(('test2_glm_random_grid_search_max_model for GLM ' + self.family)) h2o.cluster_info() self.max_model_number = random.randint(1, int((self.allowed_scaled_model_number * self.possible_number_models))) search_criteria = {'strategy': 'RandomDiscrete', 'max_models': self.max_model_number, 'seed': round(time.time())} print('GLM Gaussian grid search_criteria: {0}'.format(search_criteria)) print('Possible number of models built is {0}'.format(self.possible_number_models)) grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params, search_criteria=search_criteria) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) number_model_built = len(grid_model) print('Maximum model limit is {0}. Number of models built is {1}'.format(search_criteria['max_models'], number_model_built)) if (self.possible_number_models >= self.max_model_number): if (not (number_model_built == self.max_model_number)): print('test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not match stopping condition number{1}.'.format(number_model_built, self.max_model_number)) self.test_failed += 1 self.test_failed_array[self.test_num] = 1 else: print('test2_glm_random_grid_search_max_model for GLM: passed.') elif (not (number_model_built == self.possible_number_models)): self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print('test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not equal to possible model number {1}.'.format(number_model_built, self.possible_number_models)) else: print('test2_glm_random_grid_search_max_model for GLM: passed.') self.test_num += 1 sys.stdout.flush()
3,782,168,255,454,180,400
This test is used to test the stopping condition max_model_number in the randomized gridsearch. The
max_models parameter is randomly generated. If it is higher than the actual possible number of models
that can be generated with the current hyper-space parameters, randomized grid search should generate
all the models. Otherwise, grid search shall return a number of models equal to the max_models setting.
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
test2_glm_random_grid_search_max_model
13927729580/h2o-3
python
def test2_glm_random_grid_search_max_model(self): '\n This test is used to test the stopping condition max_model_number in the randomized gridsearch. The\n max_models parameter is randomly generated. If it is higher than the actual possible number of models\n that can be generated with the current hyper-space parameters, randomized grid search should generate\n all the models. Otherwise, grid search shall return a model that equals to the max_model setting.\n ' print('*******************************************************************************************') print(('test2_glm_random_grid_search_max_model for GLM ' + self.family)) h2o.cluster_info() self.max_model_number = random.randint(1, int((self.allowed_scaled_model_number * self.possible_number_models))) search_criteria = {'strategy': 'RandomDiscrete', 'max_models': self.max_model_number, 'seed': round(time.time())} print('GLM Gaussian grid search_criteria: {0}'.format(search_criteria)) print('Possible number of models built is {0}'.format(self.possible_number_models)) grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params, search_criteria=search_criteria) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) number_model_built = len(grid_model) print('Maximum model limit is {0}. Number of models built is {1}'.format(search_criteria['max_models'], number_model_built)) if (self.possible_number_models >= self.max_model_number): if (not (number_model_built == self.max_model_number)): print('test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not match stopping condition number{1}.'.format(number_model_built, self.max_model_number)) self.test_failed += 1 self.test_failed_array[self.test_num] = 1 else: print('test2_glm_random_grid_search_max_model for GLM: passed.') elif (not (number_model_built == self.possible_number_models)): self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print('test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not equal to possible model number {1}.'.format(number_model_built, self.possible_number_models)) else: print('test2_glm_random_grid_search_max_model for GLM: passed.') self.test_num += 1 sys.stdout.flush()
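The branching in test2 reduces to one expectation: the grid should build min(max_models, possible_number_models) models. Distilled into a hypothetical helper (expected_model_count is not part of the test suite, just an illustration of the rule):

def expected_model_count(possible_number_models, max_models):
    # RandomDiscrete with a max_models cap can never exceed either bound.
    return min(possible_number_models, max_models)

assert expected_model_count(18, 5) == 5    # cap binds
assert expected_model_count(18, 40) == 18  # grid exhausted first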
def test3_glm_random_grid_search_max_runtime_secs(self): '\n This function will test the stopping criteria max_runtime_secs. For each model built, the field\n run_time actually denote the time in ms used to build the model. We will add up the run_time from all\n models and check against the stopping criteria max_runtime_secs. Since each model will check its run time\n differently, there is some inaccuracies in the actual run time. For example, if we give a model 10 ms to\n build. The GLM may check and see if it has used up all the time for every 10 epochs that it has run. On\n the other hand, deeplearning may check the time it has spent after every epoch of training.\n\n If we are able to restrict the runtime to not exceed the specified max_runtime_secs by a certain\n percentage, we will consider the test a success.\n\n :return: None\n ' print('*******************************************************************************************') print(('test3_glm_random_grid_search_max_runtime_secs for GLM ' + self.family)) h2o.cluster_info() if ('max_runtime_secs' in list(self.hyper_params)): del self.hyper_params['max_runtime_secs'] self.possible_number_models = pyunit_utils.count_models(self.hyper_params) max_run_time_secs = random.uniform(self.one_model_time, (self.allowed_scaled_time * self.max_grid_runtime)) search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs, 'seed': round(time.time())} print('GLM Gaussian grid search_criteria: {0}'.format(search_criteria)) grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params, search_criteria=search_criteria) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model) print('Maximum time limit is {0}. Time taken to build all model is {1}'.format(search_criteria['max_runtime_secs'], actual_run_time_secs)) print('Maximum model number is {0}. Actual number of models built is {1}'.format(self.possible_number_models, len(grid_model))) if (actual_run_time_secs <= (search_criteria['max_runtime_secs'] * (1 + self.allowed_diff))): print('test3_glm_random_grid_search_max_runtime_secs: passed!') if (len(grid_model) > self.possible_number_models): self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print('test3_glm_random_grid_search_max_runtime_secs: failed. Generated {0} models which exceeds maximum possible model number {1}'.format(len(grid_model), self.possible_number_models)) elif (len(grid_model) == 1): print('test3_glm_random_grid_search_max_runtime_secs: passed!') else: self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print('test3_glm_random_grid_search_max_runtime_secs: failed. Model takes time {0} seconds which exceeds allowed time {1}'.format(actual_run_time_secs, (max_run_time_secs * (1 + self.allowed_diff)))) self.test_num += 1 sys.stdout.flush()
6,804,544,480,565,740,000
This function will test the stopping criterion max_runtime_secs. For each model built, the field
run_time denotes the time in ms used to build the model. We will add up the run_time from all
models and check against the stopping criterion max_runtime_secs. Since each model checks its run time
differently, there are some inaccuracies in the actual run time. For example, if we give a model 10 ms
to build, GLM may check whether it has used up all the time only once every 10 epochs it has run, while
deeplearning may check the time it has spent after every epoch of training.

If we are able to restrict the runtime to not exceed the specified max_runtime_secs by a certain
percentage, we will consider the test a success.

:return: None
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
test3_glm_random_grid_search_max_runtime_secs
13927729580/h2o-3
python
def test3_glm_random_grid_search_max_runtime_secs(self): '\n This function will test the stopping criteria max_runtime_secs. For each model built, the field\n run_time actually denote the time in ms used to build the model. We will add up the run_time from all\n models and check against the stopping criteria max_runtime_secs. Since each model will check its run time\n differently, there is some inaccuracies in the actual run time. For example, if we give a model 10 ms to\n build. The GLM may check and see if it has used up all the time for every 10 epochs that it has run. On\n the other hand, deeplearning may check the time it has spent after every epoch of training.\n\n If we are able to restrict the runtime to not exceed the specified max_runtime_secs by a certain\n percentage, we will consider the test a success.\n\n :return: None\n ' print('*******************************************************************************************') print(('test3_glm_random_grid_search_max_runtime_secs for GLM ' + self.family)) h2o.cluster_info() if ('max_runtime_secs' in list(self.hyper_params)): del self.hyper_params['max_runtime_secs'] self.possible_number_models = pyunit_utils.count_models(self.hyper_params) max_run_time_secs = random.uniform(self.one_model_time, (self.allowed_scaled_time * self.max_grid_runtime)) search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs, 'seed': round(time.time())} print('GLM Gaussian grid search_criteria: {0}'.format(search_criteria)) grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params, search_criteria=search_criteria) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model) print('Maximum time limit is {0}. Time taken to build all model is {1}'.format(search_criteria['max_runtime_secs'], actual_run_time_secs)) print('Maximum model number is {0}. Actual number of models built is {1}'.format(self.possible_number_models, len(grid_model))) if (actual_run_time_secs <= (search_criteria['max_runtime_secs'] * (1 + self.allowed_diff))): print('test3_glm_random_grid_search_max_runtime_secs: passed!') if (len(grid_model) > self.possible_number_models): self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print('test3_glm_random_grid_search_max_runtime_secs: failed. Generated {0} models which exceeds maximum possible model number {1}'.format(len(grid_model), self.possible_number_models)) elif (len(grid_model) == 1): print('test3_glm_random_grid_search_max_runtime_secs: passed!') else: self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print('test3_glm_random_grid_search_max_runtime_secs: failed. Model takes time {0} seconds which exceeds allowed time {1}'.format(actual_run_time_secs, (max_run_time_secs * (1 + self.allowed_diff)))) self.test_num += 1 sys.stdout.flush()
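The pass condition in test3 is a relative overshoot tolerance on total runtime. Distilled into a hypothetical helper (within_budget is an illustration, not part of pyunit_utils):

def within_budget(actual_secs, max_runtime_secs, allowed_diff):
    # The grid passes if its total runtime overshoots the budget by at
    # most allowed_diff (a relative tolerance), mirroring the check above.
    return actual_secs <= max_runtime_secs * (1 + allowed_diff)

print(within_budget(10.1, 10.0, 0.02))  # True  -- within 2%
print(within_budget(10.4, 10.0, 0.02))  # False -- 4% over budget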
def test4_glm_random_grid_search_metric(self, metric_name, bigger_is_better): '\n This function will test the last stopping condition using metrics.\n\n :param metric_name: metric we want to use to test the last stopping condition\n :param bigger_is_better: higher metric value indicates better model performance\n\n :return: None\n ' print('*******************************************************************************************') print(((('test4_glm_random_grid_search_metric using ' + metric_name) + ' for family ') + self.family)) h2o.cluster_info() search_criteria = {'strategy': 'RandomDiscrete', 'stopping_metric': metric_name, 'stopping_tolerance': random.uniform(1e-08, self.max_tolerance), 'stopping_rounds': random.randint(1, self.max_stopping_rounds), 'seed': round(time.time())} print('GLM Gaussian grid search_criteria: {0}'.format(search_criteria)) self.hyper_params['max_runtime_secs'] = [0.3] grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params, search_criteria=search_criteria) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) stopped_correctly = pyunit_utils.evaluate_metrics_stopping(grid_model.models, metric_name, bigger_is_better, search_criteria, self.possible_number_models) if stopped_correctly: print((('test4_glm_random_grid_search_metric ' + metric_name) + ': passed. ')) else: self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print((('test4_glm_random_grid_search_metric ' + metric_name) + ': failed. ')) self.test_num += 1
-2,631,604,389,071,402,500
This function will test the last stopping condition using metrics. :param metric_name: metric we want to use to test the last stopping condition :param bigger_is_better: higher metric value indicates better model performance :return: None
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
test4_glm_random_grid_search_metric
13927729580/h2o-3
python
def test4_glm_random_grid_search_metric(self, metric_name, bigger_is_better): '\n This function will test the last stopping condition using metrics.\n\n :param metric_name: metric we want to use to test the last stopping condition\n :param bigger_is_better: higher metric value indicates better model performance\n\n :return: None\n ' print('*******************************************************************************************') print(((('test4_glm_random_grid_search_metric using ' + metric_name) + ' for family ') + self.family)) h2o.cluster_info() search_criteria = {'strategy': 'RandomDiscrete', 'stopping_metric': metric_name, 'stopping_tolerance': random.uniform(1e-08, self.max_tolerance), 'stopping_rounds': random.randint(1, self.max_stopping_rounds), 'seed': round(time.time())} print('GLM Gaussian grid search_criteria: {0}'.format(search_criteria)) self.hyper_params['max_runtime_secs'] = [0.3] grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params, search_criteria=search_criteria) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) stopped_correctly = pyunit_utils.evaluate_metrics_stopping(grid_model.models, metric_name, bigger_is_better, search_criteria, self.possible_number_models) if stopped_correctly: print((('test4_glm_random_grid_search_metric ' + metric_name) + ': passed. ')) else: self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print((('test4_glm_random_grid_search_metric ' + metric_name) + ': failed. ')) self.test_num += 1
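The metric-based stopping rule that pyunit_utils.evaluate_metrics_stopping verifies can be sketched as follows. This is a simplification (H2O's actual rule compares moving averages with a relative tolerance), and the metric trace below is invented for illustration.

def stopped_index(metric_trace, stopping_rounds, stopping_tolerance, bigger_is_better=True):
    # Stop once the metric fails to improve by more than stopping_tolerance
    # for stopping_rounds consecutive models.
    best = metric_trace[0]
    stale = 0
    for i, value in enumerate(metric_trace[1:], start=1):
        improvement = (value - best) if bigger_is_better else (best - value)
        if improvement > stopping_tolerance:
            best, stale = value, 0
        else:
            stale += 1
            if stale >= stopping_rounds:
                return i  # index of the model at which the search stops
    return len(metric_trace) - 1

print(stopped_index([0.70, 0.74, 0.75, 0.751, 0.7511, 0.7512],
                    stopping_rounds=2, stopping_tolerance=0.01))  # 5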
def expect_cpp_code_generic_compile_error(expected_error_regex, tmppy_source, module_ir2, module_ir1, cxx_source): "\n Tests that the given source produces the expected error during compilation.\n\n :param expected_error_regex: A regex used to match the _py2tmp error type,\n e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.\n :param cxx_source: The second part of the source code. This will be dedented.\n " expected_error_regex = expected_error_regex.replace(' ', '') def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines): for line in normalized_error_message_lines: if re.search(expected_error_regex, line): return pytest.fail(textwrap.dedent(' Expected error {expected_error} but the compiler output did not contain that.\n Compiler command line: {compiler_command}\n Error message was:\n {error_message}\n\n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source:\n {cxx_source}\n ').format(expected_error=expected_error_regex, compiler_command=e.command, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head), pytrace=False) expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
-7,943,035,485,855,268,000
Tests that the given source produces the expected error during compilation. :param expected_error_regex: A regex used to match the _py2tmp error type, e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'. :param cxx_source: The second part of the source code. This will be dedented.
_py2tmp/testing/utils.py
expect_cpp_code_generic_compile_error
DalavanCloud/tmppy
python
def expect_cpp_code_generic_compile_error(expected_error_regex, tmppy_source, module_ir2, module_ir1, cxx_source): "\n Tests that the given source produces the expected error during compilation.\n\n :param expected_error_regex: A regex used to match the _py2tmp error type,\n e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.\n :param cxx_source: The second part of the source code. This will be dedented.\n " expected_error_regex = expected_error_regex.replace(' ', '') def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines): for line in normalized_error_message_lines: if re.search(expected_error_regex, line): return pytest.fail(textwrap.dedent(' Expected error {expected_error} but the compiler output did not contain that.\n Compiler command line: {compiler_command}\n Error message was:\n {error_message}\n\n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source:\n {cxx_source}\n ').format(expected_error=expected_error_regex, compiler_command=e.command, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head), pytrace=False) expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
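The heart of check_error above is a whitespace-insensitive regex scan over the compiler output lines. A standalone illustration with a made-up output line:

import re

expected_error_regex = 'NoBindingFoundForAbstractClassError< ScalerImpl >'.replace(' ', '')
compiler_output = [
    'some_file.cpp:12:3: error:',  # hypothetical compiler output lines
    'static assertion failed: tmppy::impl::NoBindingFoundForAbstractClassError<ScalerImpl>',
]
print(any(re.search(expected_error_regex, line) for line in compiler_output))  # True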
def expect_cpp_code_compile_error(expected_py2tmp_error_regex, expected_py2tmp_error_desc_regex, tmppy_source, module_ir2, module_ir1, cxx_source): "\n Tests that the given source produces the expected error during compilation.\n\n :param expected_py2tmp_error_regex: A regex used to match the _py2tmp error type,\n e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.\n :param expected_py2tmp_error_desc_regex: A regex used to match the _py2tmp error description,\n e.g. 'No explicit binding was found for C, and C is an abstract class'.\n :param source_code: The C++ source code. This will be dedented.\n :param ignore_deprecation_warnings: A boolean. If True, deprecation warnings will be ignored.\n " if ('\n' in expected_py2tmp_error_regex): raise Exception('expected_py2tmp_error_regex should not contain newlines') if ('\n' in expected_py2tmp_error_desc_regex): raise Exception('expected_py2tmp_error_desc_regex should not contain newlines') expected_py2tmp_error_regex = expected_py2tmp_error_regex.replace(' ', '') def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines): for (line_number, line) in enumerate(normalized_error_message_lines): match = re.search('tmppy::impl::(.*Error<.*>)', line) if match: actual_py2tmp_error_line_number = line_number actual_py2tmp_error = match.groups()[0] if (config.CXX_COMPILER_NAME == 'MSVC'): try: replacement_lines = [] if (normalized_error_message_lines[(line_number + 1)].strip() == 'with'): for line in itertools.islice(normalized_error_message_lines, (line_number + 3), None): line = line.strip() if (line == ']'): break if line.endswith(','): line = line[:(- 1)] replacement_lines.append(line) for replacement_line in replacement_lines: match = re.search('([A-Za-z0-9_-]*)=(.*)', replacement_line) if (not match): raise Exception(('Failed to parse replacement line: %s' % replacement_line)) from e (type_variable, type_expression) = match.groups() actual_py2tmp_error = re.sub((('\\b' + type_variable) + '\\b'), type_expression, actual_py2tmp_error) except Exception: raise Exception('Failed to parse MSVC template type arguments') break else: pytest.fail(textwrap.dedent(' Expected error {expected_error} but the compiler output did not contain user-facing _py2tmp errors.\n Compiler command line: {compiler_command}\n Error message was:\n {error_message}\n\n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n ').format(expected_error=expected_py2tmp_error_regex, compiler_command=e.command, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head), pytrace=False) for (line_number, line) in enumerate(error_message_lines): match = re.search(py2tmp_error_message_extraction_regex, line) if match: actual_static_assert_error_line_number = line_number actual_static_assert_error = match.groups()[0] break else: pytest.fail(textwrap.dedent(' Expected error {expected_error} but the compiler output did not contain static_assert errors.\n Compiler command line: {compiler_command}\n Error message was:\n {error_message}\n \n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n ').format(expected_error=expected_py2tmp_error_regex, compiler_command=e.command, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head), pytrace=False) try: regex_search_result = re.search(expected_py2tmp_error_regex, actual_py2tmp_error) except Exception as e: raise Exception(("re.search() failed for regex '%s'" % expected_py2tmp_error_regex)) from e if (not regex_search_result): pytest.fail(textwrap.dedent(' The compilation failed as expected, but with a different error type.\n Expected _py2tmp error type: {expected_py2tmp_error_regex}\n Error type was: {actual_py2tmp_error}\n Expected static assert error: {expected_py2tmp_error_desc_regex}\n Static assert was: {actual_static_assert_error}\n \n Error message was:\n {error_message}\n \n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n '.format(expected_py2tmp_error_regex=expected_py2tmp_error_regex, actual_py2tmp_error=actual_py2tmp_error, expected_py2tmp_error_desc_regex=expected_py2tmp_error_desc_regex, actual_static_assert_error=actual_static_assert_error, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head)), pytrace=False) try: regex_search_result = re.search(expected_py2tmp_error_desc_regex, actual_static_assert_error) except Exception as e: raise Exception(("re.search() failed for regex '%s'" % expected_py2tmp_error_desc_regex)) from e if (not regex_search_result): pytest.fail(textwrap.dedent(' The compilation failed as expected, but with a different error message.\n Expected _py2tmp error type: {expected_py2tmp_error_regex}\n Error type was: {actual_py2tmp_error}\n Expected static assert error: {expected_py2tmp_error_desc_regex}\n Static assert was: {actual_static_assert_error}\n \n Error message:\n {error_message}\n \n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n '.format(expected_py2tmp_error_regex=expected_py2tmp_error_regex, actual_py2tmp_error=actual_py2tmp_error, expected_py2tmp_error_desc_regex=expected_py2tmp_error_desc_regex, actual_static_assert_error=actual_static_assert_error, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head)), pytrace=False) if ((actual_py2tmp_error_line_number > 6) or (actual_static_assert_error_line_number > 6)): pytest.fail(textwrap.dedent(' The compilation failed with the expected message, but the error message contained too many lines before the relevant ones.\n The error type was reported on line {actual_py2tmp_error_line_number} of the message (should be <=6).\n The static assert was reported on line {actual_static_assert_error_line_number} of the message (should be <=6).\n Error message:\n {error_message}\n\n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n '.format(actual_py2tmp_error_line_number=actual_py2tmp_error_line_number, actual_static_assert_error_line_number=actual_static_assert_error_line_number, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head)), pytrace=False) for line in error_message_lines[:max(actual_py2tmp_error_line_number, actual_static_assert_error_line_number)]: if re.search('tmppy::impl', line): pytest.fail(('The compilation failed with the expected message, but the error message contained some metaprogramming types in the output (besides Error). Error message:\n%s' % error_message_head), pytrace=False) expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
-3,876,706,337,203,248,600
Tests that the given source produces the expected error during compilation. :param expected_py2tmp_error_regex: A regex used to match the _py2tmp error type, e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'. :param expected_py2tmp_error_desc_regex: A regex used to match the _py2tmp error description, e.g. 'No explicit binding was found for C, and C is an abstract class'. :param source_code: The C++ source code. This will be dedented. :param ignore_deprecation_warnings: A boolean. If True, deprecation warnings will be ignored.
_py2tmp/testing/utils.py
expect_cpp_code_compile_error
DalavanCloud/tmppy
python
def expect_cpp_code_compile_error(expected_py2tmp_error_regex, expected_py2tmp_error_desc_regex, tmppy_source, module_ir2, module_ir1, cxx_source): "\n Tests that the given source produces the expected error during compilation.\n\n :param expected_py2tmp_error_regex: A regex used to match the _py2tmp error type,\n e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.\n :param expected_py2tmp_error_desc_regex: A regex used to match the _py2tmp error description,\n e.g. 'No explicit binding was found for C, and C is an abstract class'.\n :param source_code: The C++ source code. This will be dedented.\n :param ignore_deprecation_warnings: A boolean. If True, deprecation warnings will be ignored.\n " if ('\n' in expected_py2tmp_error_regex): raise Exception('expected_py2tmp_error_regex should not contain newlines') if ('\n' in expected_py2tmp_error_desc_regex): raise Exception('expected_py2tmp_error_desc_regex should not contain newlines') expected_py2tmp_error_regex = expected_py2tmp_error_regex.replace(' ', '') def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines): for (line_number, line) in enumerate(normalized_error_message_lines): match = re.search('tmppy::impl::(.*Error<.*>)', line) if match: actual_py2tmp_error_line_number = line_number actual_py2tmp_error = match.groups()[0] if (config.CXX_COMPILER_NAME == 'MSVC'): try: replacement_lines = [] if (normalized_error_message_lines[(line_number + 1)].strip() == 'with'): for line in itertools.islice(normalized_error_message_lines, (line_number + 3), None): line = line.strip() if (line == ']'): break if line.endswith(','): line = line[:(- 1)] replacement_lines.append(line) for replacement_line in replacement_lines: match = re.search('([A-Za-z0-9_-]*)=(.*)', replacement_line) if (not match): raise Exception(('Failed to parse replacement line: %s' % replacement_line)) from e (type_variable, type_expression) = match.groups() actual_py2tmp_error = re.sub((('\\b' + type_variable) + '\\b'), type_expression, actual_py2tmp_error) except Exception: raise Exception('Failed to parse MSVC template type arguments') break else: pytest.fail(textwrap.dedent(' Expected error {expected_error} but the compiler output did not contain user-facing _py2tmp errors.\n Compiler command line: {compiler_command}\n Error message was:\n {error_message}\n\n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n ').format(expected_error=expected_py2tmp_error_regex, compiler_command=e.command, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head), pytrace=False) for (line_number, line) in enumerate(error_message_lines): match = re.search(py2tmp_error_message_extraction_regex, line) if match: actual_static_assert_error_line_number = line_number actual_static_assert_error = match.groups()[0] break else: pytest.fail(textwrap.dedent(' Expected error {expected_error} but the compiler output did not contain static_assert errors.\n Compiler command line: {compiler_command}\n Error message was:\n {error_message}\n \n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n ').format(expected_error=expected_py2tmp_error_regex, compiler_command=e.command, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head), pytrace=False) try: regex_search_result = re.search(expected_py2tmp_error_regex, actual_py2tmp_error) except Exception as e: raise Exception(("re.search() failed for regex '%s'" % expected_py2tmp_error_regex)) from e if (not regex_search_result): pytest.fail(textwrap.dedent(' The compilation failed as expected, but with a different error type.\n Expected _py2tmp error type: {expected_py2tmp_error_regex}\n Error type was: {actual_py2tmp_error}\n Expected static assert error: {expected_py2tmp_error_desc_regex}\n Static assert was: {actual_static_assert_error}\n \n Error message was:\n {error_message}\n \n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n '.format(expected_py2tmp_error_regex=expected_py2tmp_error_regex, actual_py2tmp_error=actual_py2tmp_error, expected_py2tmp_error_desc_regex=expected_py2tmp_error_desc_regex, actual_static_assert_error=actual_static_assert_error, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head)), pytrace=False) try: regex_search_result = re.search(expected_py2tmp_error_desc_regex, actual_static_assert_error) except Exception as e: raise Exception(("re.search() failed for regex '%s'" % expected_py2tmp_error_desc_regex)) from e if (not regex_search_result): pytest.fail(textwrap.dedent(' The compilation failed as expected, but with a different error message.\n Expected _py2tmp error type: {expected_py2tmp_error_regex}\n Error type was: {actual_py2tmp_error}\n Expected static assert error: {expected_py2tmp_error_desc_regex}\n Static assert was: {actual_static_assert_error}\n \n Error message:\n {error_message}\n \n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n '.format(expected_py2tmp_error_regex=expected_py2tmp_error_regex, actual_py2tmp_error=actual_py2tmp_error, expected_py2tmp_error_desc_regex=expected_py2tmp_error_desc_regex, actual_static_assert_error=actual_static_assert_error, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head)), pytrace=False) if ((actual_py2tmp_error_line_number > 6) or (actual_static_assert_error_line_number > 6)): pytest.fail(textwrap.dedent(' The compilation failed with the expected message, but the error message contained too many lines before the relevant ones.\n The error type was reported on line {actual_py2tmp_error_line_number} of the message (should be <=6).\n The static assert was reported on line {actual_static_assert_error_line_number} of the message (should be <=6).\n Error message:\n {error_message}\n\n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source code:\n {cxx_source}\n '.format(actual_py2tmp_error_line_number=actual_py2tmp_error_line_number, actual_static_assert_error_line_number=actual_static_assert_error_line_number, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=error_message_head)), pytrace=False) for line in error_message_lines[:max(actual_py2tmp_error_line_number, actual_static_assert_error_line_number)]: if re.search('tmppy::impl', line): pytest.fail(('The compilation failed with the expected message, but the error message contained some metaprogramming types in the output (besides Error). Error message:\n%s' % error_message_head), pytrace=False) expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, cxx_source): '\n Tests that the given source compiles and runs successfully.\n\n :param source_code: The C++ source code. This will be dedented.\n ' if ('main(' not in cxx_source): cxx_source += textwrap.dedent('\n int main() {\n }\n ') source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp') executable_suffix = {'posix': '', 'nt': '.exe'}[os.name] output_file_name = _create_temporary_file('', executable_suffix) e = None try: compiler.compile_and_link(source=source_file_name, include_dirs=[config.MPYL_INCLUDE_DIR], output_file_name=output_file_name, args=[]) except CommandFailedException as e1: e = e1 if e: pytest.fail(textwrap.dedent(' The generated C++ source did not compile.\n Compiler command line: {compiler_command}\n Error message was:\n {error_message}\n \n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source:\n {cxx_source}\n ').format(compiler_command=e.command, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=_cap_to_lines(e.stderr, 40)), pytrace=False) try: run_compiled_executable(output_file_name) except CommandFailedException as e1: e = e1 if e: pytest.fail(textwrap.dedent(' The generated C++ executable did not run successfully.\n stderr was:\n {error_message}\n\n TMPPy source:\n {tmppy_source}\n \n C++ source:\n {cxx_source}\n ').format(tmppy_source=add_line_numbers(tmppy_source), cxx_source=add_line_numbers(cxx_source), error_message=_cap_to_lines(e.stderr, 40)), pytrace=False) try_remove_temporary_file(source_file_name) try_remove_temporary_file(output_file_name)
3,384,454,379,031,476,700
Tests that the given source compiles and runs successfully. :param source_code: The C++ source code. This will be dedented.
_py2tmp/testing/utils.py
expect_cpp_code_success
DalavanCloud/tmppy
python
def expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, cxx_source): '\n Tests that the given source compiles and runs successfully.\n\n :param source_code: The C++ source code. This will be dedented.\n ' if ('main(' not in cxx_source): cxx_source += textwrap.dedent('\n int main() {\n }\n ') source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp') executable_suffix = {'posix': '', 'nt': '.exe'}[os.name] output_file_name = _create_temporary_file('', executable_suffix) e = None try: compiler.compile_and_link(source=source_file_name, include_dirs=[config.MPYL_INCLUDE_DIR], output_file_name=output_file_name, args=[]) except CommandFailedException as e1: e = e1 if e: pytest.fail(textwrap.dedent(' The generated C++ source did not compile.\n Compiler command line: {compiler_command}\n Error message was:\n {error_message}\n \n TMPPy source:\n {tmppy_source}\n \n TMPPy IR2:\n {tmppy_ir2}\n \n TMPPy IR1:\n {tmppy_ir1}\n \n C++ source:\n {cxx_source}\n ').format(compiler_command=e.command, tmppy_source=add_line_numbers(tmppy_source), tmppy_ir2=str(module_ir2), tmppy_ir1=str(module_ir1), cxx_source=add_line_numbers(cxx_source), error_message=_cap_to_lines(e.stderr, 40)), pytrace=False) try: run_compiled_executable(output_file_name) except CommandFailedException as e1: e = e1 if e: pytest.fail(textwrap.dedent(' The generated C++ executable did not run successfully.\n stderr was:\n {error_message}\n\n TMPPy source:\n {tmppy_source}\n \n C++ source:\n {cxx_source}\n ').format(tmppy_source=add_line_numbers(tmppy_source), cxx_source=add_line_numbers(cxx_source), error_message=_cap_to_lines(e.stderr, 40)), pytrace=False) try_remove_temporary_file(source_file_name) try_remove_temporary_file(output_file_name)
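The compile-and-run pattern used by expect_cpp_code_success, condensed to standard-library calls. This is only a sketch assuming a g++ binary on PATH; the real helper goes through the project's compiler.compile_and_link and run_compiled_executable wrappers.

import os
import subprocess
import tempfile
import textwrap

cxx_source = textwrap.dedent('''
    #include <cstdio>
    int main() { std::puts("ok"); return 0; }
''')

# Write the source to a temporary .cpp file, compile it, run it, clean up.
with tempfile.NamedTemporaryFile(suffix='.cpp', mode='w', delete=False) as src:
    src.write(cxx_source)
exe = src.name + ('.exe' if os.name == 'nt' else '.out')
try:
    subprocess.run(['g++', src.name, '-o', exe], check=True)
    subprocess.run([exe], check=True)
finally:
    for path in (src.name, exe):
        if os.path.exists(path):
            os.remove(path)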
def test_commit(shared_instance, dbapi_database): 'Test committing a transaction with several statements.' want_row = (1, 'updated-first-name', 'last-name', 'example@example.com') conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES (1, 'first-name', 'last-name', 'example@example.com')\n ") cursor.execute("\nUPDATE contacts\nSET first_name = 'updated-first-name'\nWHERE first_name = 'first-name'\n") cursor.execute("\nUPDATE contacts\nSET email = 'example@example.com'\nWHERE email = 'example@example.com'\n") conn.commit() cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() conn.commit() assert (got_rows == [want_row]) cursor.close() conn.close()
8,524,424,343,788,117,000
Test committing a transaction with several statements.
tests/system/test_dbapi.py
test_commit
jpburbank/python-spanner
python
def test_commit(shared_instance, dbapi_database): want_row = (1, 'updated-first-name', 'last-name', 'example@example.com') conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES (1, 'first-name', 'last-name', 'example@example.com')\n ") cursor.execute("\nUPDATE contacts\nSET first_name = 'updated-first-name'\nWHERE first_name = 'first-name'\n") cursor.execute("\nUPDATE contacts\nSET email = 'example@example.com'\nWHERE email = 'example@example.com'\n") conn.commit() cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() conn.commit() assert (got_rows == [want_row]) cursor.close() conn.close()
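The commit semantics under test follow the standard DB-API 2.0 shape, so the same pattern can be demonstrated with the stdlib sqlite3 driver (chosen here only so the snippet runs anywhere; the Spanner Connection is not involved):

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE contacts (contact_id INTEGER, first_name TEXT)')
cur.execute("INSERT INTO contacts VALUES (1, 'first-name')")
cur.execute("UPDATE contacts SET first_name = 'updated-first-name' "
            "WHERE first_name = 'first-name'")
conn.commit()  # both statements become visible atomically
cur.execute('SELECT * FROM contacts')
print(cur.fetchall())  # [(1, 'updated-first-name')]
conn.close()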
def test_rollback(shared_instance, dbapi_database): 'Test rollbacking a transaction with several statements.' want_row = (2, 'first-name', 'last-name', 'example@example.com') conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES (2, 'first-name', 'last-name', 'example@example.com')\n ") conn.commit() cursor.execute("\nUPDATE contacts\nSET first_name = 'updated-first-name'\nWHERE first_name = 'first-name'\n") cursor.execute("\nUPDATE contacts\nSET email = 'example@example.com'\nWHERE email = 'example@example.com'\n") conn.rollback() cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() conn.commit() assert (got_rows == [want_row]) cursor.close() conn.close()
787,733,936,744,369,300
Test rolling back a transaction with several statements.
tests/system/test_dbapi.py
test_rollback
jpburbank/python-spanner
python
def test_rollback(shared_instance, dbapi_database): want_row = (2, 'first-name', 'last-name', 'example@example.com') conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES (2, 'first-name', 'last-name', 'example@example.com')\n ") conn.commit() cursor.execute("\nUPDATE contacts\nSET first_name = 'updated-first-name'\nWHERE first_name = 'first-name'\n") cursor.execute("\nUPDATE contacts\nSET email = 'example@example.com'\nWHERE email = 'example@example.com'\n") conn.rollback() cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() conn.commit() assert (got_rows == [want_row]) cursor.close() conn.close()
def test_autocommit_mode_change(shared_instance, dbapi_database): 'Test auto committing a transaction on `autocommit` mode change.' want_row = (2, 'updated-first-name', 'last-name', 'example@example.com') conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES (2, 'first-name', 'last-name', 'example@example.com')\n ") cursor.execute("\nUPDATE contacts\nSET first_name = 'updated-first-name'\nWHERE first_name = 'first-name'\n") conn.autocommit = True cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() assert (got_rows == [want_row]) cursor.close() conn.close()
-5,353,696,721,925,448,000
Test auto committing a transaction on `autocommit` mode change.
tests/system/test_dbapi.py
test_autocommit_mode_change
jpburbank/python-spanner
python
def test_autocommit_mode_change(shared_instance, dbapi_database): want_row = (2, 'updated-first-name', 'last-name', 'example@example.com') conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES (2, 'first-name', 'last-name', 'example@example.com')\n ") cursor.execute("\nUPDATE contacts\nSET first_name = 'updated-first-name'\nWHERE first_name = 'first-name'\n") conn.autocommit = True cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() assert (got_rows == [want_row]) cursor.close() conn.close()
def test_rollback_on_connection_closing(shared_instance, dbapi_database): "\n When closing a connection all the pending transactions\n must be rollbacked. Testing if it's working this way.\n " want_row = (1, 'first-name', 'last-name', 'example@example.com') conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES (1, 'first-name', 'last-name', 'example@example.com')\n ") conn.commit() cursor.execute("\nUPDATE contacts\nSET first_name = 'updated-first-name'\nWHERE first_name = 'first-name'\n") conn.close() conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() conn.commit() assert (got_rows == [want_row]) cursor.close() conn.close()
-7,109,604,337,055,367,000
When closing a connection, all pending transactions must be rolled back. This test checks that this is what happens.
tests/system/test_dbapi.py
test_rollback_on_connection_closing
jpburbank/python-spanner
python
def test_rollback_on_connection_closing(shared_instance, dbapi_database): "\n When closing a connection all the pending transactions\n must be rollbacked. Testing if it's working this way.\n " want_row = (1, 'first-name', 'last-name', 'example@example.com') conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES (1, 'first-name', 'last-name', 'example@example.com')\n ") conn.commit() cursor.execute("\nUPDATE contacts\nSET first_name = 'updated-first-name'\nWHERE first_name = 'first-name'\n") conn.close() conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() conn.commit() assert (got_rows == [want_row]) cursor.close() conn.close()
def test_results_checksum(shared_instance, dbapi_database): 'Test that results checksum is calculated properly.' conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES\n(1, 'first-name', 'last-name', 'example@example.com'),\n(2, 'first-name2', 'last-name2', 'example@example.com')\n ") assert (len(conn._statements) == 1) conn.commit() cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() assert (len(conn._statements) == 1) conn.commit() checksum = hashlib.sha256() checksum.update(pickle.dumps(got_rows[0])) checksum.update(pickle.dumps(got_rows[1])) assert (cursor._checksum.checksum.digest() == checksum.digest())
3,821,948,948,630,807,600
Test that results checksum is calculated properly.
tests/system/test_dbapi.py
test_results_checksum
jpburbank/python-spanner
python
def test_results_checksum(shared_instance, dbapi_database): conn = Connection(shared_instance, dbapi_database) cursor = conn.cursor() cursor.execute("\nINSERT INTO contacts (contact_id, first_name, last_name, email)\nVALUES\n(1, 'first-name', 'last-name', 'example@example.com'),\n(2, 'first-name2', 'last-name2', 'example@example.com')\n ") assert (len(conn._statements) == 1) conn.commit() cursor.execute('SELECT * FROM contacts') got_rows = cursor.fetchall() assert (len(conn._statements) == 1) conn.commit() checksum = hashlib.sha256() checksum.update(pickle.dumps(got_rows[0])) checksum.update(pickle.dumps(got_rows[1])) assert (cursor._checksum.checksum.digest() == checksum.digest())
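The checksum logic this test replays, in isolation: every fetched row is pickled and folded into a running SHA-256 digest, so any change in row content or ordering produces a different digest. The rows below are examples.

import hashlib
import pickle

rows = [
    (1, 'first-name', 'last-name', 'example@example.com'),
    (2, 'first-name2', 'last-name2', 'example@example.com'),
]
checksum = hashlib.sha256()
for row in rows:
    checksum.update(pickle.dumps(row))
print(checksum.hexdigest())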
def test_DDL_autocommit(shared_instance, dbapi_database): 'Check that DDLs in autocommit mode are immediately executed.' conn = Connection(shared_instance, dbapi_database) conn.autocommit = True cur = conn.cursor() cur.execute('\n CREATE TABLE Singers (\n SingerId INT64 NOT NULL,\n Name STRING(1024),\n ) PRIMARY KEY (SingerId)\n ') conn.close() conn = Connection(shared_instance, dbapi_database) cur = conn.cursor() cur.execute('DROP TABLE Singers') conn.commit()
7,331,400,359,012,387,000
Check that DDLs in autocommit mode are immediately executed.
tests/system/test_dbapi.py
test_DDL_autocommit
jpburbank/python-spanner
python
def test_DDL_autocommit(shared_instance, dbapi_database): conn = Connection(shared_instance, dbapi_database) conn.autocommit = True cur = conn.cursor() cur.execute('\n CREATE TABLE Singers (\n SingerId INT64 NOT NULL,\n Name STRING(1024),\n ) PRIMARY KEY (SingerId)\n ') conn.close() conn = Connection(shared_instance, dbapi_database) cur = conn.cursor() cur.execute('DROP TABLE Singers') conn.commit()
def test_DDL_commit(shared_instance, dbapi_database): 'Check that DDLs in commit mode are executed on calling `commit()`.' conn = Connection(shared_instance, dbapi_database) cur = conn.cursor() cur.execute('\n CREATE TABLE Singers (\n SingerId INT64 NOT NULL,\n Name STRING(1024),\n ) PRIMARY KEY (SingerId)\n ') conn.commit() conn.close() conn = Connection(shared_instance, dbapi_database) cur = conn.cursor() cur.execute('DROP TABLE Singers') conn.commit()
3,273,943,874,377,342,000
Check that DDLs in commit mode are executed on calling `commit()`.
tests/system/test_dbapi.py
test_DDL_commit
jpburbank/python-spanner
python
def test_DDL_commit(shared_instance, dbapi_database): conn = Connection(shared_instance, dbapi_database) cur = conn.cursor() cur.execute('\n CREATE TABLE Singers (\n SingerId INT64 NOT NULL,\n Name STRING(1024),\n ) PRIMARY KEY (SingerId)\n ') conn.commit() conn.close() conn = Connection(shared_instance, dbapi_database) cur = conn.cursor() cur.execute('DROP TABLE Singers') conn.commit()
def test_ping(shared_instance, dbapi_database): 'Check connection validation method.' conn = Connection(shared_instance, dbapi_database) conn.validate() conn.close()
427,163,684,188,578,200
Check connection validation method.
tests/system/test_dbapi.py
test_ping
jpburbank/python-spanner
python
def test_ping(shared_instance, dbapi_database): conn = Connection(shared_instance, dbapi_database) conn.validate() conn.close()
def _last_stack_str(): "Print stack trace from call that didn't originate from here" stack = extract_stack() for s in stack[::(- 1)]: if (op.join('vispy', 'gloo', 'buffer.py') not in __file__): break return format_list([s])[0]
615,006,874,114,146,700
Print stack trace from call that didn't originate from here
vispy/gloo/buffer.py
_last_stack_str
CVandML/vispy
python
def _last_stack_str(): stack = extract_stack() for s in stack[::(- 1)]: if (op.join('vispy', 'gloo', 'buffer.py') not in __file__): break return format_list([s])[0]
@property def nbytes(self): ' Buffer size in bytes ' return self._nbytes
-2,619,702,808,926,269,000
Buffer size in bytes
vispy/gloo/buffer.py
nbytes
CVandML/vispy
python
@property def nbytes(self): ' ' return self._nbytes
def set_subdata(self, data, offset=0, copy=False): ' Set a sub-region of the buffer (deferred operation).\n\n Parameters\n ----------\n\n data : ndarray\n Data to be uploaded\n offset: int\n Offset in buffer where to start copying data (in bytes)\n copy: bool\n Since the operation is deferred, data may change before\n data is actually uploaded to GPU memory.\n Asking explicitly for a copy will prevent this behavior.\n ' data = np.array(data, copy=copy) nbytes = data.nbytes if (offset < 0): raise ValueError('Offset must be positive') elif ((offset + nbytes) > self._nbytes): raise ValueError('Data does not fit into buffer') if ((nbytes == self._nbytes) and (offset == 0)): self._glir.command('SIZE', self._id, nbytes) self._glir.command('DATA', self._id, offset, data)
-5,062,661,572,289,726,000
Set a sub-region of the buffer (deferred operation). Parameters ---------- data : ndarray Data to be uploaded offset: int Offset in buffer where to start copying data (in bytes) copy: bool Since the operation is deferred, data may change before data is actually uploaded to GPU memory. Asking explicitly for a copy will prevent this behavior.
vispy/gloo/buffer.py
set_subdata
CVandML/vispy
python
def set_subdata(self, data, offset=0, copy=False): ' Set a sub-region of the buffer (deferred operation).\n\n Parameters\n ----------\n\n data : ndarray\n Data to be uploaded\n offset: int\n Offset in buffer where to start copying data (in bytes)\n copy: bool\n Since the operation is deferred, data may change before\n data is actually uploaded to GPU memory.\n Asking explicitly for a copy will prevent this behavior.\n ' data = np.array(data, copy=copy) nbytes = data.nbytes if (offset < 0): raise ValueError('Offset must be positive') elif ((offset + nbytes) > self._nbytes): raise ValueError('Data does not fit into buffer') if ((nbytes == self._nbytes) and (offset == 0)): self._glir.command('SIZE', self._id, nbytes) self._glir.command('DATA', self._id, offset, data)
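The offset and size validation that set_subdata performs, isolated into a tiny helper (a sketch for illustration, not the vispy API):

import numpy as np

def check_subdata(buffer_nbytes, data, offset):
    # Mirrors the bounds checks in Buffer.set_subdata; offset is in bytes.
    if offset < 0:
        raise ValueError('Offset must be positive')
    if offset + data.nbytes > buffer_nbytes:
        raise ValueError('Data does not fit into buffer')

check_subdata(64, np.zeros(8, np.float32), offset=32)        # ok: 32 + 32 <= 64
try:
    check_subdata(64, np.zeros(16, np.float32), offset=32)   # 32 + 64 > 64
except ValueError as err:
    print(err)  # Data does not fit into buffer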
def set_data(self, data, copy=False): ' Set data in the buffer (deferred operation).\n\n This completely resets the size and contents of the buffer.\n\n Parameters\n ----------\n data : ndarray\n Data to be uploaded\n copy: bool\n Since the operation is deferred, data may change before\n data is actually uploaded to GPU memory.\n Asking explicitly for a copy will prevent this behavior.\n ' data = np.array(data, copy=copy) nbytes = data.nbytes if (nbytes != self._nbytes): self.resize_bytes(nbytes) else: self._glir.command('SIZE', self._id, nbytes) if nbytes: self._glir.command('DATA', self._id, 0, data)
768,647,042,963,925,500
Set data in the buffer (deferred operation). This completely resets the size and contents of the buffer. Parameters ---------- data : ndarray Data to be uploaded copy: bool Since the operation is deferred, data may change before data is actually uploaded to GPU memory. Asking explicitly for a copy will prevent this behavior.
vispy/gloo/buffer.py
set_data
CVandML/vispy
python
def set_data(self, data, copy=False): ' Set data in the buffer (deferred operation).\n\n This completely resets the size and contents of the buffer.\n\n Parameters\n ----------\n data : ndarray\n Data to be uploaded\n copy: bool\n Since the operation is deferred, data may change before\n data is actually uploaded to GPU memory.\n Asking explicitly for a copy will prevent this behavior.\n ' data = np.array(data, copy=copy) nbytes = data.nbytes if (nbytes != self._nbytes): self.resize_bytes(nbytes) else: self._glir.command('SIZE', self._id, nbytes) if nbytes: self._glir.command('DATA', self._id, 0, data)
def resize_bytes(self, size): ' Resize this buffer (deferred operation). \n \n Parameters\n ----------\n size : int\n New buffer size in bytes.\n ' self._nbytes = size self._glir.command('SIZE', self._id, size) for view in self._views: if (view() is not None): view()._valid = False self._views = []
-7,041,053,181,543,419,000
Resize this buffer (deferred operation). Parameters ---------- size : int New buffer size in bytes.
vispy/gloo/buffer.py
resize_bytes
CVandML/vispy
python
def resize_bytes(self, size): ' Resize this buffer (deferred operation). \n \n Parameters\n ----------\n size : int\n New buffer size in bytes.\n ' self._nbytes = size self._glir.command('SIZE', self._id, size) for view in self._views: if (view() is not None): view()._valid = False self._views = []
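The weak-reference bookkeeping behind resize_bytes, reduced to a toy example: the buffer keeps weakrefs to its views and flags every still-alive view invalid when the storage is resized. The class names here are placeholders.

import weakref

class View:
    def __init__(self):
        self._valid = True

class Buf:
    def __init__(self):
        self._views = []
    def new_view(self):
        view = View()
        self._views.append(weakref.ref(view))  # weakref: views can be GC'd
        return view
    def resize_bytes(self, size):
        for ref in self._views:
            if ref() is not None:
                ref()._valid = False  # invalidate views that still exist
        self._views = []

buf = Buf()
v = buf.new_view()
buf.resize_bytes(128)
print(v._valid)  # False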
def set_subdata(self, data, offset=0, copy=False, **kwargs): ' Set a sub-region of the buffer (deferred operation).\n\n Parameters\n ----------\n\n data : ndarray\n Data to be uploaded\n offset: int\n Offset in buffer where to start copying data (in bytes)\n copy: bool\n Since the operation is deferred, data may change before\n data is actually uploaded to GPU memory.\n Asking explicitly for a copy will prevent this behavior.\n **kwargs : dict\n Additional keyword arguments.\n ' data = self._prepare_data(data, **kwargs) offset = (offset * self.itemsize) Buffer.set_subdata(self, data=data, offset=offset, copy=copy)
2,903,474,374,651,890,700
Set a sub-region of the buffer (deferred operation). Parameters ---------- data : ndarray Data to be uploaded offset: int Offset in buffer where to start copying data (in bytes) copy: bool Since the operation is deferred, data may change before data is actually uploaded to GPU memory. Asking explicitly for a copy will prevent this behavior. **kwargs : dict Additional keyword arguments.
vispy/gloo/buffer.py
set_subdata
CVandML/vispy
python
def set_subdata(self, data, offset=0, copy=False, **kwargs): ' Set a sub-region of the buffer (deferred operation).\n\n Parameters\n ----------\n\n data : ndarray\n Data to be uploaded\n offset: int\n Offset in buffer where to start copying data (in bytes)\n copy: bool\n Since the operation is deferred, data may change before\n data is actually uploaded to GPU memory.\n Asking explicitly for a copy will prevent this behavior.\n **kwargs : dict\n Additional keyword arguments.\n ' data = self._prepare_data(data, **kwargs) offset = (offset * self.itemsize) Buffer.set_subdata(self, data=data, offset=offset, copy=copy)
def set_data(self, data, copy=False, **kwargs): ' Set data (deferred operation)\n\n Parameters\n ----------\n data : ndarray\n Data to be uploaded\n copy: bool\n Since the operation is deferred, data may change before\n data is actually uploaded to GPU memory.\n Asking explicitly for a copy will prevent this behavior.\n **kwargs : dict\n Additional arguments.\n ' data = self._prepare_data(data, **kwargs) self._dtype = data.dtype self._stride = data.strides[(- 1)] self._itemsize = self._dtype.itemsize Buffer.set_data(self, data=data, copy=copy)
2,841,027,101,147,999,000
Set data (deferred operation) Parameters ---------- data : ndarray Data to be uploaded copy: bool Since the operation is deferred, data may change before data is actually uploaded to GPU memory. Asking explicitly for a copy will prevent this behavior. **kwargs : dict Additional arguments.
vispy/gloo/buffer.py
set_data
CVandML/vispy
python
def set_data(self, data, copy=False, **kwargs): ' Set data (deferred operation)\n\n Parameters\n ----------\n data : ndarray\n Data to be uploaded\n copy: bool\n Since the operation is deferred, data may change before\n data is actually uploaded to GPU memory.\n Asking explicitly for a copy will prevent this behavior.\n **kwargs : dict\n Additional arguments.\n ' data = self._prepare_data(data, **kwargs) self._dtype = data.dtype self._stride = data.strides[(- 1)] self._itemsize = self._dtype.itemsize Buffer.set_data(self, data=data, copy=copy)
@property def dtype(self): ' Buffer dtype ' return self._dtype
4,078,825,277,664,268,000
Buffer dtype
vispy/gloo/buffer.py
dtype
CVandML/vispy
python
@property def dtype(self): ' ' return self._dtype
@property def offset(self): ' Buffer offset (in bytes) relative to base ' return 0
8,895,473,055,360,589,000
Buffer offset (in bytes) relative to base
vispy/gloo/buffer.py
offset
CVandML/vispy
python
@property def offset(self): ' ' return 0
@property def stride(self): ' Stride of data in memory ' return self._stride
4,803,607,196,136,743,000
Stride of data in memory
vispy/gloo/buffer.py
stride
CVandML/vispy
python
@property def stride(self): ' ' return self._stride
@property def size(self): ' Number of elements in the buffer ' return self._size
-2,399,418,111,054,706,700
Number of elements in the buffer
vispy/gloo/buffer.py
size
CVandML/vispy
python
@property def size(self): ' ' return self._size
@property def itemsize(self): ' The total number of bytes required to store the array data ' return self._itemsize
2,928,576,800,103,052,000
The total number of bytes required to store the array data
vispy/gloo/buffer.py
itemsize
CVandML/vispy
python
@property def itemsize(self): ' ' return self._itemsize
@property def glsl_type(self): ' GLSL declaration strings required for a variable to hold this data.\n ' if (self.dtype is None): return None dtshape = self.dtype[0].shape n = (dtshape[0] if dtshape else 1) if (n > 1): dtype = ('vec%d' % n) else: dtype = ('float' if ('f' in self.dtype[0].base.kind) else 'int') return ('attribute', dtype)
6,848,344,529,212,954,000
GLSL declaration strings required for a variable to hold this data.
vispy/gloo/buffer.py
glsl_type
CVandML/vispy
python
@property def glsl_type(self): ' \n ' if (self.dtype is None): return None dtshape = self.dtype[0].shape n = (dtshape[0] if dtshape else 1) if (n > 1): dtype = ('vec%d' % n) else: dtype = ('float' if ('f' in self.dtype[0].base.kind) else 'int') return ('attribute', dtype)
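glsl_type can be exercised directly with NumPy structured dtypes: a field with subarray shape (n,) maps to vecN, while a scalar field maps to float or int from its base kind. A standalone restatement of the same logic:

import numpy as np

def glsl_type(dtype):
    dtshape = dtype[0].shape
    n = dtshape[0] if dtshape else 1
    if n > 1:
        base = 'vec%d' % n
    else:
        base = 'float' if 'f' in dtype[0].base.kind else 'int'
    return ('attribute', base)

print(glsl_type(np.dtype([('position', np.float32, (3,))])))  # ('attribute', 'vec3')
print(glsl_type(np.dtype([('index', np.int32)])))             # ('attribute', 'int')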
def resize_bytes(self, size): ' Resize the buffer (in-place, deferred operation)\n\n Parameters\n ----------\n size : integer\n New buffer size in bytes\n\n Notes\n -----\n This clears any pending operations.\n ' Buffer.resize_bytes(self, size) self._size = (size // self.itemsize)
3,280,132,657,365,361,700
Resize the buffer (in-place, deferred operation) Parameters ---------- size : integer New buffer size in bytes Notes ----- This clears any pending operations.
vispy/gloo/buffer.py
resize_bytes
CVandML/vispy
python
def resize_bytes(self, size): ' Resize the buffer (in-place, deferred operation)\n\n Parameters\n ----------\n size : integer\n New buffer size in bytes\n\n Notes\n -----\n This clears any pending operations.\n ' Buffer.resize_bytes(self, size) self._size = (size // self.itemsize)
def __getitem__(self, key): ' Create a view on this buffer. ' view = DataBufferView(self, key) self._views.append(weakref.ref(view)) return view
-1,744,978,608,335,112,700
Create a view on this buffer.
vispy/gloo/buffer.py
__getitem__
CVandML/vispy
python
def __getitem__(self, key): ' ' view = DataBufferView(self, key) self._views.append(weakref.ref(view)) return view
def __setitem__(self, key, data): ' Set data (deferred operation) ' if isinstance(key, string_types): raise ValueError('Cannot set non-contiguous data on buffer') elif isinstance(key, int): if (key < 0): key += self.size if ((key < 0) or (key > self.size)): raise IndexError('Buffer assignment index out of range') (start, stop, step) = (key, (key + 1), 1) elif isinstance(key, slice): (start, stop, step) = key.indices(self.size) if (stop < start): (start, stop) = (stop, start) elif (key == Ellipsis): (start, stop, step) = (0, self.size, 1) else: raise TypeError('Buffer indices must be integers or strings') if (step != 1): raise ValueError('Cannot set non-contiguous data on buffer') if (not isinstance(data, np.ndarray)): data = np.array(data, dtype=self.dtype, copy=False) if (data.size < (stop - start)): data = np.resize(data, (stop - start)) elif (data.size > (stop - start)): raise ValueError('Data too big to fit GPU data.') offset = start self.set_subdata(data=data, offset=offset, copy=True)
-7,364,368,203,983,513,000
Set data (deferred operation)
vispy/gloo/buffer.py
__setitem__
CVandML/vispy
python
def __setitem__(self, key, data): ' ' if isinstance(key, string_types): raise ValueError('Cannot set non-contiguous data on buffer') elif isinstance(key, int): if (key < 0): key += self.size if ((key < 0) or (key > self.size)): raise IndexError('Buffer assignment index out of range') (start, stop, step) = (key, (key + 1), 1) elif isinstance(key, slice): (start, stop, step) = key.indices(self.size) if (stop < start): (start, stop) = (stop, start) elif (key == Ellipsis): (start, stop, step) = (0, self.size, 1) else: raise TypeError('Buffer indices must be integers or strings') if (step != 1): raise ValueError('Cannot set non-contiguous data on buffer') if (not isinstance(data, np.ndarray)): data = np.array(data, dtype=self.dtype, copy=False) if (data.size < (stop - start)): data = np.resize(data, (stop - start)) elif (data.size > (stop - start)): raise ValueError('Data too big to fit GPU data.') offset = start self.set_subdata(data=data, offset=offset, copy=True)
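The index normalization inside __setitem__ leans on the built-in slice.indices, which resolves negative and open-ended bounds against the buffer size. A quick demonstration:

size = 10
for key in (slice(2, 5), slice(-3, None), slice(None, None, 2)):
    print(key, '->', key.indices(size))
# slice(2, 5, None) -> (2, 5, 1)
# slice(-3, None, None) -> (7, 10, 1)
# slice(None, None, 2) -> (0, 10, 2)  (step != 1 is then rejected by the buffer)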
@property def offset(self): ' Buffer offset (in bytes) relative to base ' return self._offset
8,690,617,661,463,767,000
Buffer offset (in bytes) relative to base
vispy/gloo/buffer.py
offset
CVandML/vispy
python
@property def offset(self): ' ' return self._offset
@property def base(self): 'Buffer base if this buffer is a view on another buffer. ' return self._base
5,915,824,618,440,580,000
Buffer base if this buffer is a view on another buffer.
vispy/gloo/buffer.py
base
CVandML/vispy
python
@property def base(self): ' ' return self._base
def start(): '\n This function is run once every time the start button is pressed\n ' global max_speed global show_triggers global show_joysticks print('Start function called') rc.set_update_slow_time(0.5) rc.drive.stop() max_speed = 0.25 show_triggers = False show_joysticks = False assert (rc_utils.remap_range(5, 0, 10, 0, 50) == 25) assert (rc_utils.remap_range(5, 0, 20, 1000, 900) == 975) assert (rc_utils.remap_range(2, 0, 1, (- 10), 10) == 30) assert (rc_utils.remap_range(2, 0, 1, (- 10), 10, True) == 10) assert (rc_utils.clamp(3, 0, 10) == 3) assert (rc_utils.clamp((- 2), 0, 10) == 0) assert (rc_utils.clamp(11, 0, 10) == 10) print('>> Test Utils: A testing program for the racecar_utils library.\n\nControls:\n Right trigger = accelerate forward\n Left trigger = accelerate backward\n Left joystick = turn front wheels\n A button = Take a color image and crop it to the top left\n B button = Take a color image and identify the largest red contour\n X button = Take a depth image and print several statistics\n Y button = Take a lidar scan and print several statistics\n')
-7,655,744,681,631,914,000
This function is run once every time the start button is pressed
labs/test_utils.py
start
MITLLRacecar/racecar-allison-aj
python
def start(): '\n \n ' global max_speed global show_triggers global show_joysticks print('Start function called') rc.set_update_slow_time(0.5) rc.drive.stop() max_speed = 0.25 show_triggers = False show_joysticks = False assert (rc_utils.remap_range(5, 0, 10, 0, 50) == 25) assert (rc_utils.remap_range(5, 0, 20, 1000, 900) == 975) assert (rc_utils.remap_range(2, 0, 1, (- 10), 10) == 30) assert (rc_utils.remap_range(2, 0, 1, (- 10), 10, True) == 10) assert (rc_utils.clamp(3, 0, 10) == 3) assert (rc_utils.clamp((- 2), 0, 10) == 0) assert (rc_utils.clamp(11, 0, 10) == 10) print('>> Test Utils: A testing program for the racecar_utils library.\n\nControls:\n Right trigger = accelerate forward\n Left trigger = accelerate backward\n Left joystick = turn front wheels\n A button = Take a color image and crop it to the top left\n B button = Take a color image and identify the largest red contour\n X button = Take a depth image and print several statistics\n Y button = Take a lidar scan and print several statistics\n')
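Reference implementations consistent with the assertions exercised in start(). These are sketches written to satisfy those assertions, not the racecar_utils originals:

def clamp(value, vmin, vmax):
    # Restrict value to the closed interval [vmin, vmax].
    return max(vmin, min(value, vmax))

def remap_range(val, old_min, old_max, new_min, new_max, saturate=False):
    # Linearly map val from [old_min, old_max] to [new_min, new_max];
    # with saturate=True the result is clamped to the new range.
    ratio = (val - old_min) / (old_max - old_min)
    out = new_min + ratio * (new_max - new_min)
    if saturate:
        return clamp(out, min(new_min, new_max), max(new_min, new_max))
    return out

assert remap_range(5, 0, 10, 0, 50) == 25
assert remap_range(5, 0, 20, 1000, 900) == 975
assert remap_range(2, 0, 1, -10, 10) == 30
assert remap_range(2, 0, 1, -10, 10, True) == 10
assert clamp(3, 0, 10) == 3 and clamp(-2, 0, 10) == 0 and clamp(11, 0, 10) == 10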
def update(): '\n After start() is run, this function is run every frame until the back button\n is pressed\n ' if rc.controller.was_pressed(rc.controller.Button.A): image = rc.camera.get_color_image() cropped = rc_utils.crop(image, (0, 0), ((rc.camera.get_height() // 2), (rc.camera.get_width() // 2))) rc.display.show_color_image(cropped) if rc.controller.was_pressed(rc.controller.Button.B): image = rc.camera.get_color_image() contours = rc_utils.find_contours(image, RED[0], RED[1]) largest_contour = rc_utils.get_largest_contour(contours) if (largest_contour is not None): center = rc_utils.get_contour_center(largest_contour) area = rc_utils.get_contour_area(largest_contour) print('Largest red contour: center={}, area={:.2f}'.format(center, area)) rc_utils.draw_contour(image, largest_contour, rc_utils.ColorBGR.green.value) rc_utils.draw_circle(image, center, rc_utils.ColorBGR.yellow.value) rc.display.show_color_image(image) else: print('No red contours found') if rc.controller.was_pressed(rc.controller.Button.X): depth_image = rc.camera.get_depth_image() left_distance = rc_utils.get_pixel_average_distance(depth_image, ((rc.camera.get_height() // 2), (rc.camera.get_width() // 4))) center_distance = rc_utils.get_depth_image_center_distance(depth_image) center_distance_raw = rc_utils.get_depth_image_center_distance(depth_image, 1) right_distance = rc_utils.get_pixel_average_distance(depth_image, ((rc.camera.get_height() // 2), ((3 * rc.camera.get_width()) // 4))) print(f'Depth image left distance: {left_distance:.2f} cm') print(f'Depth image center distance: {center_distance:.2f} cm') print(f'Depth image raw center distance: {center_distance_raw:.2f} cm') print(f'Depth image right distance: {right_distance:.2f} cm') upper_left_distance = rc_utils.get_pixel_average_distance(depth_image, (2, 1), 11) lower_right_distance = rc_utils.get_pixel_average_distance(depth_image, ((rc.camera.get_height() - 2), (rc.camera.get_width() - 5)), 13) print(f'Depth image upper left distance: {upper_left_distance:.2f} cm') print(f'Depth image lower right distance: {lower_right_distance:.2f} cm') cropped = rc_utils.crop(depth_image, (0, 0), (((rc.camera.get_height() * 2) // 3), rc.camera.get_width())) closest_point = rc_utils.get_closest_pixel(cropped) closest_distance = cropped[closest_point[0]][closest_point[1]] print(f'Depth image closest point (upper half): (row={closest_point[0]}, col={closest_point[1]}), distance={closest_distance:.2f} cm') rc.display.show_depth_image(cropped, points=[closest_point]) if rc.controller.was_pressed(rc.controller.Button.Y): lidar = rc.lidar.get_samples() front_distance = rc_utils.get_lidar_average_distance(lidar, 0) right_distance = rc_utils.get_lidar_average_distance(lidar, 90) back_distance = rc_utils.get_lidar_average_distance(lidar, 180) left_distance = rc_utils.get_lidar_average_distance(lidar, 270) print(f'Front LIDAR distance: {front_distance:.2f} cm') print(f'Right LIDAR distance: {right_distance:.2f} cm') print(f'Back LIDAR distance: {back_distance:.2f} cm') print(f'Left LIDAR distance: {left_distance:.2f} cm') closest_sample = rc_utils.get_lidar_closest_point(lidar) print(f'Closest LIDAR point: {closest_sample[0]:.2f} degrees, {closest_sample[1]:.2f} cm') rc.display.show_lidar(lidar, highlighted_samples=[closest_sample]) (rjoy_x, rjoy_y) = rc.controller.get_joystick(rc.controller.Joystick.RIGHT) if ((abs(rjoy_x) > 0) or (abs(rjoy_y) > 0)): lidar = rc.lidar.get_samples() angle = (((math.atan2(rjoy_x, rjoy_y) * 180) / math.pi) % 360) distance = rc_utils.get_lidar_average_distance(lidar, angle) print(f'LIDAR distance at angle {angle:.2f} = {distance:.2f} cm') left_trigger = rc.controller.get_trigger(rc.controller.Trigger.LEFT) right_trigger = rc.controller.get_trigger(rc.controller.Trigger.RIGHT) left_joystick = rc.controller.get_joystick(rc.controller.Joystick.LEFT) rc.drive.set_speed_angle((right_trigger - left_trigger), left_joystick[0])
5,690,716,355,874,481,000
After start() is run, this function is run every frame until the back button is pressed
labs/test_utils.py
update
MITLLRacecar/racecar-allison-aj
python
def update(): '\n    After start() is run, this function is run every frame until the back button\n    is pressed\n    ' if rc.controller.was_pressed(rc.controller.Button.A): image = rc.camera.get_color_image() cropped = rc_utils.crop(image, (0, 0), ((rc.camera.get_height() // 2), (rc.camera.get_width() // 2))) rc.display.show_color_image(cropped) if rc.controller.was_pressed(rc.controller.Button.B): image = rc.camera.get_color_image() contours = rc_utils.find_contours(image, RED[0], RED[1]) largest_contour = rc_utils.get_largest_contour(contours) if (largest_contour is not None): center = rc_utils.get_contour_center(largest_contour) area = rc_utils.get_contour_area(largest_contour) print('Largest red contour: center={}, area={:.2f}'.format(center, area)) rc_utils.draw_contour(image, largest_contour, rc_utils.ColorBGR.green.value) rc_utils.draw_circle(image, center, rc_utils.ColorBGR.yellow.value) rc.display.show_color_image(image) else: print('No red contours found') if rc.controller.was_pressed(rc.controller.Button.X): depth_image = rc.camera.get_depth_image() left_distance = rc_utils.get_pixel_average_distance(depth_image, ((rc.camera.get_height() // 2), (rc.camera.get_width() // 4))) center_distance = rc_utils.get_depth_image_center_distance(depth_image) center_distance_raw = rc_utils.get_depth_image_center_distance(depth_image, 1) right_distance = rc_utils.get_pixel_average_distance(depth_image, ((rc.camera.get_height() // 2), ((3 * rc.camera.get_width()) // 4))) print(f'Depth image left distance: {left_distance:.2f} cm') print(f'Depth image center distance: {center_distance:.2f} cm') print(f'Depth image raw center distance: {center_distance_raw:.2f} cm') print(f'Depth image right distance: {right_distance:.2f} cm') upper_left_distance = rc_utils.get_pixel_average_distance(depth_image, (2, 1), 11) lower_right_distance = rc_utils.get_pixel_average_distance(depth_image, ((rc.camera.get_height() - 2), (rc.camera.get_width() - 5)), 13) print(f'Depth image upper left distance: {upper_left_distance:.2f} cm') print(f'Depth image lower right distance: {lower_right_distance:.2f} cm') cropped = rc_utils.crop(depth_image, (0, 0), (((rc.camera.get_height() * 2) // 3), rc.camera.get_width())) closest_point = rc_utils.get_closest_pixel(cropped) closest_distance = cropped[closest_point[0]][closest_point[1]] print(f'Depth image closest point (upper half): (row={closest_point[0]}, col={closest_point[1]}), distance={closest_distance:.2f} cm') rc.display.show_depth_image(cropped, points=[closest_point]) if rc.controller.was_pressed(rc.controller.Button.Y): lidar = rc.lidar.get_samples() front_distance = rc_utils.get_lidar_average_distance(lidar, 0) right_distance = rc_utils.get_lidar_average_distance(lidar, 90) back_distance = rc_utils.get_lidar_average_distance(lidar, 180) left_distance = rc_utils.get_lidar_average_distance(lidar, 270) print(f'Front LIDAR distance: {front_distance:.2f} cm') print(f'Right LIDAR distance: {right_distance:.2f} cm') print(f'Back LIDAR distance: {back_distance:.2f} cm') print(f'Left LIDAR distance: {left_distance:.2f} cm') closest_sample = rc_utils.get_lidar_closest_point(lidar) print(f'Closest LIDAR point: {closest_sample[0]:.2f} degrees, {closest_sample[1]:.2f} cm') rc.display.show_lidar(lidar, highlighted_samples=[closest_sample]) (rjoy_x, rjoy_y) = rc.controller.get_joystick(rc.controller.Joystick.RIGHT) if ((abs(rjoy_x) > 0) or (abs(rjoy_y) > 0)): lidar = rc.lidar.get_samples() angle = (((math.atan2(rjoy_x, rjoy_y) * 180) / math.pi) % 360) distance = rc_utils.get_lidar_average_distance(lidar, angle) print(f'LIDAR distance at angle {angle:.2f} = {distance:.2f} cm') left_trigger = rc.controller.get_trigger(rc.controller.Trigger.LEFT) right_trigger = rc.controller.get_trigger(rc.controller.Trigger.RIGHT) left_joystick = rc.controller.get_joystick(rc.controller.Joystick.LEFT) rc.drive.set_speed_angle((right_trigger - left_trigger), left_joystick[0])
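The right-joystick branch of update() maps a 2D stick deflection to a LIDAR bearing via atan2. A minimal standalone sketch of just that mapping, using only the standard library (no racecar objects are assumed here):

import math

def joystick_to_lidar_angle(x: float, y: float) -> float:
    # Same expression as in update(): atan2(x, y) measures the angle
    # clockwise from straight ahead (+y), converted to degrees in [0, 360).
    return (math.atan2(x, y) * 180 / math.pi) % 360

print(joystick_to_lidar_angle(0.0, 1.0))   # 0.0   (stick forward -> dead ahead)
print(joystick_to_lidar_angle(1.0, 0.0))   # 90.0  (stick right -> right side)
print(joystick_to_lidar_angle(0.0, -1.0))  # 180.0 (stick back -> behind)

This matches the Y-button branch's convention, where 0 degrees is the front and 90 degrees is the car's right.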
def shrink(coords: np.ndarray, dist: float) -> tuple[np.ndarray]: 'Shrinks a 2D polygon by a given distance.\n\n    The coordinates of the polygon are expected as an N x 2-matrix,\n    and a positive distance results in inward shrinking.\n    \n    An empty set is returned if the shrinking operation removes all\n    original elements.\n\n    Args:\n        coords: A matrix of coordinates.\n        dist: The distance to shrink by.\n\n    Returns:\n        A tuple containing the x, y coordinates of the original set, as\n        well as the x and y coordinates of the shrunken set, in that\n        order.\n    ' my_polygon = geometry.Polygon(coords) xy = my_polygon.exterior.xy my_polygon_shrunken = my_polygon.buffer((- dist)) try: xys = my_polygon_shrunken.exterior.xy except AttributeError: xys = ([0], [0]) return (*xy, *xys)
9,125,759,857,284,818,000
Shrinks a 2D polygon by a given distance. The coordinates of the polygon are expected as an N x 2-matrix, and a positive distance results in inward shrinking. An empty set is returned if the shrinking operation removes all original elements. Args: coords: A matrix of coordinates. dist: The distance to shrink by. Returns: A tuple containing the x, y coordinates of the original set, as well as the x and y coordinates of the shrunken set, in that order.
geometry_tools.py
shrink
helkebir/Reachable-Set-Inner-Approximation
python
def shrink(coords: np.ndarray, dist: float) -> tuple[np.ndarray]: 'Shrinks a 2D polygon by a given distance.\n\n    The coordinates of the polygon are expected as an N x 2-matrix,\n    and a positive distance results in inward shrinking.\n    \n    An empty set is returned if the shrinking operation removes all\n    original elements.\n\n    Args:\n        coords: A matrix of coordinates.\n        dist: The distance to shrink by.\n\n    Returns:\n        A tuple containing the x, y coordinates of the original set, as\n        well as the x and y coordinates of the shrunken set, in that\n        order.\n    ' my_polygon = geometry.Polygon(coords) xy = my_polygon.exterior.xy my_polygon_shrunken = my_polygon.buffer((- dist)) try: xys = my_polygon_shrunken.exterior.xy except AttributeError: xys = ([0], [0]) return (*xy, *xys)
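A hedged usage sketch for shrink(): the unit square and the 0.1 inset below are invented for illustration, and the function assumes `import numpy as np` and `from shapely import geometry` at module level.

import numpy as np
from shapely import geometry  # required by shrink() above

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
x, y, xs, ys = shrink(square, 0.1)
print(list(zip(xs, ys)))  # vertices of roughly the inset square [0.1, 0.9]^2

When dist exceeds the polygon's inradius the negatively buffered result is empty; on shapely versions where the empty geometry exposes no usable exterior, the except branch returns the ([0], [0]) sentinel described in the docstring.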
def hausdorff(A: np.ndarray, B: np.ndarray) -> float: 'Computes the Hausdorff distance between two 2D polygons.\n\n Args:\n A: A matrix defining the first polygon.\n B: A matrix defining the second polygon.\n \n Returns:\n A float representing the Hausdorff distance.\n ' return geometry.Polygon(A).hausdorff_distance(geometry.Polygon(B))
5,987,260,360,704,853,000
Computes the Hausdorff distance between two 2D polygons. Args: A: A matrix defining the first polygon. B: A matrix defining the second polygon. Returns: A float representing the Hausdorff distance.
geometry_tools.py
hausdorff
helkebir/Reachable-Set-Inner-Approximation
python
def hausdorff(A: np.ndarray, B: np.ndarray) -> float: 'Computes the Hausdorff distance between two 2D polygons.\n\n Args:\n A: A matrix defining the first polygon.\n B: A matrix defining the second polygon.\n \n Returns:\n A float representing the Hausdorff distance.\n ' return geometry.Polygon(A).hausdorff_distance(geometry.Polygon(B))
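To make the metric concrete, here is an illustrative call (the squares are made up for the example): two unit squares offset by one unit along x are Hausdorff distance 1 apart, since every point of each polygon is within 1 of the other and the far corners achieve exactly 1.

import numpy as np

A = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
B = A + np.array([1, 0])  # same unit square shifted one unit right
print(hausdorff(A, B))    # 1.0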
def read_polygon(file: str) -> np.ndarray: 'Reads a polygon from a table.\n\n Args:\n file: Path to a file containing a plain text, tab-separated\n table with scalars.\n \n Returns:\n A matrix containing the data in the file.\n ' return np.genfromtxt(file)
2,876,362,114,396,736,500
Reads a polygon from a table. Args: file: Path to a file containing a plain text, tab-separated table with scalars. Returns: A matrix containing the data in the file.
geometry_tools.py
read_polygon
helkebir/Reachable-Set-Inner-Approximation
python
def read_polygon(file: str) -> np.ndarray: 'Reads a polygon from a table.\n\n Args:\n file: Path to a file containing a plain text, tab-separated\n table with scalars.\n \n Returns:\n A matrix containing the data in the file.\n ' return np.genfromtxt(file)
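read_polygon() is a thin wrapper over np.genfromtxt, so the expected file is a plain-text table with one whitespace-separated vertex per row. A self-contained sketch of the equivalent parse (the triangle data is invented for the example):

import io
import numpy as np

table = "0.0\t0.0\n1.0\t0.0\n0.5\t1.0\n"   # tab-separated, one vertex per row
coords = np.genfromtxt(io.StringIO(table))  # what read_polygon does with a file path
print(coords.shape)                         # (3, 2)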
def deserialize_args(args): 'Try to deserialize the given args. Return the input unchanged if it is not serialized' deserialized = parse_qs(args) if (deserialized == {}): return args else: return deserialized
5,916,436,655,583,058,000
Try to deserialize the given args. Return the input unchanged if it is not serialized
dartui/utils.py
deserialize_args
cjlucas/DarTui
python
def deserialize_args(args): deserialized = parse_qs(args) if (deserialized == {}): return args else: return deserialized
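The behavior behind deserialize_args() is just parse_qs: a query string yields a dict of lists, while a string with no key=value pairs yields {} and the function hands back the input unchanged. A quick illustration (DarTui is Python 2 era, where the import is urlparse.parse_qs; the Python 3 path is shown, and the keys are invented):

from urllib.parse import parse_qs  # urlparse.parse_qs on Python 2

print(parse_qs('throttle.max=100&view=main'))  # {'throttle.max': ['100'], 'view': ['main']}
print(parse_qs('not a query string'))          # {} -> deserialize_args returns the input as-is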
def get_disk_usage(path): "Return disk usage statistics about the given path.\n\n    Returned value is a named tuple with attributes 'total', 'used' and\n    'free', which are the amount of total, used and free space, in bytes.\n    \n    Source: http://stackoverflow.com/a/7285483/975118\n    " st = os.statvfs(path) free = (st.f_bavail * st.f_frsize) total = (st.f_blocks * st.f_frsize) used = ((st.f_blocks - st.f_bfree) * st.f_frsize) return _ntuple_diskusage(total, used, free)
-6,989,553,908,795,418,000
Return disk usage statistics about the given path. Returned value is a named tuple with attributes 'total', 'used' and 'free', which are the amount of total, used and free space, in bytes. Source: http://stackoverflow.com/a/7285483/975118
dartui/utils.py
get_disk_usage
cjlucas/DarTui
python
def get_disk_usage(path): "Return disk usage statistics about the given path.\n\n    Returned value is a named tuple with attributes 'total', 'used' and\n    'free', which are the amount of total, used and free space, in bytes.\n    \n    Source: http://stackoverflow.com/a/7285483/975118\n    " st = os.statvfs(path) free = (st.f_bavail * st.f_frsize) total = (st.f_blocks * st.f_frsize) used = ((st.f_blocks - st.f_bfree) * st.f_frsize) return _ntuple_diskusage(total, used, free)
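_ntuple_diskusage is defined elsewhere in dartui/utils.py; assuming the obvious namedtuple shape, usage looks like the sketch below. Note this is POSIX only, since os.statvfs does not exist on Windows.

import collections
import os

# Assumed definition; the real one lives at module level in dartui/utils.py.
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')

usage = get_disk_usage('/')
print(usage.free / float(usage.total))  # fraction of the filesystem still free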
def get_torrent_files(f): '\n    Input:\n        f -- cgi.FieldStorage object\n    Returns:\n        torrent_files -- a list of TorrentFile objects\n    ' torrent_files = [] if f.filename.lower().endswith('.zip'): z = zipfile.ZipFile(f.file) torrent_files = [TorrentFile(name=zi.filename, data=z.open(zi).read()) for zi in z.infolist() if zi.filename.lower().endswith('.torrent')] elif f.filename.lower().endswith('.torrent'): torrent_files = [TorrentFile(name=f.filename, data=f.file.read())] return torrent_files
-3,494,092,591,716,452,000
Input: f -- cgi.FieldStorage object Returns: torrent_files -- a list of TorrentFile objects
dartui/utils.py
get_torrent_files
cjlucas/DarTui
python
def get_torrent_files(f): '\n    Input:\n        f -- cgi.FieldStorage object\n    Returns:\n        torrent_files -- a list of TorrentFile objects\n    ' torrent_files = [] if f.filename.lower().endswith('.zip'): z = zipfile.ZipFile(f.file) torrent_files = [TorrentFile(name=zi.filename, data=z.open(zi).read()) for zi in z.infolist() if zi.filename.lower().endswith('.torrent')] elif f.filename.lower().endswith('.torrent'): torrent_files = [TorrentFile(name=f.filename, data=f.file.read())] return torrent_files
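A sketch of exercising get_torrent_files() without a real CGI request: FakeField is a made-up stand-in for the cgi.FieldStorage file item the function expects, TorrentFile is the class defined elsewhere in this module (its .name attribute is assumed from the constructor call above), and the torrent payload is a placeholder, not valid bencoding.

import io
import zipfile

class FakeField(object):  # minimal stand-in for a cgi.FieldStorage file item
    def __init__(self, filename, data):
        self.filename = filename
        self.file = io.BytesIO(data)

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as z:
    z.writestr('ubuntu.torrent', b'd8:announce0:e')  # placeholder torrent payload
    z.writestr('readme.txt', b'skipped')             # non-.torrent entries are ignored
buf.seek(0)

files = get_torrent_files(FakeField('batch.zip', buf.read()))
print([tf.name for tf in files])  # ['ubuntu.torrent']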
def __init__(self, value=None, defaultFormat='%a[SHORT], %d %b[SHORT] %Y %H:%M:%S %Z'): ' The value should be in the LOCAL timezone.\n\t\t' self.ourValue = value self.defaultFormat = defaultFormat
-1,599,152,371,501,616,000
The value should be in the LOCAL timezone.
lib/pubtal/DateContext.py
__init__
owlfish/pubtal
python
def __init__(self, value=None, defaultFormat='%a[SHORT], %d %b[SHORT] %Y %H:%M:%S %Z'): ' \n\t\t' self.ourValue = value self.defaultFormat = defaultFormat
def setUp(self): 'set up the test\n ' pymel.core.newFile(force=True) self.sm = pymel.core.PyNode('sequenceManager1')
-1,835,430,997,191,037,400
set up the test
tests/previs/test_sequence_manager_extension.py
setUp
Khosiyat/anima
python
def setUp(self): '\n ' pymel.core.newFile(force=True) self.sm = pymel.core.PyNode('sequenceManager1')
def test_from_xml_path_argument_skipped(self): 'testing if a TypeError will be raised when the path argument is\n skipped\n ' sm = pymel.core.PyNode('sequenceManager1') with self.assertRaises(TypeError) as cm: sm.from_xml() self.assertEqual(cm.exception.message, 'from_xml() takes exactly 2 arguments (1 given)')
-9,032,480,269,813,086,000
testing if a TypeError will be raised when the path argument is skipped
tests/previs/test_sequence_manager_extension.py
test_from_xml_path_argument_skipped
Khosiyat/anima
python
def test_from_xml_path_argument_skipped(self): 'testing if a TypeError will be raised when the path argument is\n skipped\n ' sm = pymel.core.PyNode('sequenceManager1') with self.assertRaises(TypeError) as cm: sm.from_xml() self.assertEqual(cm.exception.message, 'from_xml() takes exactly 2 arguments (1 given)')
def test_from_xml_path_argument_is_not_a_string(self): 'testing if a TypeError will be raised when the path argument is not\n a string\n ' sm = pymel.core.PyNode('sequenceManager1') with self.assertRaises(TypeError) as cm: sm.from_xml(30) self.assertEqual(cm.exception.message, 'path argument in SequenceManager.from_xml should be a string, not int')
2,190,324,599,069,293,000
testing if a TypeError will be raised when the path argument is not a string
tests/previs/test_sequence_manager_extension.py
test_from_xml_path_argument_is_not_a_string
Khosiyat/anima
python
def test_from_xml_path_argument_is_not_a_string(self): 'testing if a TypeError will be raised when the path argument is not\n a string\n ' sm = pymel.core.PyNode('sequenceManager1') with self.assertRaises(TypeError) as cm: sm.from_xml(30) self.assertEqual(cm.exception.message, 'path argument in SequenceManager.from_xml should be a string, not int')
def test_from_xml_path_argument_is_not_a_valid_path(self): 'testing if an IOError will be raised when the path argument is not\n        a valid path\n        ' sm = pymel.core.PyNode('sequenceManager1') with self.assertRaises(IOError) as cm: sm.from_xml('not a valid path') self.assertEqual(cm.exception.message, 'Please supply a valid path to an XML file!')
1,832,061,779,750,431,200
testing if an IOError will be raised when the path argument is not a valid path
tests/previs/test_sequence_manager_extension.py
test_from_xml_path_argument_is_not_a_valid_path
Khosiyat/anima
python
def test_from_xml_path_argument_is_not_a_valid_path(self): 'testing if an IOError will be raised when the path argument is not\n        a valid path\n        ' sm = pymel.core.PyNode('sequenceManager1') with self.assertRaises(IOError) as cm: sm.from_xml('not a valid path') self.assertEqual(cm.exception.message, 'Please supply a valid path to an XML file!')
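These tests use the assertRaises context manager, but note that cm.exception.message exists only on Python 2. A minimal standalone sketch of the same pattern that also runs on Python 3 (the file path and asserted text are invented for the example):

import unittest

class PathValidationTest(unittest.TestCase):
    def test_bad_path_raises(self):
        with self.assertRaises(IOError) as cm:
            open('/no/such/path/for/this/example')
        # On Python 3, read the message via str(cm.exception) instead of
        # the Python 2 only cm.exception.message attribute.
        self.assertIn('No such file', str(cm.exception))

if __name__ == '__main__':
    unittest.main()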