body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
a7de602534dbd75f4c20da2810f4c29cbd17626cd190e44245813ab4f626f6ac
def import_reference(self): 'Import the referenec of the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.import_reference()
Import the referenec of the current reftrack :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
import_reference
JukeboxPipeline/jukebox-core
2
python
def import_reference(self): 'Import the referenec of the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.import_reference()
def import_reference(self): 'Import the referenec of the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' self.reftrack.import_reference()<|docstring|>Import the referenec of the current reftrack :returns: None :rtype: None :raises: None<|endoftext|>
9f5c736b81e266eabc3a255dd3ce0851cf57e50bd16b3aafa49fc07067b08cae
def replace(self): 'Replace the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' tfi = self.get_taskfileinfo_selection() if tfi: self.reftrack.replace(tfi)
Replace the current reftrack :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
replace
JukeboxPipeline/jukebox-core
2
python
def replace(self): 'Replace the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' tfi = self.get_taskfileinfo_selection() if tfi: self.reftrack.replace(tfi)
def replace(self): 'Replace the current reftrack\n\n :returns: None\n :rtype: None\n :raises: None\n ' tfi = self.get_taskfileinfo_selection() if tfi: self.reftrack.replace(tfi)<|docstring|>Replace the current reftrack :returns: None :rtype: None :raises: None<|endoftext|>
86e56e031780f3305a32b1435a2be285eb6460db2f10e0dbd8a0737d71fe6757
def __init__(self, parent=None): 'Initialize a new ReftrackDelegate\n\n :param parent:\n :type parent:\n :raises: None\n ' super(ReftrackDelegate, self).__init__(parent)
Initialize a new ReftrackDelegate :param parent: :type parent: :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
__init__
JukeboxPipeline/jukebox-core
2
python
def __init__(self, parent=None): 'Initialize a new ReftrackDelegate\n\n :param parent:\n :type parent:\n :raises: None\n ' super(ReftrackDelegate, self).__init__(parent)
def __init__(self, parent=None): 'Initialize a new ReftrackDelegate\n\n :param parent:\n :type parent:\n :raises: None\n ' super(ReftrackDelegate, self).__init__(parent)<|docstring|>Initialize a new ReftrackDelegate :param parent: :type parent: :raises: None<|endoftext|>
94970b937ed95e63b6aa8252239bec7f03919c7fbd7c84942baa6b3f6adadfee
def create_widget(self, parent=None): 'Return a widget that should get painted by the delegate\n\n You might want to use this in :meth:`WidgetDelegate.createEditor`\n\n :returns: The created widget | None\n :rtype: QtGui.QWidget | None\n :raises: None\n ' return ReftrackWidget(parent)
Return a widget that should get painted by the delegate You might want to use this in :meth:`WidgetDelegate.createEditor` :returns: The created widget | None :rtype: QtGui.QWidget | None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
create_widget
JukeboxPipeline/jukebox-core
2
python
def create_widget(self, parent=None): 'Return a widget that should get painted by the delegate\n\n You might want to use this in :meth:`WidgetDelegate.createEditor`\n\n :returns: The created widget | None\n :rtype: QtGui.QWidget | None\n :raises: None\n ' return ReftrackWidget(parent)
def create_widget(self, parent=None): 'Return a widget that should get painted by the delegate\n\n You might want to use this in :meth:`WidgetDelegate.createEditor`\n\n :returns: The created widget | None\n :rtype: QtGui.QWidget | None\n :raises: None\n ' return ReftrackWidget(parent)<|docstring|>Return a widget that should get painted by the delegate You might want to use this in :meth:`WidgetDelegate.createEditor` :returns: The created widget | None :rtype: QtGui.QWidget | None :raises: None<|endoftext|>
e101371bb35b89f01976a767c05e32d77ca69d31d440194a922daed34455689b
def set_widget_index(self, index): 'Set the index for the widget. The widget should retrieve data from the index and display it.\n\n You might want use the same function as for :meth:`WidgetDelegate.setEditorData`.\n\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' self.widget.set_index(index)
Set the index for the widget. The widget should retrieve data from the index and display it. You might want use the same function as for :meth:`WidgetDelegate.setEditorData`. :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
set_widget_index
JukeboxPipeline/jukebox-core
2
python
def set_widget_index(self, index): 'Set the index for the widget. The widget should retrieve data from the index and display it.\n\n You might want use the same function as for :meth:`WidgetDelegate.setEditorData`.\n\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' self.widget.set_index(index)
def set_widget_index(self, index): 'Set the index for the widget. The widget should retrieve data from the index and display it.\n\n You might want use the same function as for :meth:`WidgetDelegate.setEditorData`.\n\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' self.widget.set_index(index)<|docstring|>Set the index for the widget. The widget should retrieve data from the index and display it. You might want use the same function as for :meth:`WidgetDelegate.setEditorData`. :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None<|endoftext|>
417c515e83c40e0b1a467041672d68a159acddbf12d577897cbe35121009c065
def create_editor_widget(self, parent, option, index): "Return the editor to be used for editing the data item with the given index.\n\n Note that the index contains information about the model being used.\n The editor's parent widget is specified by parent, and the item options by option.\n\n :param parent: the parent widget\n :type parent: QtGui.QWidget\n :param option: the options for painting\n :type option: QtGui.QStyleOptionViewItem\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n " return self.create_widget(parent)
Return the editor to be used for editing the data item with the given index. Note that the index contains information about the model being used. The editor's parent widget is specified by parent, and the item options by option. :param parent: the parent widget :type parent: QtGui.QWidget :param option: the options for painting :type option: QtGui.QStyleOptionViewItem :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
create_editor_widget
JukeboxPipeline/jukebox-core
2
python
def create_editor_widget(self, parent, option, index): "Return the editor to be used for editing the data item with the given index.\n\n Note that the index contains information about the model being used.\n The editor's parent widget is specified by parent, and the item options by option.\n\n :param parent: the parent widget\n :type parent: QtGui.QWidget\n :param option: the options for painting\n :type option: QtGui.QStyleOptionViewItem\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n " return self.create_widget(parent)
def create_editor_widget(self, parent, option, index): "Return the editor to be used for editing the data item with the given index.\n\n Note that the index contains information about the model being used.\n The editor's parent widget is specified by parent, and the item options by option.\n\n :param parent: the parent widget\n :type parent: QtGui.QWidget\n :param option: the options for painting\n :type option: QtGui.QStyleOptionViewItem\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n " return self.create_widget(parent)<|docstring|>Return the editor to be used for editing the data item with the given index. Note that the index contains information about the model being used. The editor's parent widget is specified by parent, and the item options by option. :param parent: the parent widget :type parent: QtGui.QWidget :param option: the options for painting :type option: QtGui.QStyleOptionViewItem :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None<|endoftext|>
2a04df0d133275324399c888d5758c25fcd0e619b62295ba4b02938b4f40fe76
def setEditorData(self, editor, index): 'Sets the contents of the given editor to the data for the item at the given index.\n\n Note that the index contains information about the model being used.\n\n :param editor: the editor widget\n :type editor: QtGui.QWidget\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' editor.set_index(index)
Sets the contents of the given editor to the data for the item at the given index. Note that the index contains information about the model being used. :param editor: the editor widget :type editor: QtGui.QWidget :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None
src/jukeboxcore/gui/widgets/reftrackwidget.py
setEditorData
JukeboxPipeline/jukebox-core
2
python
def setEditorData(self, editor, index): 'Sets the contents of the given editor to the data for the item at the given index.\n\n Note that the index contains information about the model being used.\n\n :param editor: the editor widget\n :type editor: QtGui.QWidget\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' editor.set_index(index)
def setEditorData(self, editor, index): 'Sets the contents of the given editor to the data for the item at the given index.\n\n Note that the index contains information about the model being used.\n\n :param editor: the editor widget\n :type editor: QtGui.QWidget\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None\n ' editor.set_index(index)<|docstring|>Sets the contents of the given editor to the data for the item at the given index. Note that the index contains information about the model being used. :param editor: the editor widget :type editor: QtGui.QWidget :param index: the index to paint :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None<|endoftext|>
ff302aaf7b8dd229ab551c5f7f22fcb08f4eec26d74d0a83c4279ea4b294d0ac
def get_features(self, progressbar=None): '\n Get the feature values\n ' feature_df_rows = [] df = self.dataset.get_df() rows = list(df.iterrows()) for (i, (_, row)) in enumerate(rows): feature_series = self._get_features_for_single_case(row) if (feature_series is not None): feature_df_rows.append(feature_series) if progressbar: fraction_complete = ((i + 1) / len(rows)) progressbar.progress(fraction_complete) feature_df = pd.concat(feature_df_rows, axis=1).T return feature_df
Get the feature values
webapp/extractor.py
get_features
pwoznicki/Radiomics
2
python
def get_features(self, progressbar=None): '\n \n ' feature_df_rows = [] df = self.dataset.get_df() rows = list(df.iterrows()) for (i, (_, row)) in enumerate(rows): feature_series = self._get_features_for_single_case(row) if (feature_series is not None): feature_df_rows.append(feature_series) if progressbar: fraction_complete = ((i + 1) / len(rows)) progressbar.progress(fraction_complete) feature_df = pd.concat(feature_df_rows, axis=1).T return feature_df
def get_features(self, progressbar=None): '\n \n ' feature_df_rows = [] df = self.dataset.get_df() rows = list(df.iterrows()) for (i, (_, row)) in enumerate(rows): feature_series = self._get_features_for_single_case(row) if (feature_series is not None): feature_df_rows.append(feature_series) if progressbar: fraction_complete = ((i + 1) / len(rows)) progressbar.progress(fraction_complete) feature_df = pd.concat(feature_df_rows, axis=1).T return feature_df<|docstring|>Get the feature values<|endoftext|>
a0839004404583dcfeb9246b3c8d2bb3ec5a55b1832c1fe4ad0552d7818a5f24
def test_handler_5xx(self): ' Check error view returns error message ' from werkzeug.exceptions import InternalServerError from pysite.views.error_handlers import http_5xx error_view = http_5xx.Error500View() error_message = error_view.get(InternalServerError) self.assertEqual(error_message[1], 500)
Check error view returns error message
tests/test_mixins.py
test_handler_5xx
landizz/site
0
python
def test_handler_5xx(self): ' ' from werkzeug.exceptions import InternalServerError from pysite.views.error_handlers import http_5xx error_view = http_5xx.Error500View() error_message = error_view.get(InternalServerError) self.assertEqual(error_message[1], 500)
def test_handler_5xx(self): ' ' from werkzeug.exceptions import InternalServerError from pysite.views.error_handlers import http_5xx error_view = http_5xx.Error500View() error_message = error_view.get(InternalServerError) self.assertEqual(error_message[1], 500)<|docstring|>Check error view returns error message<|endoftext|>
9f1fd78c62c9ad0134a3aa41aa8a457bc00623386b6d0a5156eb3b6bcc676aeb
def test_route_view_runtime_error(self): ' Check that wrong values for route view setup raises runtime error ' from pysite.base_route import RouteView rv = RouteView() with self.assertRaises(RuntimeError): rv.setup(manager, 'sdfsdf')
Check that wrong values for route view setup raises runtime error
tests/test_mixins.py
test_route_view_runtime_error
landizz/site
0
python
def test_route_view_runtime_error(self): ' ' from pysite.base_route import RouteView rv = RouteView() with self.assertRaises(RuntimeError): rv.setup(manager, 'sdfsdf')
def test_route_view_runtime_error(self): ' ' from pysite.base_route import RouteView rv = RouteView() with self.assertRaises(RuntimeError): rv.setup(manager, 'sdfsdf')<|docstring|>Check that wrong values for route view setup raises runtime error<|endoftext|>
ff6d1681358785730747f6782b0f670e796b5a1e210aa67b3fa5d93907643d94
def test_oauth_property(self): ' Make sure the oauth property works' from flask import Blueprint from pysite.route_manager import RouteView from pysite.oauth import OAuthBackend class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertIsInstance(tr.oauth, OAuthBackend)
Make sure the oauth property works
tests/test_mixins.py
test_oauth_property
landizz/site
0
python
def test_oauth_property(self): ' ' from flask import Blueprint from pysite.route_manager import RouteView from pysite.oauth import OAuthBackend class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertIsInstance(tr.oauth, OAuthBackend)
def test_oauth_property(self): ' ' from flask import Blueprint from pysite.route_manager import RouteView from pysite.oauth import OAuthBackend class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertIsInstance(tr.oauth, OAuthBackend)<|docstring|>Make sure the oauth property works<|endoftext|>
52461f0c8fab2afb70707e61582aaba36352adfcfe74c03da786b33fde1654d9
def test_user_data_property(self): ' Make sure the user_data property works' from flask import Blueprint from pysite.route_manager import RouteView class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertIsNone(tr.user_data)
Make sure the user_data property works
tests/test_mixins.py
test_user_data_property
landizz/site
0
python
def test_user_data_property(self): ' ' from flask import Blueprint from pysite.route_manager import RouteView class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertIsNone(tr.user_data)
def test_user_data_property(self): ' ' from flask import Blueprint from pysite.route_manager import RouteView class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertIsNone(tr.user_data)<|docstring|>Make sure the user_data property works<|endoftext|>
d52764bb6a3e1690ac4ffe01af16cfcb1bd429307443b4207fd0d41793b5d0ef
def test_logged_in_property(self): ' Make sure the user_data property works' from flask import Blueprint from pysite.route_manager import RouteView class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertFalse(tr.logged_in)
Make sure the user_data property works
tests/test_mixins.py
test_logged_in_property
landizz/site
0
python
def test_logged_in_property(self): ' ' from flask import Blueprint from pysite.route_manager import RouteView class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertFalse(tr.logged_in)
def test_logged_in_property(self): ' ' from flask import Blueprint from pysite.route_manager import RouteView class TestRoute(RouteView): name = 'test' path = '/test' tr = TestRoute() tr.setup(manager, Blueprint('test', 'test_name')) self.assertFalse(tr.logged_in)<|docstring|>Make sure the user_data property works<|endoftext|>
23ec926117bce4ccbbc88730c1348d4bad81bcf7fad20aa31f7276edd58e5759
def __init__(self): 'Constructor\n ' super(Command, self).__init__() self._scanner = None
Constructor
scale/ingest/management/commands/scale_scan.py
__init__
kfconsultant/scale
121
python
def __init__(self): '\n ' super(Command, self).__init__() self._scanner = None
def __init__(self): '\n ' super(Command, self).__init__() self._scanner = None<|docstring|>Constructor<|endoftext|>
d67da7540a6e80378a29b503e0652776e6fa562f37ca1210c251da57544b587f
def handle(self, *args, **options): 'See :meth:`django.core.management.base.BaseCommand.handle`.\n\n This method starts the Scan processor.\n ' signal.signal(signal.SIGTERM, self._onsigterm) scan_id = options.get('scan_id') dry_run = bool(strtobool(options.get('dry_run'))) local = options.get('local') if (not scan_id): logger.error('-i or --scan-id parameter must be specified for Scan configuration.') sys.exit(1) logger.info('Command starting: scale_scan') logger.info('Scan ID: %i', scan_id) logger.info('Dry Run: %s', str(dry_run)) logger.info('Local Test: %s', local) logger.info('Querying database for Scan configuration') scan = Scan.objects.select_related('job').get(pk=scan_id) self._scanner = scan.get_scan_configuration().get_scanner() self._scanner.scan_id = scan_id logger.info('Starting %s scanner', self._scanner.scanner_type) if (options['local'] and patch): workspace = self._scanner._scanned_workspace if (('broker' in workspace.json_config) and ('host_path' in workspace.json_config['broker'])): with patch.object(Workspace, '_get_volume_path', return_value=workspace.json_config['broker']['host_path']) as mock_method: self._scanner.run(dry_run=dry_run) logger.info('Scanner has stopped running') logger.info('Command completed: scale_scan') return self._scanner.run(dry_run=dry_run) logger.info('Scanner has stopped running') logger.info('Command completed: scale_scan')
See :meth:`django.core.management.base.BaseCommand.handle`. This method starts the Scan processor.
scale/ingest/management/commands/scale_scan.py
handle
kfconsultant/scale
121
python
def handle(self, *args, **options): 'See :meth:`django.core.management.base.BaseCommand.handle`.\n\n This method starts the Scan processor.\n ' signal.signal(signal.SIGTERM, self._onsigterm) scan_id = options.get('scan_id') dry_run = bool(strtobool(options.get('dry_run'))) local = options.get('local') if (not scan_id): logger.error('-i or --scan-id parameter must be specified for Scan configuration.') sys.exit(1) logger.info('Command starting: scale_scan') logger.info('Scan ID: %i', scan_id) logger.info('Dry Run: %s', str(dry_run)) logger.info('Local Test: %s', local) logger.info('Querying database for Scan configuration') scan = Scan.objects.select_related('job').get(pk=scan_id) self._scanner = scan.get_scan_configuration().get_scanner() self._scanner.scan_id = scan_id logger.info('Starting %s scanner', self._scanner.scanner_type) if (options['local'] and patch): workspace = self._scanner._scanned_workspace if (('broker' in workspace.json_config) and ('host_path' in workspace.json_config['broker'])): with patch.object(Workspace, '_get_volume_path', return_value=workspace.json_config['broker']['host_path']) as mock_method: self._scanner.run(dry_run=dry_run) logger.info('Scanner has stopped running') logger.info('Command completed: scale_scan') return self._scanner.run(dry_run=dry_run) logger.info('Scanner has stopped running') logger.info('Command completed: scale_scan')
def handle(self, *args, **options): 'See :meth:`django.core.management.base.BaseCommand.handle`.\n\n This method starts the Scan processor.\n ' signal.signal(signal.SIGTERM, self._onsigterm) scan_id = options.get('scan_id') dry_run = bool(strtobool(options.get('dry_run'))) local = options.get('local') if (not scan_id): logger.error('-i or --scan-id parameter must be specified for Scan configuration.') sys.exit(1) logger.info('Command starting: scale_scan') logger.info('Scan ID: %i', scan_id) logger.info('Dry Run: %s', str(dry_run)) logger.info('Local Test: %s', local) logger.info('Querying database for Scan configuration') scan = Scan.objects.select_related('job').get(pk=scan_id) self._scanner = scan.get_scan_configuration().get_scanner() self._scanner.scan_id = scan_id logger.info('Starting %s scanner', self._scanner.scanner_type) if (options['local'] and patch): workspace = self._scanner._scanned_workspace if (('broker' in workspace.json_config) and ('host_path' in workspace.json_config['broker'])): with patch.object(Workspace, '_get_volume_path', return_value=workspace.json_config['broker']['host_path']) as mock_method: self._scanner.run(dry_run=dry_run) logger.info('Scanner has stopped running') logger.info('Command completed: scale_scan') return self._scanner.run(dry_run=dry_run) logger.info('Scanner has stopped running') logger.info('Command completed: scale_scan')<|docstring|>See :meth:`django.core.management.base.BaseCommand.handle`. This method starts the Scan processor.<|endoftext|>
86f020b5f854d36a6b98362d46587e2e7fa3a92d21363ccfcf9248d8d5cb8085
def _onsigterm(self, signum, _frame): 'See signal callback registration: :py:func:`signal.signal`.\n\n This callback performs a clean shutdown when a TERM signal is received.\n ' logger.info('Scan command received sigterm, telling scanner to stop') if self._scanner: self._scanner.stop()
See signal callback registration: :py:func:`signal.signal`. This callback performs a clean shutdown when a TERM signal is received.
scale/ingest/management/commands/scale_scan.py
_onsigterm
kfconsultant/scale
121
python
def _onsigterm(self, signum, _frame): 'See signal callback registration: :py:func:`signal.signal`.\n\n This callback performs a clean shutdown when a TERM signal is received.\n ' logger.info('Scan command received sigterm, telling scanner to stop') if self._scanner: self._scanner.stop()
def _onsigterm(self, signum, _frame): 'See signal callback registration: :py:func:`signal.signal`.\n\n This callback performs a clean shutdown when a TERM signal is received.\n ' logger.info('Scan command received sigterm, telling scanner to stop') if self._scanner: self._scanner.stop()<|docstring|>See signal callback registration: :py:func:`signal.signal`. This callback performs a clean shutdown when a TERM signal is received.<|endoftext|>
676a36271e700b6f269a8a86aa1bf19fd83a0db0606c9c956fdede5b809bdc5c
def format_address(host: str, port: int) -> str: 'Return a formatted IP address given the host and port.\n\n Args:\n host: The host IP address or name.\n port: The port number.\n\n Returns:\n An IPv4/v6 formatted address based on the host.\n\n Example:\n >>> format_address("127.0.0.1", 8080)\n 127.0.0.1:8080\n >>> format_address("foo", 8080)\n foo:8080\n >>> format_address("::1", 8080)\n [::1]:8080\n ' try: ipaddress.IPv6Address(host) return f'[{host}]:{port}' except ipaddress.AddressValueError: return f'{host}:{port}'
Return a formatted IP address given the host and port. Args: host: The host IP address or name. port: The port number. Returns: An IPv4/v6 formatted address based on the host. Example: >>> format_address("127.0.0.1", 8080) 127.0.0.1:8080 >>> format_address("foo", 8080) foo:8080 >>> format_address("::1", 8080) [::1]:8080
tglib/tglib/utils/ip.py
format_address
kkkkv/tgnms
12
python
def format_address(host: str, port: int) -> str: 'Return a formatted IP address given the host and port.\n\n Args:\n host: The host IP address or name.\n port: The port number.\n\n Returns:\n An IPv4/v6 formatted address based on the host.\n\n Example:\n >>> format_address("127.0.0.1", 8080)\n 127.0.0.1:8080\n >>> format_address("foo", 8080)\n foo:8080\n >>> format_address("::1", 8080)\n [::1]:8080\n ' try: ipaddress.IPv6Address(host) return f'[{host}]:{port}' except ipaddress.AddressValueError: return f'{host}:{port}'
def format_address(host: str, port: int) -> str: 'Return a formatted IP address given the host and port.\n\n Args:\n host: The host IP address or name.\n port: The port number.\n\n Returns:\n An IPv4/v6 formatted address based on the host.\n\n Example:\n >>> format_address("127.0.0.1", 8080)\n 127.0.0.1:8080\n >>> format_address("foo", 8080)\n foo:8080\n >>> format_address("::1", 8080)\n [::1]:8080\n ' try: ipaddress.IPv6Address(host) return f'[{host}]:{port}' except ipaddress.AddressValueError: return f'{host}:{port}'<|docstring|>Return a formatted IP address given the host and port. Args: host: The host IP address or name. port: The port number. Returns: An IPv4/v6 formatted address based on the host. Example: >>> format_address("127.0.0.1", 8080) 127.0.0.1:8080 >>> format_address("foo", 8080) foo:8080 >>> format_address("::1", 8080) [::1]:8080<|endoftext|>
55c01e2fa5a56bcbd50e0c7cc9f49773ed88630e9838f04806a8b042d100a2d7
def data_loader(self, **kwargs): "\n Description\n ----------\n This method enables efficient loading of data, as it allows users to select variables\n and clusters of interest (such that not all data needs to be loaded).\n ----------\n\n Parameters\n ----------\n **kwargs (dictionary)\n extract_clusters (str / int / list)\n Cluster IDs to extract (if int, takes first n clusters; if 'all', takes all); defaults to 'None'.\n extract_variables (str / list)\n Variables to extract (if 'all', take all); defaults to 'None'.\n ----------\n\n Returns\n ----------\n file_info (str)\n The shortened version of the file name.\n data (dict)\n A dictionary with variable names as keys, and variable arrays as values.\n ----------\n " extract_clusters = (kwargs['extract_clusters'] if (('extract_clusters' in kwargs.keys()) and ((kwargs['extract_clusters'] == 'all') or (type(kwargs['extract_clusters']) == int) or (type(kwargs['extract_clusters']) == list))) else 'None') extract_variables = (kwargs['extract_variables'] if (('extract_variables' in kwargs.keys()) and ((kwargs['extract_variables'] == 'all') or (type(kwargs['extract_variables']) == list))) else 'None') if ((extract_clusters != 'None') or (extract_variables != 'None')): data = {} if (self.session != 0): if os.path.exists(self.session): with open(self.session, 'rb') as session_file: loaded = pickle.load(session_file) for (key, value) in loaded.items(): if ((key == 'cell_activities') or (key == 'file_info')): continue elif (key == 'cell_names'): if (extract_clusters != 'None'): if ('cluster_spikes' not in data): data['cluster_spikes'] = {} if (extract_clusters == 'all'): for (name_idx, name) in enumerate(loaded['cell_names']): data['cluster_spikes'][name] = loaded['cell_activities'][name_idx].ravel() elif (type(extract_clusters) == list): for name in extract_clusters: name_idx = loaded['cell_names'].index(name) data['cluster_spikes'][name] = loaded['cell_activities'][name_idx].ravel() else: for name_idx in 
range(extract_clusters): data['cluster_spikes'][loaded['cell_names'][name_idx]] = loaded['cell_activities'][name_idx].ravel() elif (extract_variables != 'None'): data['total_frame_num'] = loaded['head_origin'].shape[0] if ((extract_variables == 'all') or (key in extract_variables)): data[key] = value else: print(f'Invalid location for file {self.session}. Please try again.') sys.exit() else: print('No session provided.') sys.exit() return (loaded['file_info'], data)
Description ---------- This method enables efficient loading of data, as it allows users to select variables and clusters of interest (such that not all data needs to be loaded). ---------- Parameters ---------- **kwargs (dictionary) extract_clusters (str / int / list) Cluster IDs to extract (if int, takes first n clusters; if 'all', takes all); defaults to 'None'. extract_variables (str / list) Variables to extract (if 'all', take all); defaults to 'None'. ---------- Returns ---------- file_info (str) The shortened version of the file name. data (dict) A dictionary with variable names as keys, and variable arrays as values. ----------
sessions2load.py
data_loader
bartulem/KISN-pancortical-kinematics
2
python
def data_loader(self, **kwargs): "\n Description\n ----------\n This method enables efficient loading of data, as it allows users to select variables\n and clusters of interest (such that not all data needs to be loaded).\n ----------\n\n Parameters\n ----------\n **kwargs (dictionary)\n extract_clusters (str / int / list)\n Cluster IDs to extract (if int, takes first n clusters; if 'all', takes all); defaults to 'None'.\n extract_variables (str / list)\n Variables to extract (if 'all', take all); defaults to 'None'.\n ----------\n\n Returns\n ----------\n file_info (str)\n The shortened version of the file name.\n data (dict)\n A dictionary with variable names as keys, and variable arrays as values.\n ----------\n " extract_clusters = (kwargs['extract_clusters'] if (('extract_clusters' in kwargs.keys()) and ((kwargs['extract_clusters'] == 'all') or (type(kwargs['extract_clusters']) == int) or (type(kwargs['extract_clusters']) == list))) else 'None') extract_variables = (kwargs['extract_variables'] if (('extract_variables' in kwargs.keys()) and ((kwargs['extract_variables'] == 'all') or (type(kwargs['extract_variables']) == list))) else 'None') if ((extract_clusters != 'None') or (extract_variables != 'None')): data = {} if (self.session != 0): if os.path.exists(self.session): with open(self.session, 'rb') as session_file: loaded = pickle.load(session_file) for (key, value) in loaded.items(): if ((key == 'cell_activities') or (key == 'file_info')): continue elif (key == 'cell_names'): if (extract_clusters != 'None'): if ('cluster_spikes' not in data): data['cluster_spikes'] = {} if (extract_clusters == 'all'): for (name_idx, name) in enumerate(loaded['cell_names']): data['cluster_spikes'][name] = loaded['cell_activities'][name_idx].ravel() elif (type(extract_clusters) == list): for name in extract_clusters: name_idx = loaded['cell_names'].index(name) data['cluster_spikes'][name] = loaded['cell_activities'][name_idx].ravel() else: for name_idx in 
range(extract_clusters): data['cluster_spikes'][loaded['cell_names'][name_idx]] = loaded['cell_activities'][name_idx].ravel() elif (extract_variables != 'None'): data['total_frame_num'] = loaded['head_origin'].shape[0] if ((extract_variables == 'all') or (key in extract_variables)): data[key] = value else: print(f'Invalid location for file {self.session}. Please try again.') sys.exit() else: print('No session provided.') sys.exit() return (loaded['file_info'], data)
def data_loader(self, **kwargs): "\n Description\n ----------\n This method enables efficient loading of data, as it allows users to select variables\n and clusters of interest (such that not all data needs to be loaded).\n ----------\n\n Parameters\n ----------\n **kwargs (dictionary)\n extract_clusters (str / int / list)\n Cluster IDs to extract (if int, takes first n clusters; if 'all', takes all); defaults to 'None'.\n extract_variables (str / list)\n Variables to extract (if 'all', take all); defaults to 'None'.\n ----------\n\n Returns\n ----------\n file_info (str)\n The shortened version of the file name.\n data (dict)\n A dictionary with variable names as keys, and variable arrays as values.\n ----------\n " extract_clusters = (kwargs['extract_clusters'] if (('extract_clusters' in kwargs.keys()) and ((kwargs['extract_clusters'] == 'all') or (type(kwargs['extract_clusters']) == int) or (type(kwargs['extract_clusters']) == list))) else 'None') extract_variables = (kwargs['extract_variables'] if (('extract_variables' in kwargs.keys()) and ((kwargs['extract_variables'] == 'all') or (type(kwargs['extract_variables']) == list))) else 'None') if ((extract_clusters != 'None') or (extract_variables != 'None')): data = {} if (self.session != 0): if os.path.exists(self.session): with open(self.session, 'rb') as session_file: loaded = pickle.load(session_file) for (key, value) in loaded.items(): if ((key == 'cell_activities') or (key == 'file_info')): continue elif (key == 'cell_names'): if (extract_clusters != 'None'): if ('cluster_spikes' not in data): data['cluster_spikes'] = {} if (extract_clusters == 'all'): for (name_idx, name) in enumerate(loaded['cell_names']): data['cluster_spikes'][name] = loaded['cell_activities'][name_idx].ravel() elif (type(extract_clusters) == list): for name in extract_clusters: name_idx = loaded['cell_names'].index(name) data['cluster_spikes'][name] = loaded['cell_activities'][name_idx].ravel() else: for name_idx in 
range(extract_clusters): data['cluster_spikes'][loaded['cell_names'][name_idx]] = loaded['cell_activities'][name_idx].ravel() elif (extract_variables != 'None'): data['total_frame_num'] = loaded['head_origin'].shape[0] if ((extract_variables == 'all') or (key in extract_variables)): data[key] = value else: print(f'Invalid location for file {self.session}. Please try again.') sys.exit() else: print('No session provided.') sys.exit() return (loaded['file_info'], data)<|docstring|>Description ---------- This method enables efficient loading of data, as it allows users to select variables and clusters of interest (such that not all data needs to be loaded). ---------- Parameters ---------- **kwargs (dictionary) extract_clusters (str / int / list) Cluster IDs to extract (if int, takes first n clusters; if 'all', takes all); defaults to 'None'. extract_variables (str / list) Variables to extract (if 'all', take all); defaults to 'None'. ---------- Returns ---------- file_info (str) The shortened version of the file name. data (dict) A dictionary with variable names as keys, and variable arrays as values. ----------<|endoftext|>
138c576200cffbb500d7198158891f25c8ebcdb97fffa5818e1bbbc0f60b0480
def __str__(self): '\n Returns\n ----------\n type (str)\n Variable type & description.\n ----------\n ' return f'''Type: {self.data_info[self.describe]['type']}. Description: {self.data_info[self.describe]['description']}'''
Returns ---------- type (str) Variable type & description. ----------
sessions2load.py
__str__
bartulem/KISN-pancortical-kinematics
2
python
def __str__(self): '\n Returns\n ----------\n type (str)\n Variable type & description.\n ----------\n ' return f'Type: {self.data_info[self.describe]['type']}. Description: {self.data_info[self.describe]['description']}'
def __str__(self): '\n Returns\n ----------\n type (str)\n Variable type & description.\n ----------\n ' return f'Type: {self.data_info[self.describe]['type']}. Description: {self.data_info[self.describe]['description']}'<|docstring|>Returns ---------- type (str) Variable type & description. ----------<|endoftext|>
c95fab714ebef0672251fe9d1ecd39d6f3ba3093ae89914e4ce4d106e0703d8c
def cpu_count(): ' Return the number of CPUs.\n ' if (multiprocessing is None): return 1 return multiprocessing.cpu_count()
Return the number of CPUs.
plugin/AssemblerSPAdes/bin/SPAdes-3.1.0-Linux/share/spades/joblib3/parallel.py
cpu_count
konradotto/TS
125
python
def cpu_count(): ' \n ' if (multiprocessing is None): return 1 return multiprocessing.cpu_count()
def cpu_count(): ' \n ' if (multiprocessing is None): return 1 return multiprocessing.cpu_count()<|docstring|>Return the number of CPUs.<|endoftext|>
df12dc36f01ffd6f4426433208fd343d7c00899136b820a109ec886a7ffbfe95
def _verbosity_filter(index, verbose): ' Returns False for indices increasingly apart, the distance\n depending on the value of verbose.\n\n We use a lag increasing as the square of index\n ' if (not verbose): return True elif (verbose > 10): return False if (index == 0): return False verbose = (0.5 * ((11 - verbose) ** 2)) scale = sqrt((index / verbose)) next_scale = sqrt(((index + 1) / verbose)) return (int(next_scale) == int(scale))
Returns False for indices increasingly apart, the distance depending on the value of verbose. We use a lag increasing as the square of index
plugin/AssemblerSPAdes/bin/SPAdes-3.1.0-Linux/share/spades/joblib3/parallel.py
_verbosity_filter
konradotto/TS
125
python
def _verbosity_filter(index, verbose): ' Returns False for indices increasingly apart, the distance\n depending on the value of verbose.\n\n We use a lag increasing as the square of index\n ' if (not verbose): return True elif (verbose > 10): return False if (index == 0): return False verbose = (0.5 * ((11 - verbose) ** 2)) scale = sqrt((index / verbose)) next_scale = sqrt(((index + 1) / verbose)) return (int(next_scale) == int(scale))
def _verbosity_filter(index, verbose): ' Returns False for indices increasingly apart, the distance\n depending on the value of verbose.\n\n We use a lag increasing as the square of index\n ' if (not verbose): return True elif (verbose > 10): return False if (index == 0): return False verbose = (0.5 * ((11 - verbose) ** 2)) scale = sqrt((index / verbose)) next_scale = sqrt(((index + 1) / verbose)) return (int(next_scale) == int(scale))<|docstring|>Returns False for indices increasingly apart, the distance depending on the value of verbose. We use a lag increasing as the square of index<|endoftext|>
1dc15c5cfac1800f22621dbe7524fe4bfb37cab751294977c46852bbdb429099
def delayed(function): ' Decorator used to capture the arguments of a function.\n ' pickle.dumps(function) def delayed_function(*args, **kwargs): return (function, args, kwargs) try: delayed_function = functools.wraps(function)(delayed_function) except AttributeError: ' functools.wraps fails on some callable objects ' return delayed_function
Decorator used to capture the arguments of a function.
plugin/AssemblerSPAdes/bin/SPAdes-3.1.0-Linux/share/spades/joblib3/parallel.py
delayed
konradotto/TS
125
python
def delayed(function): ' \n ' pickle.dumps(function) def delayed_function(*args, **kwargs): return (function, args, kwargs) try: delayed_function = functools.wraps(function)(delayed_function) except AttributeError: ' functools.wraps fails on some callable objects ' return delayed_function
def delayed(function): ' \n ' pickle.dumps(function) def delayed_function(*args, **kwargs): return (function, args, kwargs) try: delayed_function = functools.wraps(function)(delayed_function) except AttributeError: ' functools.wraps fails on some callable objects ' return delayed_function<|docstring|>Decorator used to capture the arguments of a function.<|endoftext|>
97c5fb67891fedab6013863f73ebd1beac053faff063dfd3d26d59d9db78b234
def dispatch(self, func, args, kwargs): ' Queue the function for computing, with or without multiprocessing\n ' if (self._pool is None): job = ImmediateApply(func, args, kwargs) index = len(self._jobs) if (not _verbosity_filter(index, self.verbose)): self._print('Done %3i jobs | elapsed: %s', ((index + 1), short_format_time((time.time() - self._start_time)))) self._jobs.append(job) self.n_dispatched += 1 else: if self._aborting: return try: self._lock.acquire() job = self._pool.apply_async(SafeFunction(func), args, kwargs, callback=CallBack(self.n_dispatched, self)) self._jobs.append(job) self.n_dispatched += 1 except AssertionError: print('[Parallel] Pool seems closed') finally: self._lock.release()
Queue the function for computing, with or without multiprocessing
plugin/AssemblerSPAdes/bin/SPAdes-3.1.0-Linux/share/spades/joblib3/parallel.py
dispatch
konradotto/TS
125
python
def dispatch(self, func, args, kwargs): ' \n ' if (self._pool is None): job = ImmediateApply(func, args, kwargs) index = len(self._jobs) if (not _verbosity_filter(index, self.verbose)): self._print('Done %3i jobs | elapsed: %s', ((index + 1), short_format_time((time.time() - self._start_time)))) self._jobs.append(job) self.n_dispatched += 1 else: if self._aborting: return try: self._lock.acquire() job = self._pool.apply_async(SafeFunction(func), args, kwargs, callback=CallBack(self.n_dispatched, self)) self._jobs.append(job) self.n_dispatched += 1 except AssertionError: print('[Parallel] Pool seems closed') finally: self._lock.release()
def dispatch(self, func, args, kwargs): ' \n ' if (self._pool is None): job = ImmediateApply(func, args, kwargs) index = len(self._jobs) if (not _verbosity_filter(index, self.verbose)): self._print('Done %3i jobs | elapsed: %s', ((index + 1), short_format_time((time.time() - self._start_time)))) self._jobs.append(job) self.n_dispatched += 1 else: if self._aborting: return try: self._lock.acquire() job = self._pool.apply_async(SafeFunction(func), args, kwargs, callback=CallBack(self.n_dispatched, self)) self._jobs.append(job) self.n_dispatched += 1 except AssertionError: print('[Parallel] Pool seems closed') finally: self._lock.release()<|docstring|>Queue the function for computing, with or without multiprocessing<|endoftext|>
e34b969d65344a09e720514914bb1d835aea72e67be9927108833717aba3f349
def dispatch_next(self): ' Dispatch more data for parallel processing\n ' self._dispatch_amount += 1 while self._dispatch_amount: try: (func, args, kwargs) = next(self._iterable) self.dispatch(func, args, kwargs) self._dispatch_amount -= 1 except ValueError: ' Race condition in accessing a generator, we skip,\n the dispatch will be done later.\n ' except StopIteration: self._iterable = None return
Dispatch more data for parallel processing
plugin/AssemblerSPAdes/bin/SPAdes-3.1.0-Linux/share/spades/joblib3/parallel.py
dispatch_next
konradotto/TS
125
python
def dispatch_next(self): ' \n ' self._dispatch_amount += 1 while self._dispatch_amount: try: (func, args, kwargs) = next(self._iterable) self.dispatch(func, args, kwargs) self._dispatch_amount -= 1 except ValueError: ' Race condition in accessing a generator, we skip,\n the dispatch will be done later.\n ' except StopIteration: self._iterable = None return
def dispatch_next(self): ' \n ' self._dispatch_amount += 1 while self._dispatch_amount: try: (func, args, kwargs) = next(self._iterable) self.dispatch(func, args, kwargs) self._dispatch_amount -= 1 except ValueError: ' Race condition in accessing a generator, we skip,\n the dispatch will be done later.\n ' except StopIteration: self._iterable = None return<|docstring|>Dispatch more data for parallel processing<|endoftext|>
5b9dae0625a2a9483581eb9b8011da86ff3061733f2bd8ae3874940d6258705f
def _print(self, msg, msg_args): ' Display the message on stout or stderr depending on verbosity\n ' if (not self.verbose): return if (self.verbose < 50): writer = sys.stderr.write else: writer = sys.stdout.write msg = (msg % msg_args) writer(('[%s]: %s\n' % (self, msg)))
Display the message on stout or stderr depending on verbosity
plugin/AssemblerSPAdes/bin/SPAdes-3.1.0-Linux/share/spades/joblib3/parallel.py
_print
konradotto/TS
125
python
def _print(self, msg, msg_args): ' \n ' if (not self.verbose): return if (self.verbose < 50): writer = sys.stderr.write else: writer = sys.stdout.write msg = (msg % msg_args) writer(('[%s]: %s\n' % (self, msg)))
def _print(self, msg, msg_args): ' \n ' if (not self.verbose): return if (self.verbose < 50): writer = sys.stderr.write else: writer = sys.stdout.write msg = (msg % msg_args) writer(('[%s]: %s\n' % (self, msg)))<|docstring|>Display the message on stout or stderr depending on verbosity<|endoftext|>
63ae3182ad340370128ebfbd6e2dcd2ca977508b27ea0c2dfa6f14084c2fd272
def print_progress(self, index): 'Display the process of the parallel execution only a fraction\n of time, controlled by self.verbose.\n ' if (not self.verbose): return elapsed_time = (time.time() - self._start_time) if self._iterable: if _verbosity_filter(index, self.verbose): return self._print('Done %3i jobs | elapsed: %s', ((index + 1), short_format_time(elapsed_time))) else: queue_length = self.n_dispatched if (not (index == 0)): cursor = (((queue_length - index) + 1) - self._pre_dispatch_amount) frequency = ((queue_length // self.verbose) + 1) is_last_item = ((index + 1) == queue_length) if (is_last_item or (cursor % frequency)): return remaining_time = ((elapsed_time / (index + 1)) * ((self.n_dispatched - index) - 1.0)) self._print('Done %3i out of %3i | elapsed: %s remaining: %s', ((index + 1), queue_length, short_format_time(elapsed_time), short_format_time(remaining_time)))
Display the process of the parallel execution only a fraction of time, controlled by self.verbose.
plugin/AssemblerSPAdes/bin/SPAdes-3.1.0-Linux/share/spades/joblib3/parallel.py
print_progress
konradotto/TS
125
python
def print_progress(self, index): 'Display the process of the parallel execution only a fraction\n of time, controlled by self.verbose.\n ' if (not self.verbose): return elapsed_time = (time.time() - self._start_time) if self._iterable: if _verbosity_filter(index, self.verbose): return self._print('Done %3i jobs | elapsed: %s', ((index + 1), short_format_time(elapsed_time))) else: queue_length = self.n_dispatched if (not (index == 0)): cursor = (((queue_length - index) + 1) - self._pre_dispatch_amount) frequency = ((queue_length // self.verbose) + 1) is_last_item = ((index + 1) == queue_length) if (is_last_item or (cursor % frequency)): return remaining_time = ((elapsed_time / (index + 1)) * ((self.n_dispatched - index) - 1.0)) self._print('Done %3i out of %3i | elapsed: %s remaining: %s', ((index + 1), queue_length, short_format_time(elapsed_time), short_format_time(remaining_time)))
def print_progress(self, index): 'Display the process of the parallel execution only a fraction\n of time, controlled by self.verbose.\n ' if (not self.verbose): return elapsed_time = (time.time() - self._start_time) if self._iterable: if _verbosity_filter(index, self.verbose): return self._print('Done %3i jobs | elapsed: %s', ((index + 1), short_format_time(elapsed_time))) else: queue_length = self.n_dispatched if (not (index == 0)): cursor = (((queue_length - index) + 1) - self._pre_dispatch_amount) frequency = ((queue_length // self.verbose) + 1) is_last_item = ((index + 1) == queue_length) if (is_last_item or (cursor % frequency)): return remaining_time = ((elapsed_time / (index + 1)) * ((self.n_dispatched - index) - 1.0)) self._print('Done %3i out of %3i | elapsed: %s remaining: %s', ((index + 1), queue_length, short_format_time(elapsed_time), short_format_time(remaining_time)))<|docstring|>Display the process of the parallel execution only a fraction of time, controlled by self.verbose.<|endoftext|>
07281937d3dd696b8f6c77a53145d2492b858418d9a949a204793a10c7e4f16c
def get_dwd_glm_basedir(sector='C', lat=None, lon=None, period='1min'): 'Return the directory where processed GLM data shall be stored.' base = os.environ['NAS_DATA'] if (sector not in ('C', 'F', 'M1', 'M2')): raise ValueError(f"Invalid sector: {sector!s}. Expected 'C', 'F', 'M1', or 'M2'.") bd = (pathlib.Path(base) / 'GLM-processed') bd /= sector if sector.startswith('M'): bd /= f'{lat:.1f}_{lon:.1f}' return (bd / period)
Return the directory where processed GLM data shall be stored.
src/sattools/glm.py
get_dwd_glm_basedir
gerritholl/sattools
0
python
def get_dwd_glm_basedir(sector='C', lat=None, lon=None, period='1min'): base = os.environ['NAS_DATA'] if (sector not in ('C', 'F', 'M1', 'M2')): raise ValueError(f"Invalid sector: {sector!s}. Expected 'C', 'F', 'M1', or 'M2'.") bd = (pathlib.Path(base) / 'GLM-processed') bd /= sector if sector.startswith('M'): bd /= f'{lat:.1f}_{lon:.1f}' return (bd / period)
def get_dwd_glm_basedir(sector='C', lat=None, lon=None, period='1min'): base = os.environ['NAS_DATA'] if (sector not in ('C', 'F', 'M1', 'M2')): raise ValueError(f"Invalid sector: {sector!s}. Expected 'C', 'F', 'M1', or 'M2'.") bd = (pathlib.Path(base) / 'GLM-processed') bd /= sector if sector.startswith('M'): bd /= f'{lat:.1f}_{lon:.1f}' return (bd / period)<|docstring|>Return the directory where processed GLM data shall be stored.<|endoftext|>
3bc04eb6276bc47ad77aa459e7d971714fe4523a7998f0c7347d7073c4b4b00d
def get_pattern_dwd_glm(sector='C', lat=None, lon=None, period='1min'): 'Return filename pattern for storing processed GLM data.' bd = get_dwd_glm_basedir(sector=sector, lat=lat, lon=lon, period=period) seclab = (sector if (sector in ('C', 'F', 'M1')) else 'M1') return str((bd / f'{{year}}/{{month}}/{{day}}/{{hour}}/OR_GLM-L2-GLM{seclab:s}-M3_G16_s{{year}}{{doy}}{{hour}}{{minute}}{{second}}*_e{{end_year}}{{end_doy}}{{end_hour}}{{end_minute}}{{end_second}}*_c*.nc'))
Return filename pattern for storing processed GLM data.
src/sattools/glm.py
get_pattern_dwd_glm
gerritholl/sattools
0
python
def get_pattern_dwd_glm(sector='C', lat=None, lon=None, period='1min'): bd = get_dwd_glm_basedir(sector=sector, lat=lat, lon=lon, period=period) seclab = (sector if (sector in ('C', 'F', 'M1')) else 'M1') return str((bd / f'{{year}}/{{month}}/{{day}}/{{hour}}/OR_GLM-L2-GLM{seclab:s}-M3_G16_s{{year}}{{doy}}{{hour}}{{minute}}{{second}}*_e{{end_year}}{{end_doy}}{{end_hour}}{{end_minute}}{{end_second}}*_c*.nc'))
def get_pattern_dwd_glm(sector='C', lat=None, lon=None, period='1min'): bd = get_dwd_glm_basedir(sector=sector, lat=lat, lon=lon, period=period) seclab = (sector if (sector in ('C', 'F', 'M1')) else 'M1') return str((bd / f'{{year}}/{{month}}/{{day}}/{{hour}}/OR_GLM-L2-GLM{seclab:s}-M3_G16_s{{year}}{{doy}}{{hour}}{{minute}}{{second}}*_e{{end_year}}{{end_doy}}{{end_hour}}{{end_minute}}{{end_second}}*_c*.nc'))<|docstring|>Return filename pattern for storing processed GLM data.<|endoftext|>
a8b267042c86b781f6baa0dec6c49e00f41dd8fd66c6a55b5f6eb74907d883c2
def ensure_glm_lcfa_for_period(start_date, end_date): 'Make sure GLM LCFA files for period are present locally.\n\n Yields the local paths for the (cached or downloaded) files.\n ' logger.debug(f'Ensuring local LCFA availability {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}') cachedir = appdirs.user_cache_dir('GLM-file-cache') s3 = s3fs.S3FileSystem(anon=True) wfcfs = fsspec.implementations.cached.WholeFileCacheFileSystem(fs=s3, cache_storage=cachedir, cache_check=86400, check_files=False, expiry_time=False, same_names=True) glm_lcfa = FileSet(path=pattern_s3_glm_lcfa, name='glm_lcfa', fs=s3) for f in glm_lcfa.find(start_date, end_date): if (not (f.times[1] > start_date)): continue logger.debug(f'Downloading {f!s}') with wfcfs.open(f, mode='rb'): exp = (pathlib.Path(cachedir) / pathlib.Path(f).name) logger.debug(f'Writing to {exp!s}') if (not exp.exists()): raise FileNotFoundError(f'Not found! {exp!s}') (yield exp)
Make sure GLM LCFA files for period are present locally. Yields the local paths for the (cached or downloaded) files.
src/sattools/glm.py
ensure_glm_lcfa_for_period
gerritholl/sattools
0
python
def ensure_glm_lcfa_for_period(start_date, end_date): 'Make sure GLM LCFA files for period are present locally.\n\n Yields the local paths for the (cached or downloaded) files.\n ' logger.debug(f'Ensuring local LCFA availability {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}') cachedir = appdirs.user_cache_dir('GLM-file-cache') s3 = s3fs.S3FileSystem(anon=True) wfcfs = fsspec.implementations.cached.WholeFileCacheFileSystem(fs=s3, cache_storage=cachedir, cache_check=86400, check_files=False, expiry_time=False, same_names=True) glm_lcfa = FileSet(path=pattern_s3_glm_lcfa, name='glm_lcfa', fs=s3) for f in glm_lcfa.find(start_date, end_date): if (not (f.times[1] > start_date)): continue logger.debug(f'Downloading {f!s}') with wfcfs.open(f, mode='rb'): exp = (pathlib.Path(cachedir) / pathlib.Path(f).name) logger.debug(f'Writing to {exp!s}') if (not exp.exists()): raise FileNotFoundError(f'Not found! {exp!s}') (yield exp)
def ensure_glm_lcfa_for_period(start_date, end_date): 'Make sure GLM LCFA files for period are present locally.\n\n Yields the local paths for the (cached or downloaded) files.\n ' logger.debug(f'Ensuring local LCFA availability {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}') cachedir = appdirs.user_cache_dir('GLM-file-cache') s3 = s3fs.S3FileSystem(anon=True) wfcfs = fsspec.implementations.cached.WholeFileCacheFileSystem(fs=s3, cache_storage=cachedir, cache_check=86400, check_files=False, expiry_time=False, same_names=True) glm_lcfa = FileSet(path=pattern_s3_glm_lcfa, name='glm_lcfa', fs=s3) for f in glm_lcfa.find(start_date, end_date): if (not (f.times[1] > start_date)): continue logger.debug(f'Downloading {f!s}') with wfcfs.open(f, mode='rb'): exp = (pathlib.Path(cachedir) / pathlib.Path(f).name) logger.debug(f'Writing to {exp!s}') if (not exp.exists()): raise FileNotFoundError(f'Not found! {exp!s}') (yield exp)<|docstring|>Make sure GLM LCFA files for period are present locally. Yields the local paths for the (cached or downloaded) files.<|endoftext|>
032088abd8553379e2b75f9fb9d1b354570b5c0b2f54c3923921dcbf92f34bf9
def ensure_glm_for_period(start_date, end_date, sector='C', lat=None, lon=None): 'Get gridded GLM for period, unless already existing.\n\n Yields resulting GLM files as strings.\n ' logger.debug(f'Locating GLM gaps between {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}, sector {sector:s}') for gap in find_glm_coverage_gaps(start_date, end_date, sector=sector, lat=lat, lon=lon): logger.debug(f'Found gap between {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}') files = list(ensure_glm_lcfa_for_period(gap.left, gap.right)) if (sector in 'CF'): run_glmtools(files, max_files=60, sector=sector) else: run_glmtools(files, max_files=60, sector=sector, lat=lat, lon=lon) logger.debug(f'GLM {sector:s} should now be fully covered') for gap in find_glm_coverage_gaps(start_date, end_date, sector=sector, lat=lat, lon=lon): raise RuntimeError(f'I have tried to ensure GLM {sector:s} by running glmtools, but data still appear to be missing for {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S} :( ') if (sector in 'CF'): pat = get_pattern_dwd_glm(sector) else: pat = get_pattern_dwd_glm(sector, lat=lat, lon=lon) glm = FileSet(path=pat, name='glm') for fileinfo in glm.find(start_date, end_date, no_files_error=True): (yield os.fspath(fileinfo))
Get gridded GLM for period, unless already existing. Yields resulting GLM files as strings.
src/sattools/glm.py
ensure_glm_for_period
gerritholl/sattools
0
python
def ensure_glm_for_period(start_date, end_date, sector='C', lat=None, lon=None): 'Get gridded GLM for period, unless already existing.\n\n Yields resulting GLM files as strings.\n ' logger.debug(f'Locating GLM gaps between {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}, sector {sector:s}') for gap in find_glm_coverage_gaps(start_date, end_date, sector=sector, lat=lat, lon=lon): logger.debug(f'Found gap between {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}') files = list(ensure_glm_lcfa_for_period(gap.left, gap.right)) if (sector in 'CF'): run_glmtools(files, max_files=60, sector=sector) else: run_glmtools(files, max_files=60, sector=sector, lat=lat, lon=lon) logger.debug(f'GLM {sector:s} should now be fully covered') for gap in find_glm_coverage_gaps(start_date, end_date, sector=sector, lat=lat, lon=lon): raise RuntimeError(f'I have tried to ensure GLM {sector:s} by running glmtools, but data still appear to be missing for {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S} :( ') if (sector in 'CF'): pat = get_pattern_dwd_glm(sector) else: pat = get_pattern_dwd_glm(sector, lat=lat, lon=lon) glm = FileSet(path=pat, name='glm') for fileinfo in glm.find(start_date, end_date, no_files_error=True): (yield os.fspath(fileinfo))
def ensure_glm_for_period(start_date, end_date, sector='C', lat=None, lon=None): 'Get gridded GLM for period, unless already existing.\n\n Yields resulting GLM files as strings.\n ' logger.debug(f'Locating GLM gaps between {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}, sector {sector:s}') for gap in find_glm_coverage_gaps(start_date, end_date, sector=sector, lat=lat, lon=lon): logger.debug(f'Found gap between {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S}') files = list(ensure_glm_lcfa_for_period(gap.left, gap.right)) if (sector in 'CF'): run_glmtools(files, max_files=60, sector=sector) else: run_glmtools(files, max_files=60, sector=sector, lat=lat, lon=lon) logger.debug(f'GLM {sector:s} should now be fully covered') for gap in find_glm_coverage_gaps(start_date, end_date, sector=sector, lat=lat, lon=lon): raise RuntimeError(f'I have tried to ensure GLM {sector:s} by running glmtools, but data still appear to be missing for {start_date:%Y-%m-%d %H:%M:%S}--{end_date:%H:%M:%S} :( ') if (sector in 'CF'): pat = get_pattern_dwd_glm(sector) else: pat = get_pattern_dwd_glm(sector, lat=lat, lon=lon) glm = FileSet(path=pat, name='glm') for fileinfo in glm.find(start_date, end_date, no_files_error=True): (yield os.fspath(fileinfo))<|docstring|>Get gridded GLM for period, unless already existing. Yields resulting GLM files as strings.<|endoftext|>
b6e4f09a1d39c5592fb69b0b69eff29ec821e4e3c498fbc1231b85ec00cac7d3
def find_glm_coverage(start_date, end_date, sector='C', lat=None, lon=None): 'Yield intervals corresponding to GLMC coverage.' if (sector in 'CF'): pat = get_pattern_dwd_glm(sector) else: pat = get_pattern_dwd_glm(sector, lat=lat, lon=lon) glm = FileSet(path=pat, name='glm') for file_info in glm.find(start_date, end_date, no_files_error=False): (yield pandas.Interval(pandas.Timestamp(file_info.times[0]), pandas.Timestamp(file_info.times[1])))
Yield intervals corresponding to GLMC coverage.
src/sattools/glm.py
find_glm_coverage
gerritholl/sattools
0
python
def find_glm_coverage(start_date, end_date, sector='C', lat=None, lon=None): if (sector in 'CF'): pat = get_pattern_dwd_glm(sector) else: pat = get_pattern_dwd_glm(sector, lat=lat, lon=lon) glm = FileSet(path=pat, name='glm') for file_info in glm.find(start_date, end_date, no_files_error=False): (yield pandas.Interval(pandas.Timestamp(file_info.times[0]), pandas.Timestamp(file_info.times[1])))
def find_glm_coverage(start_date, end_date, sector='C', lat=None, lon=None): if (sector in 'CF'): pat = get_pattern_dwd_glm(sector) else: pat = get_pattern_dwd_glm(sector, lat=lat, lon=lon) glm = FileSet(path=pat, name='glm') for file_info in glm.find(start_date, end_date, no_files_error=False): (yield pandas.Interval(pandas.Timestamp(file_info.times[0]), pandas.Timestamp(file_info.times[1])))<|docstring|>Yield intervals corresponding to GLMC coverage.<|endoftext|>
55241142ae2bde588beeb17ec7d76271fbc45efb3fdd638511c2cf53707c26ea
def find_glm_coverage_gaps(start_date, end_date, sector='C', lat=None, lon=None): 'Yield intervals not covered by GLMC in period.' last = pandas.Timestamp(start_date) for iv in find_glm_coverage(start_date, end_date, sector=sector, lat=lat, lon=lon): if (iv.left > last): (yield pandas.Interval(last, iv.left)) last = iv.right if (last < end_date): (yield pandas.Interval(last, pandas.Timestamp(end_date)))
Yield intervals not covered by GLMC in period.
src/sattools/glm.py
find_glm_coverage_gaps
gerritholl/sattools
0
python
def find_glm_coverage_gaps(start_date, end_date, sector='C', lat=None, lon=None): last = pandas.Timestamp(start_date) for iv in find_glm_coverage(start_date, end_date, sector=sector, lat=lat, lon=lon): if (iv.left > last): (yield pandas.Interval(last, iv.left)) last = iv.right if (last < end_date): (yield pandas.Interval(last, pandas.Timestamp(end_date)))
def find_glm_coverage_gaps(start_date, end_date, sector='C', lat=None, lon=None): last = pandas.Timestamp(start_date) for iv in find_glm_coverage(start_date, end_date, sector=sector, lat=lat, lon=lon): if (iv.left > last): (yield pandas.Interval(last, iv.left)) last = iv.right if (last < end_date): (yield pandas.Interval(last, pandas.Timestamp(end_date)))<|docstring|>Yield intervals not covered by GLMC in period.<|endoftext|>
ff05fb225b6c546e708b586cd555c73adeccd7d768c2cca8d4a92ccab3c42b9a
def load_file(name, path): 'Help to run glmtools by importing module from file.' spec = importlib.util.spec_from_file_location(name, path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module
Help to run glmtools by importing module from file.
src/sattools/glm.py
load_file
gerritholl/sattools
0
python
def load_file(name, path): spec = importlib.util.spec_from_file_location(name, path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module
def load_file(name, path): spec = importlib.util.spec_from_file_location(name, path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module<|docstring|>Help to run glmtools by importing module from file.<|endoftext|>
92c137be2c8a611febb1fd62c5fb78d7c513fc30e155786618c011aa9f7d1ecf
def run_glmtools(files, max_files=180, sector='C', lat=None, lon=None): 'Run glmtools.\n\n This function runs glmtools.\n ' if (len(files) > max_files): logger.info(f'Got {len(files):d} > {max_files:d} files, splitting...') idx = 0 glmtool = load_file('glmtool', glm_script) parser = glmtool.create_parser() glm_names = {'C': 'conus', 'M1': 'meso', 'M2': 'meso', 'F': 'full'} while (idx < len(files)): these_files = files[idx:(idx + max_files)] logger.info(('Running glmtools for ' + ' '.join((str(f) for f in these_files)))) arg_list = ['--fixed_grid', '--split_events', '--goes_position', 'east', '--goes_sector', glm_names[sector], '--dx=2.0', '--dy=2.0', '--dt', '60'] if (glm_names[sector] == 'meso'): arg_list.extend(['--ctr_lat', f'{lat:.2f}', '--ctr_lon', f'{lon:.2f}']) outdir = get_dwd_glm_basedir(sector=sector, lat=lat, lon=lon) else: outdir = get_dwd_glm_basedir(sector=sector) arg_list.extend(['-o', (str(outdir) + '/{start_time:%Y/%m/%d/%H}/{dataset_name}'), *(str(f) for f in these_files)]) args = parser.parse_args(arg_list) (gridder, glm_filenames, start_time, end_time, grid_kwargs) = glmtool.grid_setup(args) gridder(glm_filenames, start_time, end_time, **grid_kwargs) idx += max_files
Run glmtools. This function runs glmtools.
src/sattools/glm.py
run_glmtools
gerritholl/sattools
0
python
def run_glmtools(files, max_files=180, sector='C', lat=None, lon=None): 'Run glmtools.\n\n This function runs glmtools.\n ' if (len(files) > max_files): logger.info(f'Got {len(files):d} > {max_files:d} files, splitting...') idx = 0 glmtool = load_file('glmtool', glm_script) parser = glmtool.create_parser() glm_names = {'C': 'conus', 'M1': 'meso', 'M2': 'meso', 'F': 'full'} while (idx < len(files)): these_files = files[idx:(idx + max_files)] logger.info(('Running glmtools for ' + ' '.join((str(f) for f in these_files)))) arg_list = ['--fixed_grid', '--split_events', '--goes_position', 'east', '--goes_sector', glm_names[sector], '--dx=2.0', '--dy=2.0', '--dt', '60'] if (glm_names[sector] == 'meso'): arg_list.extend(['--ctr_lat', f'{lat:.2f}', '--ctr_lon', f'{lon:.2f}']) outdir = get_dwd_glm_basedir(sector=sector, lat=lat, lon=lon) else: outdir = get_dwd_glm_basedir(sector=sector) arg_list.extend(['-o', (str(outdir) + '/{start_time:%Y/%m/%d/%H}/{dataset_name}'), *(str(f) for f in these_files)]) args = parser.parse_args(arg_list) (gridder, glm_filenames, start_time, end_time, grid_kwargs) = glmtool.grid_setup(args) gridder(glm_filenames, start_time, end_time, **grid_kwargs) idx += max_files
def run_glmtools(files, max_files=180, sector='C', lat=None, lon=None): 'Run glmtools.\n\n This function runs glmtools.\n ' if (len(files) > max_files): logger.info(f'Got {len(files):d} > {max_files:d} files, splitting...') idx = 0 glmtool = load_file('glmtool', glm_script) parser = glmtool.create_parser() glm_names = {'C': 'conus', 'M1': 'meso', 'M2': 'meso', 'F': 'full'} while (idx < len(files)): these_files = files[idx:(idx + max_files)] logger.info(('Running glmtools for ' + ' '.join((str(f) for f in these_files)))) arg_list = ['--fixed_grid', '--split_events', '--goes_position', 'east', '--goes_sector', glm_names[sector], '--dx=2.0', '--dy=2.0', '--dt', '60'] if (glm_names[sector] == 'meso'): arg_list.extend(['--ctr_lat', f'{lat:.2f}', '--ctr_lon', f'{lon:.2f}']) outdir = get_dwd_glm_basedir(sector=sector, lat=lat, lon=lon) else: outdir = get_dwd_glm_basedir(sector=sector) arg_list.extend(['-o', (str(outdir) + '/{start_time:%Y/%m/%d/%H}/{dataset_name}'), *(str(f) for f in these_files)]) args = parser.parse_args(arg_list) (gridder, glm_filenames, start_time, end_time, grid_kwargs) = glmtool.grid_setup(args) gridder(glm_filenames, start_time, end_time, **grid_kwargs) idx += max_files<|docstring|>Run glmtools. This function runs glmtools.<|endoftext|>
5d457dbe52c6c626f2cfe687133bb656de7466b2151985f3e821681f3fd20717
def get_integrated_scene(glm_files, start_scene=None): 'Get an integrated scene.\n\n Given a set of GLM files, get a scene where quantities are summed or\n averaged or so.\n ' ms = satpy.MultiScene.from_files(glm_files, 'glm_l2', time_threshold=10, group_keys=['start_time']) ms.load(['flash_extent_density']) with xarray.set_options(keep_attrs=True): sc = ms.blend(sum, scene=start_scene) return sc
Get an integrated scene. Given a set of GLM files, get a scene where quantities are summed or averaged or so.
src/sattools/glm.py
get_integrated_scene
gerritholl/sattools
0
python
def get_integrated_scene(glm_files, start_scene=None): 'Get an integrated scene.\n\n Given a set of GLM files, get a scene where quantities are summed or\n averaged or so.\n ' ms = satpy.MultiScene.from_files(glm_files, 'glm_l2', time_threshold=10, group_keys=['start_time']) ms.load(['flash_extent_density']) with xarray.set_options(keep_attrs=True): sc = ms.blend(sum, scene=start_scene) return sc
def get_integrated_scene(glm_files, start_scene=None): 'Get an integrated scene.\n\n Given a set of GLM files, get a scene where quantities are summed or\n averaged or so.\n ' ms = satpy.MultiScene.from_files(glm_files, 'glm_l2', time_threshold=10, group_keys=['start_time']) ms.load(['flash_extent_density']) with xarray.set_options(keep_attrs=True): sc = ms.blend(sum, scene=start_scene) return sc<|docstring|>Get an integrated scene. Given a set of GLM files, get a scene where quantities are summed or averaged or so.<|endoftext|>
05cc94e06122456b4e0a2e7fe93267eb39d4121d4226b21a0397abef2e7962a9
def _TestTableSanity(self, tt, lines): 'Run the given truth table through basic sanity checks.\n\n Args:\n tt: A TruthTable object.\n lines: The expect input lines, in order (list of tuples).\n ' iter1 = iter(tt) iter2 = iter(tt) self.assertEquals(lines[0], iter1.next()) self.assertEquals(lines[0], iter2.next()) self.assertEquals(lines[1], iter2.next()) for (ix, line) in enumerate(tt): self.assertEquals(lines[ix], line) for i in xrange(len(tt)): self.assertEquals(lines[i], tt.GetInputs(i)) self.assertRaises(ValueError, tt.GetInputs, (- 1)) self.assertRaises(ValueError, tt.GetInputs, len(tt))
Run the given truth table through basic sanity checks. Args: tt: A TruthTable object. lines: The expect input lines, in order (list of tuples).
lib/cros_test_lib_unittest.py
_TestTableSanity
bpsinc-native/src_third_party_chromite
0
python
def _TestTableSanity(self, tt, lines): 'Run the given truth table through basic sanity checks.\n\n Args:\n tt: A TruthTable object.\n lines: The expect input lines, in order (list of tuples).\n ' iter1 = iter(tt) iter2 = iter(tt) self.assertEquals(lines[0], iter1.next()) self.assertEquals(lines[0], iter2.next()) self.assertEquals(lines[1], iter2.next()) for (ix, line) in enumerate(tt): self.assertEquals(lines[ix], line) for i in xrange(len(tt)): self.assertEquals(lines[i], tt.GetInputs(i)) self.assertRaises(ValueError, tt.GetInputs, (- 1)) self.assertRaises(ValueError, tt.GetInputs, len(tt))
def _TestTableSanity(self, tt, lines): 'Run the given truth table through basic sanity checks.\n\n Args:\n tt: A TruthTable object.\n lines: The expect input lines, in order (list of tuples).\n ' iter1 = iter(tt) iter2 = iter(tt) self.assertEquals(lines[0], iter1.next()) self.assertEquals(lines[0], iter2.next()) self.assertEquals(lines[1], iter2.next()) for (ix, line) in enumerate(tt): self.assertEquals(lines[ix], line) for i in xrange(len(tt)): self.assertEquals(lines[i], tt.GetInputs(i)) self.assertRaises(ValueError, tt.GetInputs, (- 1)) self.assertRaises(ValueError, tt.GetInputs, len(tt))<|docstring|>Run the given truth table through basic sanity checks. Args: tt: A TruthTable object. lines: The expect input lines, in order (list of tuples).<|endoftext|>
9a22898b7b394ce8ffc0226922d3a6c1fc8db62a43f12e5f9bfc641f584bdb2a
def testTwoDimensions(self): 'Test TruthTable behavior for two boolean inputs.' tt = cros_test_lib.TruthTable(inputs=[(True, True), (True, False)]) self.assertEquals(len(tt), pow(2, 2)) self.assertFalse(tt.GetOutput((False, False))) self.assertFalse(tt.GetOutput((False, True))) self.assertTrue(tt.GetOutput((True, False))) self.assertTrue(tt.GetOutput((True, True))) self.assertRaises(TypeError, tt.GetOutput, True) self.assertRaises(ValueError, tt.GetOutput, (True, True, True)) lines = list(tt) self.assertEquals((False, False), lines[0]) self.assertEquals((False, True), lines[1]) self.assertEquals((True, False), lines[2]) self.assertEquals((True, True), lines[3]) self._TestTableSanity(tt, lines)
Test TruthTable behavior for two boolean inputs.
lib/cros_test_lib_unittest.py
testTwoDimensions
bpsinc-native/src_third_party_chromite
0
python
def testTwoDimensions(self): tt = cros_test_lib.TruthTable(inputs=[(True, True), (True, False)]) self.assertEquals(len(tt), pow(2, 2)) self.assertFalse(tt.GetOutput((False, False))) self.assertFalse(tt.GetOutput((False, True))) self.assertTrue(tt.GetOutput((True, False))) self.assertTrue(tt.GetOutput((True, True))) self.assertRaises(TypeError, tt.GetOutput, True) self.assertRaises(ValueError, tt.GetOutput, (True, True, True)) lines = list(tt) self.assertEquals((False, False), lines[0]) self.assertEquals((False, True), lines[1]) self.assertEquals((True, False), lines[2]) self.assertEquals((True, True), lines[3]) self._TestTableSanity(tt, lines)
def testTwoDimensions(self): tt = cros_test_lib.TruthTable(inputs=[(True, True), (True, False)]) self.assertEquals(len(tt), pow(2, 2)) self.assertFalse(tt.GetOutput((False, False))) self.assertFalse(tt.GetOutput((False, True))) self.assertTrue(tt.GetOutput((True, False))) self.assertTrue(tt.GetOutput((True, True))) self.assertRaises(TypeError, tt.GetOutput, True) self.assertRaises(ValueError, tt.GetOutput, (True, True, True)) lines = list(tt) self.assertEquals((False, False), lines[0]) self.assertEquals((False, True), lines[1]) self.assertEquals((True, False), lines[2]) self.assertEquals((True, True), lines[3]) self._TestTableSanity(tt, lines)<|docstring|>Test TruthTable behavior for two boolean inputs.<|endoftext|>
7e8451dc08fda4ce51e7d53e76184bf14f72e8f73ae621b71b13191ca6ab9d95
def testFourDimensions(self): 'Test TruthTable behavior for four boolean inputs.' false1 = (True, True, True, False) false2 = (True, False, True, False) true1 = (False, True, False, True) true2 = (True, True, False, False) tt = cros_test_lib.TruthTable(inputs=(false1, false2), input_result=False) self.assertEquals(len(tt), pow(2, 4)) self.assertFalse(tt.GetOutput(false1)) self.assertFalse(tt.GetOutput(false2)) self.assertTrue(tt.GetOutput(true1)) self.assertTrue(tt.GetOutput(true2)) self.assertRaises(TypeError, tt.GetOutput, True) self.assertRaises(ValueError, tt.GetOutput, (True, True, True)) lines = list(tt) self.assertEquals((False, False, False, False), lines[0]) self.assertEquals((False, False, False, True), lines[1]) self.assertEquals((False, True, True, True), lines[7]) self.assertEquals((True, True, True, True), lines[15]) self._TestTableSanity(tt, lines)
Test TruthTable behavior for four boolean inputs.
lib/cros_test_lib_unittest.py
testFourDimensions
bpsinc-native/src_third_party_chromite
0
python
def testFourDimensions(self): false1 = (True, True, True, False) false2 = (True, False, True, False) true1 = (False, True, False, True) true2 = (True, True, False, False) tt = cros_test_lib.TruthTable(inputs=(false1, false2), input_result=False) self.assertEquals(len(tt), pow(2, 4)) self.assertFalse(tt.GetOutput(false1)) self.assertFalse(tt.GetOutput(false2)) self.assertTrue(tt.GetOutput(true1)) self.assertTrue(tt.GetOutput(true2)) self.assertRaises(TypeError, tt.GetOutput, True) self.assertRaises(ValueError, tt.GetOutput, (True, True, True)) lines = list(tt) self.assertEquals((False, False, False, False), lines[0]) self.assertEquals((False, False, False, True), lines[1]) self.assertEquals((False, True, True, True), lines[7]) self.assertEquals((True, True, True, True), lines[15]) self._TestTableSanity(tt, lines)
def testFourDimensions(self): false1 = (True, True, True, False) false2 = (True, False, True, False) true1 = (False, True, False, True) true2 = (True, True, False, False) tt = cros_test_lib.TruthTable(inputs=(false1, false2), input_result=False) self.assertEquals(len(tt), pow(2, 4)) self.assertFalse(tt.GetOutput(false1)) self.assertFalse(tt.GetOutput(false2)) self.assertTrue(tt.GetOutput(true1)) self.assertTrue(tt.GetOutput(true2)) self.assertRaises(TypeError, tt.GetOutput, True) self.assertRaises(ValueError, tt.GetOutput, (True, True, True)) lines = list(tt) self.assertEquals((False, False, False, False), lines[0]) self.assertEquals((False, False, False, True), lines[1]) self.assertEquals((False, True, True, True), lines[7]) self.assertEquals((True, True, True, True), lines[15]) self._TestTableSanity(tt, lines)<|docstring|>Test TruthTable behavior for four boolean inputs.<|endoftext|>
33d8050c19cfc2b984364a53d79b9f9ad6331186614b368ea2a734341dcf03d3
def _MockTarList(self, files): 'Mock out tarball content list call.\n\n Args:\n files: A list of contents to return.\n ' self.rc_mock.AddCmdResult(partial_mock.ListRegex('tar -tf'), output='\n'.join(files))
Mock out tarball content list call. Args: files: A list of contents to return.
lib/cros_test_lib_unittest.py
_MockTarList
bpsinc-native/src_third_party_chromite
0
python
def _MockTarList(self, files): 'Mock out tarball content list call.\n\n Args:\n files: A list of contents to return.\n ' self.rc_mock.AddCmdResult(partial_mock.ListRegex('tar -tf'), output='\n'.join(files))
def _MockTarList(self, files): 'Mock out tarball content list call.\n\n Args:\n files: A list of contents to return.\n ' self.rc_mock.AddCmdResult(partial_mock.ListRegex('tar -tf'), output='\n'.join(files))<|docstring|>Mock out tarball content list call. Args: files: A list of contents to return.<|endoftext|>
046e155d5a6abd289b554acf1322a1dd7198fc58fd596264ca164149b415fe53
def testNormPath(self): 'Test path normalization.' tar_contents = ['./', './foo/', './foo/./a', './foo/./b'] dir_struct = [Dir('.', []), Dir('foo', ['a', 'b'])] self._MockTarList(tar_contents) cros_test_lib.VerifyTarball(self.TARBALL, dir_struct)
Test path normalization.
lib/cros_test_lib_unittest.py
testNormPath
bpsinc-native/src_third_party_chromite
0
python
def testNormPath(self): tar_contents = ['./', './foo/', './foo/./a', './foo/./b'] dir_struct = [Dir('.', []), Dir('foo', ['a', 'b'])] self._MockTarList(tar_contents) cros_test_lib.VerifyTarball(self.TARBALL, dir_struct)
def testNormPath(self): tar_contents = ['./', './foo/', './foo/./a', './foo/./b'] dir_struct = [Dir('.', []), Dir('foo', ['a', 'b'])] self._MockTarList(tar_contents) cros_test_lib.VerifyTarball(self.TARBALL, dir_struct)<|docstring|>Test path normalization.<|endoftext|>
42730271c44b4573132d74c828c239426cf5793f9a0726573dbb647ea0850eec
def testDuplicate(self): 'Test duplicate detection.' tar_contents = ['a', 'b', 'a'] dir_struct = ['a', 'b'] self._MockTarList(tar_contents) self.assertRaises(AssertionError, cros_test_lib.VerifyTarball, self.TARBALL, dir_struct)
Test duplicate detection.
lib/cros_test_lib_unittest.py
testDuplicate
bpsinc-native/src_third_party_chromite
0
python
def testDuplicate(self): tar_contents = ['a', 'b', 'a'] dir_struct = ['a', 'b'] self._MockTarList(tar_contents) self.assertRaises(AssertionError, cros_test_lib.VerifyTarball, self.TARBALL, dir_struct)
def testDuplicate(self): tar_contents = ['a', 'b', 'a'] dir_struct = ['a', 'b'] self._MockTarList(tar_contents) self.assertRaises(AssertionError, cros_test_lib.VerifyTarball, self.TARBALL, dir_struct)<|docstring|>Test duplicate detection.<|endoftext|>
b915bbcdb70f1118d6fbad80049b8c6866755838291dd084421dfc5d9595461b
def testPatchRemovalError(self): 'Verify that patch removal during tearDown is robust to Exceptions.' tc = self.MyMockTestCase('testIt') patcher = self.GetPatcher('TO_BE_MOCKED', (- 100)) patcher2 = self.GetPatcher('TO_BE_MOCKED2', (- 200)) patcher3 = self.GetPatcher('TO_BE_MOCKED3', (- 300)) patcher3.start() tc.setUp() tc.StartPatcher(patcher) tc.StartPatcher(patcher2) patcher.stop() self.assertEquals(self.Mockable.TO_BE_MOCKED2, (- 200)) self.assertEquals(self.Mockable.TO_BE_MOCKED3, (- 300)) self.assertRaises(RuntimeError, tc.tearDown) self.assertEquals(self.Mockable.TO_BE_MOCKED2, 10) self.assertEquals(self.Mockable.TO_BE_MOCKED3, 20)
Verify that patch removal during tearDown is robust to Exceptions.
lib/cros_test_lib_unittest.py
testPatchRemovalError
bpsinc-native/src_third_party_chromite
0
python
def testPatchRemovalError(self): tc = self.MyMockTestCase('testIt') patcher = self.GetPatcher('TO_BE_MOCKED', (- 100)) patcher2 = self.GetPatcher('TO_BE_MOCKED2', (- 200)) patcher3 = self.GetPatcher('TO_BE_MOCKED3', (- 300)) patcher3.start() tc.setUp() tc.StartPatcher(patcher) tc.StartPatcher(patcher2) patcher.stop() self.assertEquals(self.Mockable.TO_BE_MOCKED2, (- 200)) self.assertEquals(self.Mockable.TO_BE_MOCKED3, (- 300)) self.assertRaises(RuntimeError, tc.tearDown) self.assertEquals(self.Mockable.TO_BE_MOCKED2, 10) self.assertEquals(self.Mockable.TO_BE_MOCKED3, 20)
def testPatchRemovalError(self): tc = self.MyMockTestCase('testIt') patcher = self.GetPatcher('TO_BE_MOCKED', (- 100)) patcher2 = self.GetPatcher('TO_BE_MOCKED2', (- 200)) patcher3 = self.GetPatcher('TO_BE_MOCKED3', (- 300)) patcher3.start() tc.setUp() tc.StartPatcher(patcher) tc.StartPatcher(patcher2) patcher.stop() self.assertEquals(self.Mockable.TO_BE_MOCKED2, (- 200)) self.assertEquals(self.Mockable.TO_BE_MOCKED3, (- 300)) self.assertRaises(RuntimeError, tc.tearDown) self.assertEquals(self.Mockable.TO_BE_MOCKED2, 10) self.assertEquals(self.Mockable.TO_BE_MOCKED3, 20)<|docstring|>Verify that patch removal during tearDown is robust to Exceptions.<|endoftext|>
1a58aabf1b3703da19c031cd19c6f7f6fb7ae1c228ca6220333a5d9b05d4f0a9
def testTimeout(self): 'Test that test cases are interrupted when they are hanging.' class TimeoutTestCase(cros_test_lib.TestCase): 'Test case that raises a TimeoutError because it takes too long.' TEST_CASE_TIMEOUT = 1 def testSleeping(self): 'Sleep for 2 minutes. This should raise a TimeoutError.' time.sleep((2 * 60)) raise AssertionError('Test case should have timed out.') test = TimeoutTestCase(methodName='testSleeping') self.assertRaises(timeout_util.TimeoutError, test.testSleeping)
Test that test cases are interrupted when they are hanging.
lib/cros_test_lib_unittest.py
testTimeout
bpsinc-native/src_third_party_chromite
0
python
def testTimeout(self): class TimeoutTestCase(cros_test_lib.TestCase): 'Test case that raises a TimeoutError because it takes too long.' TEST_CASE_TIMEOUT = 1 def testSleeping(self): 'Sleep for 2 minutes. This should raise a TimeoutError.' time.sleep((2 * 60)) raise AssertionError('Test case should have timed out.') test = TimeoutTestCase(methodName='testSleeping') self.assertRaises(timeout_util.TimeoutError, test.testSleeping)
def testTimeout(self): class TimeoutTestCase(cros_test_lib.TestCase): 'Test case that raises a TimeoutError because it takes too long.' TEST_CASE_TIMEOUT = 1 def testSleeping(self): 'Sleep for 2 minutes. This should raise a TimeoutError.' time.sleep((2 * 60)) raise AssertionError('Test case should have timed out.') test = TimeoutTestCase(methodName='testSleeping') self.assertRaises(timeout_util.TimeoutError, test.testSleeping)<|docstring|>Test that test cases are interrupted when they are hanging.<|endoftext|>
e03fdb4a89fc34efe8241bd5afecce8833cb13aedf1800ae5299a79ced06cb56
def testSleeping(self): 'Sleep for 2 minutes. This should raise a TimeoutError.' time.sleep((2 * 60)) raise AssertionError('Test case should have timed out.')
Sleep for 2 minutes. This should raise a TimeoutError.
lib/cros_test_lib_unittest.py
testSleeping
bpsinc-native/src_third_party_chromite
0
python
def testSleeping(self): time.sleep((2 * 60)) raise AssertionError('Test case should have timed out.')
def testSleeping(self): time.sleep((2 * 60)) raise AssertionError('Test case should have timed out.')<|docstring|>Sleep for 2 minutes. This should raise a TimeoutError.<|endoftext|>
0c4a7ae24b51f2b59b3ca6a069bba4174518a2ca4ba6638bf4206f32b43f009a
@click.command() @version_flag @click.option('--extent', is_flag=True, help='Set this flag to generate extent imagery') @click.option('--concentration', is_flag=True, help='Set this flag to generate concentration imagery') @click.option('--anomaly', is_flag=True, help='Set this flag to generate anomaly imagery') @click.option('--trend', is_flag=True, help='Set this flag to generate trend imagery') @click.option('--allow_bad_data', is_flag=True, help='Set this flag to set the allow_bad_data option on the image generation commands') @click.option('--north', 'hemisphere', flag_value='-h N', help='Generate imagery for northern hemisphere only') @click.option('--south', 'hemisphere', flag_value='-h S', help='Generate imagery for southern hemisphere only') @click.option('--both', 'hemisphere', flag_value='-h N,S', default=True, help='Generate imagery for both hemispheres') @click.option('--output', default=DEFAULT_OUTPUT, help='Output directory for image generation commands') @click.option('--daily', 'temporality', flag_value='--daily', help='Generate daily images') @click.option('--monthly', 'temporality', flag_value='--monthly', default=True, help='Generate monthly images') @click.option('--hires/--no-hires', is_flag=True, default=True, help='Generate high resolution images in addition to the normal outputs.') @click.option('--blue_marble', is_flag=True, help='Generate blue marble images; has no effect unless at least one of --extent and --concentration are used.') @click.option('--google', is_flag=True, help='Generate Google earth images; has no effect unless --extent is used.') @click.option('--geotiff', is_flag=True, help='Generate Geotiff images; has no effect unless --extent, --concentration or --anomaly is used.') @click.option('--latest', default=1, type=int, help='The number of images of each type to create.') @click.option('--dev', is_flag=True, default=False, help='Run the sii_image commands with `python -m`, so that this command can be run from source.') 
@seaicelogging.log_command(log) def sii_image_latest(extent, concentration, anomaly, trend, allow_bad_data, hemisphere, output, temporality, hires, blue_marble, google, geotiff, latest, dev): 'Run latest daily or monthly image generation' allow_bad_data = ('--allow-bad-data' if allow_bad_data else '') config = {'hemisphere': hemisphere, 'allow_bad_data': allow_bad_data, 'temporality': temporality, 'output': output, 'latest': latest} command_options = {'extent': extent, 'concentration': concentration, 'anomaly': anomaly, 'trend': trend} commands = [] hires_flags = [''] if hires: hires_flags.append('--hires') sii_image_executable = '{}sii_image'.format(('python -m seaice.images.cli.' if dev else '')) sii_geotiff_executable = '{}sii_image_geotiff'.format(('python -m seaice.images.cli.' if dev else '')) for command in [key for (key, val) in command_options.items() if (val is True)]: special_flags = [''] if (blue_marble and (command in ('extent', 'concentration'))): special_flags.append('--blue_marble') for special_flag in special_flags: for hires_flag in hires_flags: cmd = '{executable} {hemisphere} --{command} {temporality} --latest {latest} {allow_bad_data} --output {output} {special} {hires}'.format(**config, **{'command': command, 'special': special_flag, 'hires': hires_flag, 'executable': sii_image_executable}) commands.append(cmd) if (geotiff and (command in ('extent', 'concentration', 'anomaly'))): cmd = '{executable} {hemisphere} --{command} {temporality} --latest {latest} {allow_bad_data} --output {output}'.format(**config, **{'executable': sii_geotiff_executable, 'command': command}) commands.append(cmd) if google: if (output == DEFAULT_OUTPUT): google_out = '' else: google_out = '-o {}'.format(output) executable = '{}sii_image_google_earth'.format(('python -m seaice.images.cli.' 
if dev else '')) cmd = '{executable} --latest {latest} {allow_bad_data} {google_out}'.format(**config, **{'executable': executable, 'google_out': google_out}) commands.append(cmd) with Pool() as p: p.map(_run_command, commands)
Run latest daily or monthly image generation
seaice/images/cli/sii_image_latest.py
sii_image_latest
andypbarrett/nsidc-seaice
2
python
@click.command() @version_flag @click.option('--extent', is_flag=True, help='Set this flag to generate extent imagery') @click.option('--concentration', is_flag=True, help='Set this flag to generate concentration imagery') @click.option('--anomaly', is_flag=True, help='Set this flag to generate anomaly imagery') @click.option('--trend', is_flag=True, help='Set this flag to generate trend imagery') @click.option('--allow_bad_data', is_flag=True, help='Set this flag to set the allow_bad_data option on the image generation commands') @click.option('--north', 'hemisphere', flag_value='-h N', help='Generate imagery for northern hemisphere only') @click.option('--south', 'hemisphere', flag_value='-h S', help='Generate imagery for southern hemisphere only') @click.option('--both', 'hemisphere', flag_value='-h N,S', default=True, help='Generate imagery for both hemispheres') @click.option('--output', default=DEFAULT_OUTPUT, help='Output directory for image generation commands') @click.option('--daily', 'temporality', flag_value='--daily', help='Generate daily images') @click.option('--monthly', 'temporality', flag_value='--monthly', default=True, help='Generate monthly images') @click.option('--hires/--no-hires', is_flag=True, default=True, help='Generate high resolution images in addition to the normal outputs.') @click.option('--blue_marble', is_flag=True, help='Generate blue marble images; has no effect unless at least one of --extent and --concentration are used.') @click.option('--google', is_flag=True, help='Generate Google earth images; has no effect unless --extent is used.') @click.option('--geotiff', is_flag=True, help='Generate Geotiff images; has no effect unless --extent, --concentration or --anomaly is used.') @click.option('--latest', default=1, type=int, help='The number of images of each type to create.') @click.option('--dev', is_flag=True, default=False, help='Run the sii_image commands with `python -m`, so that this command can be run from source.') 
@seaicelogging.log_command(log) def sii_image_latest(extent, concentration, anomaly, trend, allow_bad_data, hemisphere, output, temporality, hires, blue_marble, google, geotiff, latest, dev): allow_bad_data = ('--allow-bad-data' if allow_bad_data else ) config = {'hemisphere': hemisphere, 'allow_bad_data': allow_bad_data, 'temporality': temporality, 'output': output, 'latest': latest} command_options = {'extent': extent, 'concentration': concentration, 'anomaly': anomaly, 'trend': trend} commands = [] hires_flags = [] if hires: hires_flags.append('--hires') sii_image_executable = '{}sii_image'.format(('python -m seaice.images.cli.' if dev else )) sii_geotiff_executable = '{}sii_image_geotiff'.format(('python -m seaice.images.cli.' if dev else )) for command in [key for (key, val) in command_options.items() if (val is True)]: special_flags = [] if (blue_marble and (command in ('extent', 'concentration'))): special_flags.append('--blue_marble') for special_flag in special_flags: for hires_flag in hires_flags: cmd = '{executable} {hemisphere} --{command} {temporality} --latest {latest} {allow_bad_data} --output {output} {special} {hires}'.format(**config, **{'command': command, 'special': special_flag, 'hires': hires_flag, 'executable': sii_image_executable}) commands.append(cmd) if (geotiff and (command in ('extent', 'concentration', 'anomaly'))): cmd = '{executable} {hemisphere} --{command} {temporality} --latest {latest} {allow_bad_data} --output {output}'.format(**config, **{'executable': sii_geotiff_executable, 'command': command}) commands.append(cmd) if google: if (output == DEFAULT_OUTPUT): google_out = else: google_out = '-o {}'.format(output) executable = '{}sii_image_google_earth'.format(('python -m seaice.images.cli.' if dev else )) cmd = '{executable} --latest {latest} {allow_bad_data} {google_out}'.format(**config, **{'executable': executable, 'google_out': google_out}) commands.append(cmd) with Pool() as p: p.map(_run_command, commands)
@click.command() @version_flag @click.option('--extent', is_flag=True, help='Set this flag to generate extent imagery') @click.option('--concentration', is_flag=True, help='Set this flag to generate concentration imagery') @click.option('--anomaly', is_flag=True, help='Set this flag to generate anomaly imagery') @click.option('--trend', is_flag=True, help='Set this flag to generate trend imagery') @click.option('--allow_bad_data', is_flag=True, help='Set this flag to set the allow_bad_data option on the image generation commands') @click.option('--north', 'hemisphere', flag_value='-h N', help='Generate imagery for northern hemisphere only') @click.option('--south', 'hemisphere', flag_value='-h S', help='Generate imagery for southern hemisphere only') @click.option('--both', 'hemisphere', flag_value='-h N,S', default=True, help='Generate imagery for both hemispheres') @click.option('--output', default=DEFAULT_OUTPUT, help='Output directory for image generation commands') @click.option('--daily', 'temporality', flag_value='--daily', help='Generate daily images') @click.option('--monthly', 'temporality', flag_value='--monthly', default=True, help='Generate monthly images') @click.option('--hires/--no-hires', is_flag=True, default=True, help='Generate high resolution images in addition to the normal outputs.') @click.option('--blue_marble', is_flag=True, help='Generate blue marble images; has no effect unless at least one of --extent and --concentration are used.') @click.option('--google', is_flag=True, help='Generate Google earth images; has no effect unless --extent is used.') @click.option('--geotiff', is_flag=True, help='Generate Geotiff images; has no effect unless --extent, --concentration or --anomaly is used.') @click.option('--latest', default=1, type=int, help='The number of images of each type to create.') @click.option('--dev', is_flag=True, default=False, help='Run the sii_image commands with `python -m`, so that this command can be run from source.') 
@seaicelogging.log_command(log) def sii_image_latest(extent, concentration, anomaly, trend, allow_bad_data, hemisphere, output, temporality, hires, blue_marble, google, geotiff, latest, dev): allow_bad_data = ('--allow-bad-data' if allow_bad_data else ) config = {'hemisphere': hemisphere, 'allow_bad_data': allow_bad_data, 'temporality': temporality, 'output': output, 'latest': latest} command_options = {'extent': extent, 'concentration': concentration, 'anomaly': anomaly, 'trend': trend} commands = [] hires_flags = [] if hires: hires_flags.append('--hires') sii_image_executable = '{}sii_image'.format(('python -m seaice.images.cli.' if dev else )) sii_geotiff_executable = '{}sii_image_geotiff'.format(('python -m seaice.images.cli.' if dev else )) for command in [key for (key, val) in command_options.items() if (val is True)]: special_flags = [] if (blue_marble and (command in ('extent', 'concentration'))): special_flags.append('--blue_marble') for special_flag in special_flags: for hires_flag in hires_flags: cmd = '{executable} {hemisphere} --{command} {temporality} --latest {latest} {allow_bad_data} --output {output} {special} {hires}'.format(**config, **{'command': command, 'special': special_flag, 'hires': hires_flag, 'executable': sii_image_executable}) commands.append(cmd) if (geotiff and (command in ('extent', 'concentration', 'anomaly'))): cmd = '{executable} {hemisphere} --{command} {temporality} --latest {latest} {allow_bad_data} --output {output}'.format(**config, **{'executable': sii_geotiff_executable, 'command': command}) commands.append(cmd) if google: if (output == DEFAULT_OUTPUT): google_out = else: google_out = '-o {}'.format(output) executable = '{}sii_image_google_earth'.format(('python -m seaice.images.cli.' 
if dev else )) cmd = '{executable} --latest {latest} {allow_bad_data} {google_out}'.format(**config, **{'executable': executable, 'google_out': google_out}) commands.append(cmd) with Pool() as p: p.map(_run_command, commands)<|docstring|>Run latest daily or monthly image generation<|endoftext|>
2abcc87bf4aa025aaa6ac2ba39bc286a0035a5d6af3445d99a51d100c0babdcc
def __init__(self, url, server, user, name, local_dir, commit_hash=None): '\n Initialize a new repository information object.\n\n Arguments:\n url {string} -- Full remote source URL of the repository.\n server {string} -- Name of the source server (e.g., "github.com").\n user {string} -- Username of the repository owner.\n name {string} -- Name of the repository on the server.\n local_dir {string} -- Path to the local clone of the repository.\n commit_hash {string} -- Hash of the last pulled commit.\n\n ' self.url = url self.server = server self.user = user self.name = name self.dir = local_dir self.hash = commit_hash
Initialize a new repository information object. Arguments: url {string} -- Full remote source URL of the repository. server {string} -- Name of the source server (e.g., "github.com"). user {string} -- Username of the repository owner. name {string} -- Name of the repository on the server. local_dir {string} -- Path to the local clone of the repository. commit_hash {string} -- Hash of the last pulled commit.
engine/preprocessing/repoinfo.py
__init__
dentonmwood/codeDuplicationParser
1
python
def __init__(self, url, server, user, name, local_dir, commit_hash=None): '\n Initialize a new repository information object.\n\n Arguments:\n url {string} -- Full remote source URL of the repository.\n server {string} -- Name of the source server (e.g., "github.com").\n user {string} -- Username of the repository owner.\n name {string} -- Name of the repository on the server.\n local_dir {string} -- Path to the local clone of the repository.\n commit_hash {string} -- Hash of the last pulled commit.\n\n ' self.url = url self.server = server self.user = user self.name = name self.dir = local_dir self.hash = commit_hash
def __init__(self, url, server, user, name, local_dir, commit_hash=None): '\n Initialize a new repository information object.\n\n Arguments:\n url {string} -- Full remote source URL of the repository.\n server {string} -- Name of the source server (e.g., "github.com").\n user {string} -- Username of the repository owner.\n name {string} -- Name of the repository on the server.\n local_dir {string} -- Path to the local clone of the repository.\n commit_hash {string} -- Hash of the last pulled commit.\n\n ' self.url = url self.server = server self.user = user self.name = name self.dir = local_dir self.hash = commit_hash<|docstring|>Initialize a new repository information object. Arguments: url {string} -- Full remote source URL of the repository. server {string} -- Name of the source server (e.g., "github.com"). user {string} -- Username of the repository owner. name {string} -- Name of the repository on the server. local_dir {string} -- Path to the local clone of the repository. commit_hash {string} -- Hash of the last pulled commit.<|endoftext|>
38995c67ac6b436adb98f436647dbe53101ca610128603b1682df7ed37944016
def clone_or_pull(self):
    """Clone the repository, or pull the latest changes if already cloned.

    Updates ``self.hash`` to the HEAD commit on success.

    Returns:
        bool -- True on success, False when the clone/pull fails.
    """
    try:
        if isdir(self.dir):
            # An existing local directory is assumed to be a prior clone.
            local = Repo(self.dir)
            local.remotes.origin.pull()
        else:
            local = Repo.clone_from(self.url, self.dir)
        self.hash = local.head.object.hexsha
        return True
    except (InvalidGitRepositoryError, GitCommandError):
        # Either the directory is not a valid repo or the git command failed.
        return False
Clone the repository or pull it if it has already been cloned.
engine/preprocessing/repoinfo.py
clone_or_pull
dentonmwood/codeDuplicationParser
1
python
def clone_or_pull(self): try: if isdir(self.dir): repo = Repo(self.dir) repo.remotes.origin.pull() else: repo = Repo.clone_from(self.url, self.dir) self.hash = repo.head.object.hexsha return True except InvalidGitRepositoryError: return False except GitCommandError: return False
def clone_or_pull(self): try: if isdir(self.dir): repo = Repo(self.dir) repo.remotes.origin.pull() else: repo = Repo.clone_from(self.url, self.dir) self.hash = repo.head.object.hexsha return True except InvalidGitRepositoryError: return False except GitCommandError: return False<|docstring|>Clone the repository or pull it if it has already been cloned.<|endoftext|>
4b7ee97bdb61bba7a7ed0334df48a645a90bcc23fd276393f8bc19bc7fded332
@staticmethod
def parse_repo_info(repo_path):
    """Parse repository information from a repository path.

    There are two valid repository path formats:
    - Full remote repository URL (supports both GitHub and GitLab).
      "https://github.com/user/repo"
    - Short GitHub repository URL (only works with GitHub).
      "user/repo"

    Returns a RepoInfo instance, or None when the path cannot be parsed.
    """
    try:
        parts = urlparse(repo_path)
    except ValueError:
        return None

    # Reject URLs that carry credentials or extra URL components, and any
    # scheme other than plain http(s) (empty scheme means the short form).
    has_extras = (parts.username or parts.password or parts.params
                  or parts.query or parts.fragment)
    if has_extras or parts.scheme.lower() not in {'https', 'http', ''}:
        return None

    path_match = _REGEX_PATH.fullmatch(parts.path)
    if not path_match:
        if parts.scheme:
            return None
        # Short "user/repo" form: retry once with an explicit scheme.
        return RepoInfo.parse_repo_info(_URL_IMPLICIT_SCHEME + '://' + repo_path)

    repo_user, repo_name = path_match[1], path_match[2]
    scheme = parts.scheme.lower() or _URL_IMPLICIT_SCHEME
    server = parts.hostname or _URL_IMPLICIT_SERVER

    known_host = _REGEX_GITHUBLAB.fullmatch(server)
    if parts.hostname and known_host:
        # Normalize recognized GitHub/GitLab hosts to the canonical form.
        scheme = _URL_IMPLICIT_SCHEME
        server = known_host[1].lower()
    elif not _REGEX_HOSTNAME.fullmatch(server):
        return None

    full_url = urlunparse((scheme, ':@' + server,
                           f'/{repo_user}/{repo_name}', '', '', ''))
    clone_dir = path_join(clone_root_dir, server, repo_user, repo_name)
    return RepoInfo(full_url, server, repo_user, repo_name, clone_dir)
Parse repository information from a repository path. There are two valid repository path formats: - Full remote repository URL (supports both GitHub and GitLab). "https://github.com/user/repo" - Short GitHub repository URL (only works with GitHub). "user/repo"
engine/preprocessing/repoinfo.py
parse_repo_info
dentonmwood/codeDuplicationParser
1
python
@staticmethod def parse_repo_info(repo_path): '\n Parse repository information from a repository path.\n\n There are two valid repository path formats:\n - Full remote repository URL (supports both GitHub and GitLab).\n "https://github.com/user/repo"\n - Short GitHub repository URL (only works with GitHub).\n "user/repo"\n\n ' try: parts = urlparse(repo_path) except ValueError: return None if (parts.username or parts.password or parts.params or parts.query or parts.fragment or (parts.scheme.lower() not in {'https', 'http', })): return None path_match = _REGEX_PATH.fullmatch(parts.path) if (not path_match): return (None if parts.scheme else RepoInfo.parse_repo_info(((_URL_IMPLICIT_SCHEME + '://') + repo_path))) repo_user = path_match[1] repo_name = path_match[2] scheme = (parts.scheme.lower() or _URL_IMPLICIT_SCHEME) server = (parts.hostname or _URL_IMPLICIT_SERVER) server_match = _REGEX_GITHUBLAB.fullmatch(server) if (parts.hostname and server_match): scheme = _URL_IMPLICIT_SCHEME server = server_match[1].lower() elif (not _REGEX_HOSTNAME.fullmatch(server)): return None full_url = urlunparse((scheme, (':@' + server), f'/{repo_user}/{repo_name}', , , )) clone_dir = path_join(clone_root_dir, server, repo_user, repo_name) return RepoInfo(full_url, server, repo_user, repo_name, clone_dir)
@staticmethod def parse_repo_info(repo_path): '\n Parse repository information from a repository path.\n\n There are two valid repository path formats:\n - Full remote repository URL (supports both GitHub and GitLab).\n "https://github.com/user/repo"\n - Short GitHub repository URL (only works with GitHub).\n "user/repo"\n\n ' try: parts = urlparse(repo_path) except ValueError: return None if (parts.username or parts.password or parts.params or parts.query or parts.fragment or (parts.scheme.lower() not in {'https', 'http', })): return None path_match = _REGEX_PATH.fullmatch(parts.path) if (not path_match): return (None if parts.scheme else RepoInfo.parse_repo_info(((_URL_IMPLICIT_SCHEME + '://') + repo_path))) repo_user = path_match[1] repo_name = path_match[2] scheme = (parts.scheme.lower() or _URL_IMPLICIT_SCHEME) server = (parts.hostname or _URL_IMPLICIT_SERVER) server_match = _REGEX_GITHUBLAB.fullmatch(server) if (parts.hostname and server_match): scheme = _URL_IMPLICIT_SCHEME server = server_match[1].lower() elif (not _REGEX_HOSTNAME.fullmatch(server)): return None full_url = urlunparse((scheme, (':@' + server), f'/{repo_user}/{repo_name}', , , )) clone_dir = path_join(clone_root_dir, server, repo_user, repo_name) return RepoInfo(full_url, server, repo_user, repo_name, clone_dir)<|docstring|>Parse repository information from a repository path. There are two valid repository path formats: - Full remote repository URL (supports both GitHub and GitLab). "https://github.com/user/repo" - Short GitHub repository URL (only works with GitHub). "user/repo"<|endoftext|>
a96e8f16b25d88aa2d0bf10d8ff560b1d50a98218a7a93dbc4f1c1e6ea075e89
def __str__(self):
    """Return a human-readable summary of the repository information."""
    summary = f'{self.url} -> {self.dir}'
    # The commit hash is only appended once a clone/pull has recorded it.
    return f'{summary} (commit: {self.hash})' if self.hash else summary
Convert the most useful repo info into a human-readable string.
engine/preprocessing/repoinfo.py
__str__
dentonmwood/codeDuplicationParser
1
python
def __str__(self): info_str = f'{self.url} -> {self.dir}' if self.hash: info_str += f' (commit: {self.hash})' return info_str
def __str__(self): info_str = f'{self.url} -> {self.dir}' if self.hash: info_str += f' (commit: {self.hash})' return info_str<|docstring|>Convert the most useful repo info into a human-readable string.<|endoftext|>
7b240894cd65b70af9311d7f0805d218b74b81efa86f6f835789385607963708
def __repr__(self):
    """Return the string representation of the repository information."""
    # Delegate to __str__ so both representations stay in sync.
    return str(self)
Return string representation of the repository information.
engine/preprocessing/repoinfo.py
__repr__
dentonmwood/codeDuplicationParser
1
python
def __repr__(self): return self.__str__()
def __repr__(self): return self.__str__()<|docstring|>Return string representation of the repository information.<|endoftext|>
e053f652cc7bf5bca95a496d0a742c9164876f28d2118f1c9b0e3c9e77ff8495
def __init__(self, experiment, label=None):
    """Initialize the PrintPopulationGraphProperties Action.

    Reads scheduling parameters (epoch range, frequency, priority) and the
    output filename from the experiment configuration, opens the CSV output
    file, and optionally writes a header row.
    """
    super(PrintPopulationGraphProperties, self).__init__(
        experiment, name='PrintPopulationGraphProperties', label=label)

    config = self.experiment.config
    section = self.config_section
    self.epoch_start = config.getint(section, 'epoch_start', 0)
    self.epoch_end = config.getint(
        section, 'epoch_end',
        default=config.getint('Experiment', 'epochs', default=-1))
    self.frequency = config.getint(section, 'frequency', 1)
    self.priority = config.getint(section, 'priority', 0)
    self.filename = config.get(section, 'filename',
                               'population_graph_properties.csv')
    self.header = config.get(section, 'header', default=True)

    out_path = self.datafile_path(self.filename)
    # NOTE(review): the file object is never closed explicitly; rows are
    # written over the Action's whole lifetime -- confirm this is intended.
    self.writer = csv.writer(open(out_path, 'w'))

    if self.header:
        self.writer.writerow(['epoch', 'nodes', 'edges', 'avg_degree',
                              'std_degree', 'avg_clustering_coefficient',
                              'diameter', 'num_connected_components'])
Initialize the PrintPopulationGraphProperties Action
seeds/plugins/action/PrintPopulationGraphProperties.py
__init__
briandconnelly/seeds
11
python
def __init__(self, experiment, label=None): super(PrintPopulationGraphProperties, self).__init__(experiment, name='PrintPopulationGraphProperties', label=label) self.epoch_start = self.experiment.config.getint(self.config_section, 'epoch_start', 0) self.epoch_end = self.experiment.config.getint(self.config_section, 'epoch_end', default=self.experiment.config.getint('Experiment', 'epochs', default=(- 1))) self.frequency = self.experiment.config.getint(self.config_section, 'frequency', 1) self.priority = self.experiment.config.getint(self.config_section, 'priority', 0) self.filename = self.experiment.config.get(self.config_section, 'filename', 'population_graph_properties.csv') self.header = self.experiment.config.get(self.config_section, 'header', default=True) data_file = self.datafile_path(self.filename) self.writer = csv.writer(open(data_file, 'w')) if self.header: header = ['epoch', 'nodes', 'edges', 'avg_degree', 'std_degree', 'avg_clustering_coefficient', 'diameter', 'num_connected_components'] self.writer.writerow(header)
def __init__(self, experiment, label=None): super(PrintPopulationGraphProperties, self).__init__(experiment, name='PrintPopulationGraphProperties', label=label) self.epoch_start = self.experiment.config.getint(self.config_section, 'epoch_start', 0) self.epoch_end = self.experiment.config.getint(self.config_section, 'epoch_end', default=self.experiment.config.getint('Experiment', 'epochs', default=(- 1))) self.frequency = self.experiment.config.getint(self.config_section, 'frequency', 1) self.priority = self.experiment.config.getint(self.config_section, 'priority', 0) self.filename = self.experiment.config.get(self.config_section, 'filename', 'population_graph_properties.csv') self.header = self.experiment.config.get(self.config_section, 'header', default=True) data_file = self.datafile_path(self.filename) self.writer = csv.writer(open(data_file, 'w')) if self.header: header = ['epoch', 'nodes', 'edges', 'avg_degree', 'std_degree', 'avg_clustering_coefficient', 'diameter', 'num_connected_components'] self.writer.writerow(header)<|docstring|>Initialize the PrintPopulationGraphProperties Action<|endoftext|>
69c73634755d3bda527dd6617b28318204b6852e6f9a834afded9038ed593aa3
def update(self):
    """Execute the Action: append one row of population graph statistics.

    Skips the write entirely when skip_update() says this epoch is out of
    the configured range/frequency.
    """
    if self.skip_update():
        return

    graph = self.experiment.population.topology.graph
    degree_values = list(nx.degree(graph).values())

    stats = [self.experiment.epoch,
             nx.number_of_nodes(graph),
             nx.number_of_edges(graph),
             mean(degree_values),
             std(degree_values),
             nx.average_clustering(graph),
             nx.diameter(graph),
             nx.number_connected_components(graph)]
    self.writer.writerow(stats)
Execute the Action
seeds/plugins/action/PrintPopulationGraphProperties.py
update
briandconnelly/seeds
11
python
def update(self): if self.skip_update(): return g = self.experiment.population.topology.graph degrees = list(nx.degree(g).values()) row = [self.experiment.epoch, nx.number_of_nodes(g), nx.number_of_edges(g), mean(degrees), std(degrees), nx.average_clustering(g), nx.diameter(g), nx.number_connected_components(g)] self.writer.writerow(row)
def update(self): if self.skip_update(): return g = self.experiment.population.topology.graph degrees = list(nx.degree(g).values()) row = [self.experiment.epoch, nx.number_of_nodes(g), nx.number_of_edges(g), mean(degrees), std(degrees), nx.average_clustering(g), nx.diameter(g), nx.number_connected_components(g)] self.writer.writerow(row)<|docstring|>Execute the Action<|endoftext|>
2693ca2c4a5e7322636b1e0975634e5e3d911b817e7d17e6164fc95f62061e78
def PredictTheWinner(self, nums):
    """
    :type nums: List[int]
    :rtype: bool

    Memoized minimax on the score *difference*: best_diff(lo, hi) is the
    maximum (current player's total - opponent's total) achievable on
    nums[lo..hi]. Player 1 wins (or ties) iff that difference is >= 0,
    which is equivalent to player 1 scoring at least half the total.
    """
    cache = {}

    def best_diff(lo, hi):
        if lo == hi:
            return nums[lo]
        if (lo, hi) not in cache:
            take_left = nums[lo] - best_diff(lo + 1, hi)
            take_right = nums[hi] - best_diff(lo, hi - 1)
            cache[(lo, hi)] = max(take_left, take_right)
        return cache[(lo, hi)]

    return best_diff(0, len(nums) - 1) >= 0
:type nums: List[int] :rtype: bool
Python3/0486-Predict-the-Winner/soln.py
PredictTheWinner
wyaadarsh/LeetCode-Solutions
5
python
def PredictTheWinner(self, nums): '\n :type nums: List[int]\n :rtype: bool\n ' memo = {} def score(s, start, end): if ((start, end) in memo): return memo[(start, end)] if (start == end): memo[(start, end)] = nums[start] return nums[start] left = (s - score((s - nums[start]), (start + 1), end)) right = (s - score((s - nums[end]), start, (end - 1))) memo[(start, end)] = max(left, right) return memo[(start, end)] s = sum(nums) return (score(s, 0, (len(nums) - 1)) >= (s / 2))
def PredictTheWinner(self, nums): '\n :type nums: List[int]\n :rtype: bool\n ' memo = {} def score(s, start, end): if ((start, end) in memo): return memo[(start, end)] if (start == end): memo[(start, end)] = nums[start] return nums[start] left = (s - score((s - nums[start]), (start + 1), end)) right = (s - score((s - nums[end]), start, (end - 1))) memo[(start, end)] = max(left, right) return memo[(start, end)] s = sum(nums) return (score(s, 0, (len(nums) - 1)) >= (s / 2))<|docstring|>:type nums: List[int] :rtype: bool<|endoftext|>
dc9a414229c1728e1d6a769f7a84e36db02c155f238622bccdf59e8efb806d32
def update(self, data):
    """Set the cell values to the new `data`."""
    self.data = data
    # Index-based loop: deliberately raises IndexError if data has fewer
    # entries than there are cells, matching one value per cell.
    for idx in range(len(self.cells)):
        self.cells[idx].update(data[idx])
Set the cell values to the new `data`
src/dfwidget/main.py
update
JoelStansbury/dfwidget
0
python
def update(self, data): self.data = data for (i, c) in enumerate(self.cells): c.update(data[i])
def update(self, data): self.data = data for (i, c) in enumerate(self.cells): c.update(data[i])<|docstring|>Set the cell values to the new `data`<|endoftext|>
9aaf23e80fdb4312b07abdea9409c08905e6726eb7d1a71ff490ad405b0be8ab
@observe('focus_idx')
def focus(self, change):
    """Controls the highlighting of rows.

    Removes the hover class from the previously focused row and adds it
    to the newly focused one; -1 means "no row focused".
    """
    previous, current = change['old'], change['new']
    if previous != -1:
        self.rows[previous].remove_class('row_hover')
    if current != -1:
        self.rows[current].add_class('row_hover')
Controls the highlighting of rows
src/dfwidget/main.py
focus
JoelStansbury/dfwidget
0
python
@observe('focus_idx') def focus(self, change): old = change['old'] new = change['new'] if (old != (- 1)): self.rows[old].remove_class('row_hover') if (new != (- 1)): self.rows[new].add_class('row_hover')
@observe('focus_idx') def focus(self, change): old = change['old'] new = change['new'] if (old != (- 1)): self.rows[old].remove_class('row_hover') if (new != (- 1)): self.rows[new].add_class('row_hover')<|docstring|>Controls the highlighting of rows<|endoftext|>
af7ac0c06eadbe284e9ee0d459c4b34acc6adbf1caeb079ebd87728897aee4ff
def auto_width(self, df, num_rows):
    """
    Uses the first `num_rows` elements of each column to determine
    the width of each row element.

    Returns a (total_width_px, per-column-percentage list) pair; the
    percentage list starts with the synthetic "Index" column.
    """
    pixels_per_char = 8
    padding = 2

    # Width of the index column: enough for the largest row number.
    col_widths = {'Index': len(str(len(df))) + padding}
    for col in df.columns:
        header_len = len(str(col))
        data_len = max(len(str(v)) for v in df[col].values[:num_rows])
        col_widths[col] = max(header_len, data_len) + padding

    ordered = ['Index'] + list(df.columns)
    total = sum(col_widths.values())
    percentages = [f'{ceil((100 * col_widths[c]) / total)}%' for c in ordered]
    return f'{total * pixels_per_char}px', percentages
Uses the first `num_rows` elements of each column to determine the width of each row element.
src/dfwidget/main.py
auto_width
JoelStansbury/dfwidget
0
python
def auto_width(self, df, num_rows): '\n Uses the first `num_rows` elements of each column to determine\n the width of each row element.\n ' cols = list(df.columns) ppc = 8 spacing = 2 widths = {} for c in cols: c_width = len(str(c)) d_width = max([len(str(x)) for x in df[c].values[:num_rows]]) widths[c] = (max(c_width, d_width) + spacing) widths['Index'] = (len(str(len(df))) + spacing) cols = (['Index'] + cols) total = sum(list(widths.values())) return (f'{(total * ppc)}px', [f'{ceil(((100 * widths[k]) / total))}%' for k in cols])
def auto_width(self, df, num_rows): '\n Uses the first `num_rows` elements of each column to determine\n the width of each row element.\n ' cols = list(df.columns) ppc = 8 spacing = 2 widths = {} for c in cols: c_width = len(str(c)) d_width = max([len(str(x)) for x in df[c].values[:num_rows]]) widths[c] = (max(c_width, d_width) + spacing) widths['Index'] = (len(str(len(df))) + spacing) cols = (['Index'] + cols) total = sum(list(widths.values())) return (f'{(total * ppc)}px', [f'{ceil(((100 * widths[k]) / total))}%' for k in cols])<|docstring|>Uses the first `num_rows` elements of each column to determine the width of each row element.<|endoftext|>
f40bd261daa53b8a708cc3120137a90ef91f5feb3d439ffc554048b218aa021c
def get_interfaces(excluded=None):
    """Return the names of all network interfaces not listed in *excluded*."""
    skip = excluded if excluded else []
    return [name for name in netifaces.interfaces() if name not in skip]
gets interfaces
pydreamscreen/network/discover.py
get_interfaces
J3n50m4t/pydreamscreen
5
python
def get_interfaces(excluded=None): excluded = (excluded or []) return [iface for iface in netifaces.interfaces() if (iface not in excluded)]
def get_interfaces(excluded=None): excluded = (excluded or []) return [iface for iface in netifaces.interfaces() if (iface not in excluded)]<|docstring|>gets interfaces<|endoftext|>
90f51302f4540ab3adee23ab2b678884b6b6952181f24360bb222b3951b798be
def get_networks(excluded=None):
    """Return the IPv4 address info for every non-excluded interface."""
    networks = []
    for name in get_interfaces(excluded or []):
        addresses = netifaces.ifaddresses(name)
        # Only interfaces that actually have an IPv4 address family entry.
        if netifaces.AF_INET in addresses:
            networks.append(addresses[netifaces.AF_INET])
    return networks
gets networks
pydreamscreen/network/discover.py
get_networks
J3n50m4t/pydreamscreen
5
python
def get_networks(excluded=None): excluded = (excluded or []) return [netifaces.ifaddresses(iface)[netifaces.AF_INET] for iface in get_interfaces(excluded) if (netifaces.AF_INET in netifaces.ifaddresses(iface))]
def get_networks(excluded=None): excluded = (excluded or []) return [netifaces.ifaddresses(iface)[netifaces.AF_INET] for iface in get_interfaces(excluded) if (netifaces.AF_INET in netifaces.ifaddresses(iface))]<|docstring|>gets networks<|endoftext|>
0be0a4327d00136faeaa146aa90dfcd0521ea48a63929f8d066216beb959a94d
def get_broadcasts(excluded=None):
    """Return every IPv4 broadcast address on the non-excluded interfaces."""
    result = []
    for addresses in get_networks(excluded or []):
        for addr in addresses:
            # Point-to-point links may lack a broadcast entry; skip those.
            if 'broadcast' in addr:
                result.append(addr['broadcast'])
    return result
gets broadcasts
pydreamscreen/network/discover.py
get_broadcasts
J3n50m4t/pydreamscreen
5
python
def get_broadcasts(excluded=None): excluded = (excluded or []) return [addr['broadcast'] for addresses in get_networks(excluded) for addr in addresses if ('broadcast' in addr.keys())]
def get_broadcasts(excluded=None): excluded = (excluded or []) return [addr['broadcast'] for addresses in get_networks(excluded) for addr in addresses if ('broadcast' in addr.keys())]<|docstring|>gets broadcasts<|endoftext|>
55462288b97d2b0be344a13bfe4b32d558729f93b1583dffc8480e5ed7b1c8ae
def simple_offset(observation, catalog, wcsprm, report=''):
    """Get best offset in x, y direction.

    Builds a 2D histogram of all pairwise x/y distances between detected
    sources and catalog sources projected onto the sensor; the histogram
    peak gives the most common offset, which is then applied to
    ``wcsprm.crpix``.

    Parameters
    ----------
    observation : dataframe
        pandas dataframe with sources on the observation
    catalog : dataframe
        pandas dataframe with nearby sources from online catalogs with
        accurate astrometric information
    wcsprm
        World coordinates file
    report : str
        Previous part of the final report that will be extended by the method.

    Returns
    -------
    wcsprm, signal, report
    """
    report = report + 'simple_offset aproach via a histogram \n'
    # Project catalog sky coordinates (ra, dec) onto sensor pixel coordinates.
    catalog_on_sensor = wcsprm.s2p(catalog[['ra', 'dec']], 1)
    catalog_on_sensor = catalog_on_sensor['pixcrd']

    # All pairwise observed-minus-catalog distances, separately per axis.
    obs = [observation['xcenter'].values]
    cat = np.array([catalog_on_sensor[:, 0]])
    distances_x = (obs - cat.T).flatten()
    obs = [observation['ycenter'].values]
    cat = np.array([catalog_on_sensor[:, 1]])
    distances_y = (obs - cat.T).flatten()

    binwidth = s.OFFSET_BINWIDTH
    bins = [np.arange(min(distances_x), max(distances_x) + binwidth, binwidth),
            np.arange(min(distances_y), max(distances_y) + binwidth, binwidth)]
    H, x_edges, y_edges = np.histogram2d(distances_x, distances_y, bins=bins)

    # The histogram peak marks the most common (x, y) offset.
    peak = np.argwhere(H == H.max())[0]
    # Matches within a 3x3 (resp. 9x9) bin neighbourhood of the peak.
    signal = np.sum(H[peak[0] - 1:peak[0] + 2, peak[1] - 1:peak[1] + 2])
    signal_wide = np.sum(H[peak[0] - 4:peak[0] + 5, peak[1] - 4:peak[1] + 5])
    report = report + 'signal wide (64pixel) - signal (9pixel) = {}. If this value is large then there might be rotation or scaling issues. \n'.format(signal_wide - signal)

    # Offset = center of the peak bin on each axis.
    x_shift = (x_edges[peak[0]] + x_edges[peak[0] + 1]) / 2
    y_shift = (y_edges[peak[1]] + y_edges[peak[1] + 1]) / 2
    report = report + 'We find an offset of {} in the x direction and {} in the y direction \n'.format(x_shift, y_shift)
    report = report + '{} sources are fitting well with this offset. \n'.format(signal)

    # Apply the offset to the WCS reference pixel.
    current_central_pixel = wcsprm.crpix
    new_central_pixel = [current_central_pixel[0] + x_shift,
                         current_central_pixel[1] + y_shift]
    wcsprm.crpix = new_central_pixel
    return wcsprm, signal, report
Get best offset in x, y direction. Parameters ---------- observation : dataframe pandas dataframe with sources on the observation catalog : dataframe pandas dataframe with nearby sources from online catalogs with accurate astrometric information wcsprm Wold coordinates file report : str Previous part of the final report that will be extended by the method. Returns ------- wcsprm, signal, report
get_transformation.py
simple_offset
fanff/astrometry
9
python
def simple_offset(observation, catalog, wcsprm, report=): 'Get best offset in x, y direction.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wold coordinates file\n report : str\n Previous part of the final report that will be extended by the method.\n\n Returns\n -------\n wcsprm, signal, report\n\n ' report = (report + 'simple_offset aproach via a histogram \n') catalog_on_sensor = wcsprm.s2p(catalog[['ra', 'dec']], 1) catalog_on_sensor = catalog_on_sensor['pixcrd'] obs = [observation['xcenter'].values] cat = np.array([catalog_on_sensor[(:, 0)]]) distances_x = (obs - cat.T).flatten() obs = [observation['ycenter'].values] cat = np.array([catalog_on_sensor[(:, 1)]]) distances_y = (obs - cat.T).flatten() binwidth = s.OFFSET_BINWIDTH bins = [np.arange(min(distances_x), (max(distances_x) + binwidth), binwidth), np.arange(min(distances_y), (max(distances_y) + binwidth), binwidth)] (H, x_edges, y_edges) = np.histogram2d(distances_x, distances_y, bins=bins) peak = np.argwhere((H == H.max()))[0] signal = np.sum(H[((peak[0] - 1):(peak[0] + 2), (peak[1] - 1):(peak[1] + 2))]) signal_wide = np.sum(H[((peak[0] - 4):(peak[0] + 5), (peak[1] - 4):(peak[1] + 5))]) report = (report + 'signal wide (64pixel) - signal (9pixel) = {}. If this value is large then there might be rotation or scaling issues. \n'.format((signal_wide - signal))) x_shift = ((x_edges[peak[0]] + x_edges[(peak[0] + 1)]) / 2) y_shift = ((y_edges[peak[1]] + y_edges[(peak[1] + 1)]) / 2) report = (report + 'We find an offset of {} in the x direction and {} in the y direction \n'.format(x_shift, y_shift)) report = (report + '{} sources are fitting well with this offset. 
\n'.format(signal)) current_central_pixel = wcsprm.crpix new_central_pixel = [(current_central_pixel[0] + x_shift), (current_central_pixel[1] + y_shift)] wcsprm.crpix = new_central_pixel return (wcsprm, signal, report)
def simple_offset(observation, catalog, wcsprm, report=): 'Get best offset in x, y direction.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wold coordinates file\n report : str\n Previous part of the final report that will be extended by the method.\n\n Returns\n -------\n wcsprm, signal, report\n\n ' report = (report + 'simple_offset aproach via a histogram \n') catalog_on_sensor = wcsprm.s2p(catalog[['ra', 'dec']], 1) catalog_on_sensor = catalog_on_sensor['pixcrd'] obs = [observation['xcenter'].values] cat = np.array([catalog_on_sensor[(:, 0)]]) distances_x = (obs - cat.T).flatten() obs = [observation['ycenter'].values] cat = np.array([catalog_on_sensor[(:, 1)]]) distances_y = (obs - cat.T).flatten() binwidth = s.OFFSET_BINWIDTH bins = [np.arange(min(distances_x), (max(distances_x) + binwidth), binwidth), np.arange(min(distances_y), (max(distances_y) + binwidth), binwidth)] (H, x_edges, y_edges) = np.histogram2d(distances_x, distances_y, bins=bins) peak = np.argwhere((H == H.max()))[0] signal = np.sum(H[((peak[0] - 1):(peak[0] + 2), (peak[1] - 1):(peak[1] + 2))]) signal_wide = np.sum(H[((peak[0] - 4):(peak[0] + 5), (peak[1] - 4):(peak[1] + 5))]) report = (report + 'signal wide (64pixel) - signal (9pixel) = {}. If this value is large then there might be rotation or scaling issues. \n'.format((signal_wide - signal))) x_shift = ((x_edges[peak[0]] + x_edges[(peak[0] + 1)]) / 2) y_shift = ((y_edges[peak[1]] + y_edges[(peak[1] + 1)]) / 2) report = (report + 'We find an offset of {} in the x direction and {} in the y direction \n'.format(x_shift, y_shift)) report = (report + '{} sources are fitting well with this offset. 
\n'.format(signal)) current_central_pixel = wcsprm.crpix new_central_pixel = [(current_central_pixel[0] + x_shift), (current_central_pixel[1] + y_shift)] wcsprm.crpix = new_central_pixel return (wcsprm, signal, report)<|docstring|>Get best offset in x, y direction. Parameters ---------- observation : dataframe pandas dataframe with sources on the observation catalog : dataframe pandas dataframe with nearby sources from online catalogs with accurate astrometric information wcsprm Wold coordinates file report : str Previous part of the final report that will be extended by the method. Returns ------- wcsprm, signal, report<|endoftext|>
0f24159b683eab7aab03c56afc42c0bd71c77b532a7b27b02400d59304bf6745
def rotate(wcsprm, rot):
    """Helper for offset_with_orientation: apply *rot* to the WCS PC matrix.

    Left-multiplies the current PC matrix by the rotation matrix and stores
    the result back on *wcsprm*, which is returned for convenience.
    """
    wcsprm.pc = rot @ wcsprm.get_pc()
    return wcsprm
Help method for offset_with_orientation. Set the different rotations in the header.
get_transformation.py
rotate
fanff/astrometry
9
python
def rotate(wcsprm, rot): pc = wcsprm.get_pc() pc_rotated = (rot @ pc) wcsprm.pc = pc_rotated return wcsprm
def rotate(wcsprm, rot): pc = wcsprm.get_pc() pc_rotated = (rot @ pc) wcsprm.pc = pc_rotated return wcsprm<|docstring|>Help method for offset_with_orientation. Set the different rotations in the header.<|endoftext|>
c7555f265fca8ad30ab4072308e776bb70b48b59c22f2360140166964b43afce
def offset_with_orientation(observation, catalog, wcsprm, verbose=True, fast=False, report_global='', INCREASE_FOV_FLAG=False, silent=False): 'Use simple_offset(...) but with trying 0,90,180,270 rotation.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n verbose : boolean\n Set to False to supress output to the console\n fast : boolean\n If true will run with subset of the sources to increase speed.\n\n\n Returns\n -------\n wcs, signal, report\n\n ' observation = copy.copy(observation) N_SOURCES = observation.shape[0] if fast: if (N_SOURCES > s.USE_N_SOURCES): N_SOURCES = s.USE_N_SOURCES observation = observation.nlargest(N_SOURCES, 'aperture_sum') if INCREASE_FOV_FLAG: N_CATALOG = (N_SOURCES * 12) else: N_CATALOG = (N_SOURCES * 4) catalog = catalog.nsmallest(N_CATALOG, 'mag') if verbose: print('offset_with_orientation, seaching for offset while considering 0,90,180,270 rotations') if fast: print('running in fast mode') rotations = [[[1, 0], [0, 1]], [[(- 1), 0], [0, (- 1)]], [[(- 1), 0], [0, 1]], [[1, 0], [0, (- 1)]], [[0, 1], [1, 0]], [[0, (- 1)], [(- 1), 0]], [[0, (- 1)], [1, 0]], [[0, 1], [(- 1), 0]]] wcsprm_global = copy.copy(wcsprm) results = [] for rot in rotations: if verbose: print('Trying rotation {}'.format(rot)) wcsprm = rotate(copy.copy(wcsprm_global), rot) report = (report_global + '---- Report for rotation {} ---- \n'.format(rot)) (wcsprm, signal, report) = simple_offset(observation, catalog, wcsprm, report) results.append([copy.copy(wcsprm), signal, report]) signals = [i[1] for i in results] median = np.median(signals) i = np.argmax(signals) wcsprm = results[i][0] signal = signals[i] report = results[i][2] report = (report + 'A total of {} sources from the fits file where used. 
\n'.format(N_SOURCES)) report = (report + 'The signal (#stars) is {} times higher than noise outlierers for other directions. (more than 2 would be nice, typical: 8 for PS)\n'.format((signals[i] / median))) if verbose: print('We found the following world coordinates: ') print(WCS(wcsprm.to_header())) print('And here is the report:') print(report) print('-----------------------------') off = (wcsprm.crpix - wcsprm_global.crpix) if (not silent): print('Found offset {:.3g} in x direction and {:.3g} in y direction'.format(off[0], off[1])) return (wcsprm, signal, report)
Use simple_offset(...) but with trying 0,90,180,270 rotation. Parameters ---------- observation : dataframe pandas dataframe with sources on the observation catalog : dataframe pandas dataframe with nearby sources from online catalogs with accurate astrometric information wcsprm Wcsprm file verbose : boolean Set to False to supress output to the console fast : boolean If true will run with subset of the sources to increase speed. Returns ------- wcs, signal, report
get_transformation.py
offset_with_orientation
fanff/astrometry
9
python
def offset_with_orientation(observation, catalog, wcsprm, verbose=True, fast=False, report_global=, INCREASE_FOV_FLAG=False, silent=False): 'Use simple_offset(...) but with trying 0,90,180,270 rotation.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n verbose : boolean\n Set to False to supress output to the console\n fast : boolean\n If true will run with subset of the sources to increase speed.\n\n\n Returns\n -------\n wcs, signal, report\n\n ' observation = copy.copy(observation) N_SOURCES = observation.shape[0] if fast: if (N_SOURCES > s.USE_N_SOURCES): N_SOURCES = s.USE_N_SOURCES observation = observation.nlargest(N_SOURCES, 'aperture_sum') if INCREASE_FOV_FLAG: N_CATALOG = (N_SOURCES * 12) else: N_CATALOG = (N_SOURCES * 4) catalog = catalog.nsmallest(N_CATALOG, 'mag') if verbose: print('offset_with_orientation, seaching for offset while considering 0,90,180,270 rotations') if fast: print('running in fast mode') rotations = [[[1, 0], [0, 1]], [[(- 1), 0], [0, (- 1)]], [[(- 1), 0], [0, 1]], [[1, 0], [0, (- 1)]], [[0, 1], [1, 0]], [[0, (- 1)], [(- 1), 0]], [[0, (- 1)], [1, 0]], [[0, 1], [(- 1), 0]]] wcsprm_global = copy.copy(wcsprm) results = [] for rot in rotations: if verbose: print('Trying rotation {}'.format(rot)) wcsprm = rotate(copy.copy(wcsprm_global), rot) report = (report_global + '---- Report for rotation {} ---- \n'.format(rot)) (wcsprm, signal, report) = simple_offset(observation, catalog, wcsprm, report) results.append([copy.copy(wcsprm), signal, report]) signals = [i[1] for i in results] median = np.median(signals) i = np.argmax(signals) wcsprm = results[i][0] signal = signals[i] report = results[i][2] report = (report + 'A total of {} sources from the fits file where used. 
\n'.format(N_SOURCES)) report = (report + 'The signal (#stars) is {} times higher than noise outlierers for other directions. (more than 2 would be nice, typical: 8 for PS)\n'.format((signals[i] / median))) if verbose: print('We found the following world coordinates: ') print(WCS(wcsprm.to_header())) print('And here is the report:') print(report) print('-----------------------------') off = (wcsprm.crpix - wcsprm_global.crpix) if (not silent): print('Found offset {:.3g} in x direction and {:.3g} in y direction'.format(off[0], off[1])) return (wcsprm, signal, report)
def offset_with_orientation(observation, catalog, wcsprm, verbose=True, fast=False, report_global=, INCREASE_FOV_FLAG=False, silent=False): 'Use simple_offset(...) but with trying 0,90,180,270 rotation.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n verbose : boolean\n Set to False to supress output to the console\n fast : boolean\n If true will run with subset of the sources to increase speed.\n\n\n Returns\n -------\n wcs, signal, report\n\n ' observation = copy.copy(observation) N_SOURCES = observation.shape[0] if fast: if (N_SOURCES > s.USE_N_SOURCES): N_SOURCES = s.USE_N_SOURCES observation = observation.nlargest(N_SOURCES, 'aperture_sum') if INCREASE_FOV_FLAG: N_CATALOG = (N_SOURCES * 12) else: N_CATALOG = (N_SOURCES * 4) catalog = catalog.nsmallest(N_CATALOG, 'mag') if verbose: print('offset_with_orientation, seaching for offset while considering 0,90,180,270 rotations') if fast: print('running in fast mode') rotations = [[[1, 0], [0, 1]], [[(- 1), 0], [0, (- 1)]], [[(- 1), 0], [0, 1]], [[1, 0], [0, (- 1)]], [[0, 1], [1, 0]], [[0, (- 1)], [(- 1), 0]], [[0, (- 1)], [1, 0]], [[0, 1], [(- 1), 0]]] wcsprm_global = copy.copy(wcsprm) results = [] for rot in rotations: if verbose: print('Trying rotation {}'.format(rot)) wcsprm = rotate(copy.copy(wcsprm_global), rot) report = (report_global + '---- Report for rotation {} ---- \n'.format(rot)) (wcsprm, signal, report) = simple_offset(observation, catalog, wcsprm, report) results.append([copy.copy(wcsprm), signal, report]) signals = [i[1] for i in results] median = np.median(signals) i = np.argmax(signals) wcsprm = results[i][0] signal = signals[i] report = results[i][2] report = (report + 'A total of {} sources from the fits file where used. 
\n'.format(N_SOURCES)) report = (report + 'The signal (#stars) is {} times higher than noise outlierers for other directions. (more than 2 would be nice, typical: 8 for PS)\n'.format((signals[i] / median))) if verbose: print('We found the following world coordinates: ') print(WCS(wcsprm.to_header())) print('And here is the report:') print(report) print('-----------------------------') off = (wcsprm.crpix - wcsprm_global.crpix) if (not silent): print('Found offset {:.3g} in x direction and {:.3g} in y direction'.format(off[0], off[1])) return (wcsprm, signal, report)<|docstring|>Use simple_offset(...) but with trying 0,90,180,270 rotation. Parameters ---------- observation : dataframe pandas dataframe with sources on the observation catalog : dataframe pandas dataframe with nearby sources from online catalogs with accurate astrometric information wcsprm Wcsprm file verbose : boolean Set to False to supress output to the console fast : boolean If true will run with subset of the sources to increase speed. Returns ------- wcs, signal, report<|endoftext|>
09940ae06a567f08fc71d1f47ce8d9499a8007328cf11aebfe4971ea1b088882
def peak_with_histogram(obs_x, obs_y, cat_x, cat_y): 'Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them.\n\n This should be replaced with a method using convolution instead. Also currently the bandwith is choosen quite random\n\n Parameters\n ----------\n obs_x: array\n first axis to consider of observations (x pos or log distance)\n obs_y: array\n second axis to consider of observations (x pos or log distance)\n cat_x: array\n first axis to consider of catalog data (x pos or log distance)\n cat_x: array\n first axis to consider of catalog data (x pos or log distance)\n\n Returns\n -------\n x_shift, y_shift\n\n ' obs_x = obs_x[(:, np.newaxis)] cat_x = cat_x[(:, np.newaxis)] distances_x = (obs_x - cat_x.T).flatten() obs_y = obs_y[(:, np.newaxis)] cat_y = cat_y[(:, np.newaxis)] distances_y = (obs_y - cat_y.T).flatten() binwidth = 0.001 bins = [np.arange(min(distances_x), (max(distances_x) + binwidth), binwidth), np.arange(min(distances_y), (max(distances_y) + (binwidth * 10)), (binwidth * 10))] plt.figure() (H, x_edges, y_edges, tmp) = plt.hist2d(distances_x, distances_y, bins=bins) plt.show() (H, x_edges, y_edges) = np.histogram2d(distances_x, distances_y, bins=bins) peak = np.argwhere((H == H.max()))[0] x_shift = ((x_edges[peak[0]] + x_edges[(peak[0] + 1)]) / 2) y_shift = ((y_edges[peak[1]] + y_edges[(peak[1] + 1)]) / 2) return (x_shift, y_shift)
Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them. This should be replaced with a method using convolution instead. Also currently the bandwith is choosen quite random Parameters ---------- obs_x: array first axis to consider of observations (x pos or log distance) obs_y: array second axis to consider of observations (x pos or log distance) cat_x: array first axis to consider of catalog data (x pos or log distance) cat_x: array first axis to consider of catalog data (x pos or log distance) Returns ------- x_shift, y_shift
get_transformation.py
peak_with_histogram
fanff/astrometry
9
python
def peak_with_histogram(obs_x, obs_y, cat_x, cat_y): 'Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them.\n\n This should be replaced with a method using convolution instead. Also currently the bandwith is choosen quite random\n\n Parameters\n ----------\n obs_x: array\n first axis to consider of observations (x pos or log distance)\n obs_y: array\n second axis to consider of observations (x pos or log distance)\n cat_x: array\n first axis to consider of catalog data (x pos or log distance)\n cat_x: array\n first axis to consider of catalog data (x pos or log distance)\n\n Returns\n -------\n x_shift, y_shift\n\n ' obs_x = obs_x[(:, np.newaxis)] cat_x = cat_x[(:, np.newaxis)] distances_x = (obs_x - cat_x.T).flatten() obs_y = obs_y[(:, np.newaxis)] cat_y = cat_y[(:, np.newaxis)] distances_y = (obs_y - cat_y.T).flatten() binwidth = 0.001 bins = [np.arange(min(distances_x), (max(distances_x) + binwidth), binwidth), np.arange(min(distances_y), (max(distances_y) + (binwidth * 10)), (binwidth * 10))] plt.figure() (H, x_edges, y_edges, tmp) = plt.hist2d(distances_x, distances_y, bins=bins) plt.show() (H, x_edges, y_edges) = np.histogram2d(distances_x, distances_y, bins=bins) peak = np.argwhere((H == H.max()))[0] x_shift = ((x_edges[peak[0]] + x_edges[(peak[0] + 1)]) / 2) y_shift = ((y_edges[peak[1]] + y_edges[(peak[1] + 1)]) / 2) return (x_shift, y_shift)
def peak_with_histogram(obs_x, obs_y, cat_x, cat_y): 'Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them.\n\n This should be replaced with a method using convolution instead. Also currently the bandwith is choosen quite random\n\n Parameters\n ----------\n obs_x: array\n first axis to consider of observations (x pos or log distance)\n obs_y: array\n second axis to consider of observations (x pos or log distance)\n cat_x: array\n first axis to consider of catalog data (x pos or log distance)\n cat_x: array\n first axis to consider of catalog data (x pos or log distance)\n\n Returns\n -------\n x_shift, y_shift\n\n ' obs_x = obs_x[(:, np.newaxis)] cat_x = cat_x[(:, np.newaxis)] distances_x = (obs_x - cat_x.T).flatten() obs_y = obs_y[(:, np.newaxis)] cat_y = cat_y[(:, np.newaxis)] distances_y = (obs_y - cat_y.T).flatten() binwidth = 0.001 bins = [np.arange(min(distances_x), (max(distances_x) + binwidth), binwidth), np.arange(min(distances_y), (max(distances_y) + (binwidth * 10)), (binwidth * 10))] plt.figure() (H, x_edges, y_edges, tmp) = plt.hist2d(distances_x, distances_y, bins=bins) plt.show() (H, x_edges, y_edges) = np.histogram2d(distances_x, distances_y, bins=bins) peak = np.argwhere((H == H.max()))[0] x_shift = ((x_edges[peak[0]] + x_edges[(peak[0] + 1)]) / 2) y_shift = ((y_edges[peak[1]] + y_edges[(peak[1] + 1)]) / 2) return (x_shift, y_shift)<|docstring|>Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them. This should be replaced with a method using convolution instead. 
Also currently the bandwith is choosen quite random Parameters ---------- obs_x: array first axis to consider of observations (x pos or log distance) obs_y: array second axis to consider of observations (x pos or log distance) cat_x: array first axis to consider of catalog data (x pos or log distance) cat_x: array first axis to consider of catalog data (x pos or log distance) Returns ------- x_shift, y_shift<|endoftext|>
166cda7899dc3611e8c3d80a1c839f8d7e7e7abbc4449ad9bfbfa3618976c8ee
def cross_corr_to_fourier_space(a): 'Tranform 2D array into fourier space. Uses padding and normalization.' aa = ((a - np.mean(a)) / np.std(a)) aaa = np.pad(aa, (2, 2), 'constant') ff_a = np.fft.fft2(aaa) return ff_a
Tranform 2D array into fourier space. Uses padding and normalization.
get_transformation.py
cross_corr_to_fourier_space
fanff/astrometry
9
python
def cross_corr_to_fourier_space(a): aa = ((a - np.mean(a)) / np.std(a)) aaa = np.pad(aa, (2, 2), 'constant') ff_a = np.fft.fft2(aaa) return ff_a
def cross_corr_to_fourier_space(a): aa = ((a - np.mean(a)) / np.std(a)) aaa = np.pad(aa, (2, 2), 'constant') ff_a = np.fft.fft2(aaa) return ff_a<|docstring|>Tranform 2D array into fourier space. Uses padding and normalization.<|endoftext|>
13abc06fdd1563570c1cc4c226e875588d848c9cae835812fbfa43b4b87ba1c4
def peak_with_cross_correlation(log_distance_obs, angle_obs, log_distance_cat, angle_cat, scale_guessed=False): 'Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them.\n\n This is using cross correlation\n\n Parameters\n ----------\n log_distance_obs: array\n first axis to consider of observations ( log distance)\n angle_obs: array\n second axis to consider of observations ( angle)\n log_distance_cat: array\n first axis to consider of catalog data (log distance)\n angle_cat: array\n first axis to consider of catalog data (angle)\n\n Returns\n -------\n x_shift, y_shift\n\n ' if (scale_guessed == False): minimum_distance = np.log(8) maximum_distance = max(log_distance_obs) else: minimum_distance = min([min(log_distance_cat), min(log_distance_obs)]) maximum_distance = max([max(log_distance_cat), max(log_distance_obs)]) (bins_dist, binwidth_dist) = np.linspace(minimum_distance, maximum_distance, 3000, retstep=True) (bins_ang, binwidth_ang) = np.linspace(min([min(angle_cat), min(angle_obs)]), max([max(angle_cat), max(angle_obs)]), (360 * 3), retstep=True) bins = [bins_dist, bins_ang] (H_obs, x_edges_obs, y_edges_obs) = np.histogram2d(log_distance_obs, angle_obs, bins=bins) (H_cat, x_edges_cat, y_edges_cat) = np.histogram2d(log_distance_cat, angle_cat, bins=bins) H_obs = ((H_obs - np.mean(H_obs)) / np.std(H_obs)) H_cat = ((H_cat - np.mean(H_cat)) / np.std(H_cat)) ff_obs = cross_corr_to_fourier_space(H_obs) ff_cat = cross_corr_to_fourier_space(H_cat) cross_corr = (ff_obs * np.conj(ff_cat)) step = 1 frequ = np.fft.fftfreq(ff_obs.size, d=step).reshape(ff_obs.shape) max_frequ = np.max(frequ) threshold = (0.02 * max_frequ) cross_corr[((frequ < threshold) & (frequ > (- threshold)))] = 0 cross_corr = np.real(np.fft.ifft2(cross_corr)) cross_corr = np.fft.fftshift(cross_corr) peak = np.argwhere((cross_corr == cross_corr.max()))[0] around_peak = cross_corr[((peak[0] - 1):(peak[0] + 2), (peak[1] - 
1):(peak[1] + 2))] peak_x_subpixel = ((np.sum((np.sum(around_peak, axis=1) * (np.arange(around_peak.shape[0]) + 1))) / np.sum(around_peak)) - 2) peak_y_subpixel = ((np.sum((np.sum(around_peak, axis=0) * (np.arange(around_peak.shape[1]) + 1))) / np.sum(around_peak)) - 2) signal = np.sum(cross_corr[((peak[0] - 1):(peak[0] + 2), (peak[1] - 1):(peak[1] + 2))]) middle_x = (cross_corr.shape[0] / 2) middle_y = (cross_corr.shape[1] / 2) x_shift = (((peak[0] + peak_x_subpixel) - middle_x) * binwidth_dist) y_shift = (((peak[1] + peak_y_subpixel) - middle_y) * binwidth_ang) scaling = (np.e ** (- x_shift)) rotation = y_shift return (scaling, rotation, signal)
Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them. This is using cross correlation Parameters ---------- log_distance_obs: array first axis to consider of observations ( log distance) angle_obs: array second axis to consider of observations ( angle) log_distance_cat: array first axis to consider of catalog data (log distance) angle_cat: array first axis to consider of catalog data (angle) Returns ------- x_shift, y_shift
get_transformation.py
peak_with_cross_correlation
fanff/astrometry
9
python
def peak_with_cross_correlation(log_distance_obs, angle_obs, log_distance_cat, angle_cat, scale_guessed=False): 'Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them.\n\n This is using cross correlation\n\n Parameters\n ----------\n log_distance_obs: array\n first axis to consider of observations ( log distance)\n angle_obs: array\n second axis to consider of observations ( angle)\n log_distance_cat: array\n first axis to consider of catalog data (log distance)\n angle_cat: array\n first axis to consider of catalog data (angle)\n\n Returns\n -------\n x_shift, y_shift\n\n ' if (scale_guessed == False): minimum_distance = np.log(8) maximum_distance = max(log_distance_obs) else: minimum_distance = min([min(log_distance_cat), min(log_distance_obs)]) maximum_distance = max([max(log_distance_cat), max(log_distance_obs)]) (bins_dist, binwidth_dist) = np.linspace(minimum_distance, maximum_distance, 3000, retstep=True) (bins_ang, binwidth_ang) = np.linspace(min([min(angle_cat), min(angle_obs)]), max([max(angle_cat), max(angle_obs)]), (360 * 3), retstep=True) bins = [bins_dist, bins_ang] (H_obs, x_edges_obs, y_edges_obs) = np.histogram2d(log_distance_obs, angle_obs, bins=bins) (H_cat, x_edges_cat, y_edges_cat) = np.histogram2d(log_distance_cat, angle_cat, bins=bins) H_obs = ((H_obs - np.mean(H_obs)) / np.std(H_obs)) H_cat = ((H_cat - np.mean(H_cat)) / np.std(H_cat)) ff_obs = cross_corr_to_fourier_space(H_obs) ff_cat = cross_corr_to_fourier_space(H_cat) cross_corr = (ff_obs * np.conj(ff_cat)) step = 1 frequ = np.fft.fftfreq(ff_obs.size, d=step).reshape(ff_obs.shape) max_frequ = np.max(frequ) threshold = (0.02 * max_frequ) cross_corr[((frequ < threshold) & (frequ > (- threshold)))] = 0 cross_corr = np.real(np.fft.ifft2(cross_corr)) cross_corr = np.fft.fftshift(cross_corr) peak = np.argwhere((cross_corr == cross_corr.max()))[0] around_peak = cross_corr[((peak[0] - 1):(peak[0] + 2), (peak[1] - 
1):(peak[1] + 2))] peak_x_subpixel = ((np.sum((np.sum(around_peak, axis=1) * (np.arange(around_peak.shape[0]) + 1))) / np.sum(around_peak)) - 2) peak_y_subpixel = ((np.sum((np.sum(around_peak, axis=0) * (np.arange(around_peak.shape[1]) + 1))) / np.sum(around_peak)) - 2) signal = np.sum(cross_corr[((peak[0] - 1):(peak[0] + 2), (peak[1] - 1):(peak[1] + 2))]) middle_x = (cross_corr.shape[0] / 2) middle_y = (cross_corr.shape[1] / 2) x_shift = (((peak[0] + peak_x_subpixel) - middle_x) * binwidth_dist) y_shift = (((peak[1] + peak_y_subpixel) - middle_y) * binwidth_ang) scaling = (np.e ** (- x_shift)) rotation = y_shift return (scaling, rotation, signal)
def peak_with_cross_correlation(log_distance_obs, angle_obs, log_distance_cat, angle_cat, scale_guessed=False): 'Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them.\n\n This is using cross correlation\n\n Parameters\n ----------\n log_distance_obs: array\n first axis to consider of observations ( log distance)\n angle_obs: array\n second axis to consider of observations ( angle)\n log_distance_cat: array\n first axis to consider of catalog data (log distance)\n angle_cat: array\n first axis to consider of catalog data (angle)\n\n Returns\n -------\n x_shift, y_shift\n\n ' if (scale_guessed == False): minimum_distance = np.log(8) maximum_distance = max(log_distance_obs) else: minimum_distance = min([min(log_distance_cat), min(log_distance_obs)]) maximum_distance = max([max(log_distance_cat), max(log_distance_obs)]) (bins_dist, binwidth_dist) = np.linspace(minimum_distance, maximum_distance, 3000, retstep=True) (bins_ang, binwidth_ang) = np.linspace(min([min(angle_cat), min(angle_obs)]), max([max(angle_cat), max(angle_obs)]), (360 * 3), retstep=True) bins = [bins_dist, bins_ang] (H_obs, x_edges_obs, y_edges_obs) = np.histogram2d(log_distance_obs, angle_obs, bins=bins) (H_cat, x_edges_cat, y_edges_cat) = np.histogram2d(log_distance_cat, angle_cat, bins=bins) H_obs = ((H_obs - np.mean(H_obs)) / np.std(H_obs)) H_cat = ((H_cat - np.mean(H_cat)) / np.std(H_cat)) ff_obs = cross_corr_to_fourier_space(H_obs) ff_cat = cross_corr_to_fourier_space(H_cat) cross_corr = (ff_obs * np.conj(ff_cat)) step = 1 frequ = np.fft.fftfreq(ff_obs.size, d=step).reshape(ff_obs.shape) max_frequ = np.max(frequ) threshold = (0.02 * max_frequ) cross_corr[((frequ < threshold) & (frequ > (- threshold)))] = 0 cross_corr = np.real(np.fft.ifft2(cross_corr)) cross_corr = np.fft.fftshift(cross_corr) peak = np.argwhere((cross_corr == cross_corr.max()))[0] around_peak = cross_corr[((peak[0] - 1):(peak[0] + 2), (peak[1] - 
1):(peak[1] + 2))] peak_x_subpixel = ((np.sum((np.sum(around_peak, axis=1) * (np.arange(around_peak.shape[0]) + 1))) / np.sum(around_peak)) - 2) peak_y_subpixel = ((np.sum((np.sum(around_peak, axis=0) * (np.arange(around_peak.shape[1]) + 1))) / np.sum(around_peak)) - 2) signal = np.sum(cross_corr[((peak[0] - 1):(peak[0] + 2), (peak[1] - 1):(peak[1] + 2))]) middle_x = (cross_corr.shape[0] / 2) middle_y = (cross_corr.shape[1] / 2) x_shift = (((peak[0] + peak_x_subpixel) - middle_x) * binwidth_dist) y_shift = (((peak[1] + peak_y_subpixel) - middle_y) * binwidth_ang) scaling = (np.e ** (- x_shift)) rotation = y_shift return (scaling, rotation, signal)<|docstring|>Find the relation between the two sets. Either the positional offset (not used for that at the moment) or the scale+angle between them. This is using cross correlation Parameters ---------- log_distance_obs: array first axis to consider of observations ( log distance) angle_obs: array second axis to consider of observations ( angle) log_distance_cat: array first axis to consider of catalog data (log distance) angle_cat: array first axis to consider of catalog data (angle) Returns ------- x_shift, y_shift<|endoftext|>
41ae4a27af67f82f301cdda808535cc2dbfa6e13224f39147880c9e3d23e2a97
def get_scaling_and_rotation(observation, catalog, wcsprm, scale_guessed, verbose=True, report_global={}): 'Calculate the scaling and rotation compared to the catalog based on the method of Kaiser et al. (1999).\n\n This should be quite similar to the approach by SCAMP.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n verbose : boolean\n Set to False to supress output to the console\n\n\n Returns\n -------\n wcs, signal, report\n\n ' catalog_on_sensor = wcsprm.s2p(catalog[['ra', 'dec']], 1) catalog_on_sensor = catalog_on_sensor['pixcrd'] obs_x = [observation['xcenter'].values] cat_x = np.array([catalog_on_sensor[(:, 0)]]) obs_y = [observation['ycenter'].values] cat_y = np.array([catalog_on_sensor[(:, 1)]]) log_distances_obs = calculate_log_dist(obs_x, obs_y) log_distances_cat = calculate_log_dist(cat_x, cat_y) angles_obs = calculate_angles(obs_x, obs_y) angles_cat = calculate_angles(cat_x, cat_y) (scaling, rotation, signal) = peak_with_cross_correlation(log_distances_obs, angles_obs, log_distances_cat, angles_cat, scale_guessed=scale_guessed) (scaling_reflected, rotation_reflected, signal_reflected) = peak_with_cross_correlation(log_distances_obs, (- angles_obs), log_distances_cat, angles_cat, scale_guessed=scale_guessed) if (signal_reflected > signal): is_reflected = True confidence = (signal_reflected / signal) scaling = scaling_reflected rotation = rotation_reflected else: is_reflected = False confidence = (signal / signal_reflected) rot = rotation_matrix(rotation) if is_reflected: rot = (np.array([[1, 0], [0, (- 1)]]) @ rot) wcsprm_new = rotate(copy.copy(wcsprm), rot) wcsprm_new = scale(wcsprm_new, scaling) if is_reflected: refl = '' else: refl = 'not ' print(((('Found a rotation of {:.3g} deg and the pixelscale was scaled with the factor 
{:.3g}.'.format((((rotation / 2) / np.pi) * 360), scaling) + 'The image was ') + refl) + 'mirrored.')) if verbose: print('The confidence level is {}. values between 1 and 2 are bad. Much higher values are best.'.format(confidence)) print('Note that there still might be a 180deg rotation. If this is the case it should be correct in the next step') return wcsprm_new
Calculate the scaling and rotation compared to the catalog based on the method of Kaiser et al. (1999). This should be quite similar to the approach by SCAMP. Parameters ---------- observation : dataframe pandas dataframe with sources on the observation catalog : dataframe pandas dataframe with nearby sources from online catalogs with accurate astrometric information wcsprm Wcsprm file verbose : boolean Set to False to supress output to the console Returns ------- wcs, signal, report
get_transformation.py
get_scaling_and_rotation
fanff/astrometry
9
python
def get_scaling_and_rotation(observation, catalog, wcsprm, scale_guessed, verbose=True, report_global={}): 'Calculate the scaling and rotation compared to the catalog based on the method of Kaiser et al. (1999).\n\n This should be quite similar to the approach by SCAMP.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n verbose : boolean\n Set to False to supress output to the console\n\n\n Returns\n -------\n wcs, signal, report\n\n ' catalog_on_sensor = wcsprm.s2p(catalog[['ra', 'dec']], 1) catalog_on_sensor = catalog_on_sensor['pixcrd'] obs_x = [observation['xcenter'].values] cat_x = np.array([catalog_on_sensor[(:, 0)]]) obs_y = [observation['ycenter'].values] cat_y = np.array([catalog_on_sensor[(:, 1)]]) log_distances_obs = calculate_log_dist(obs_x, obs_y) log_distances_cat = calculate_log_dist(cat_x, cat_y) angles_obs = calculate_angles(obs_x, obs_y) angles_cat = calculate_angles(cat_x, cat_y) (scaling, rotation, signal) = peak_with_cross_correlation(log_distances_obs, angles_obs, log_distances_cat, angles_cat, scale_guessed=scale_guessed) (scaling_reflected, rotation_reflected, signal_reflected) = peak_with_cross_correlation(log_distances_obs, (- angles_obs), log_distances_cat, angles_cat, scale_guessed=scale_guessed) if (signal_reflected > signal): is_reflected = True confidence = (signal_reflected / signal) scaling = scaling_reflected rotation = rotation_reflected else: is_reflected = False confidence = (signal / signal_reflected) rot = rotation_matrix(rotation) if is_reflected: rot = (np.array([[1, 0], [0, (- 1)]]) @ rot) wcsprm_new = rotate(copy.copy(wcsprm), rot) wcsprm_new = scale(wcsprm_new, scaling) if is_reflected: refl = else: refl = 'not ' print(((('Found a rotation of {:.3g} deg and the pixelscale was scaled with the factor {:.3g}.'.format((((rotation 
/ 2) / np.pi) * 360), scaling) + 'The image was ') + refl) + 'mirrored.')) if verbose: print('The confidence level is {}. values between 1 and 2 are bad. Much higher values are best.'.format(confidence)) print('Note that there still might be a 180deg rotation. If this is the case it should be correct in the next step') return wcsprm_new
def get_scaling_and_rotation(observation, catalog, wcsprm, scale_guessed, verbose=True, report_global={}): 'Calculate the scaling and rotation compared to the catalog based on the method of Kaiser et al. (1999).\n\n This should be quite similar to the approach by SCAMP.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n verbose : boolean\n Set to False to supress output to the console\n\n\n Returns\n -------\n wcs, signal, report\n\n ' catalog_on_sensor = wcsprm.s2p(catalog[['ra', 'dec']], 1) catalog_on_sensor = catalog_on_sensor['pixcrd'] obs_x = [observation['xcenter'].values] cat_x = np.array([catalog_on_sensor[(:, 0)]]) obs_y = [observation['ycenter'].values] cat_y = np.array([catalog_on_sensor[(:, 1)]]) log_distances_obs = calculate_log_dist(obs_x, obs_y) log_distances_cat = calculate_log_dist(cat_x, cat_y) angles_obs = calculate_angles(obs_x, obs_y) angles_cat = calculate_angles(cat_x, cat_y) (scaling, rotation, signal) = peak_with_cross_correlation(log_distances_obs, angles_obs, log_distances_cat, angles_cat, scale_guessed=scale_guessed) (scaling_reflected, rotation_reflected, signal_reflected) = peak_with_cross_correlation(log_distances_obs, (- angles_obs), log_distances_cat, angles_cat, scale_guessed=scale_guessed) if (signal_reflected > signal): is_reflected = True confidence = (signal_reflected / signal) scaling = scaling_reflected rotation = rotation_reflected else: is_reflected = False confidence = (signal / signal_reflected) rot = rotation_matrix(rotation) if is_reflected: rot = (np.array([[1, 0], [0, (- 1)]]) @ rot) wcsprm_new = rotate(copy.copy(wcsprm), rot) wcsprm_new = scale(wcsprm_new, scaling) if is_reflected: refl = else: refl = 'not ' print(((('Found a rotation of {:.3g} deg and the pixelscale was scaled with the factor {:.3g}.'.format((((rotation 
/ 2) / np.pi) * 360), scaling) + 'The image was ') + refl) + 'mirrored.')) if verbose: print('The confidence level is {}. values between 1 and 2 are bad. Much higher values are best.'.format(confidence)) print('Note that there still might be a 180deg rotation. If this is the case it should be correct in the next step') return wcsprm_new<|docstring|>Calculate the scaling and rotation compared to the catalog based on the method of Kaiser et al. (1999). This should be quite similar to the approach by SCAMP. Parameters ---------- observation : dataframe pandas dataframe with sources on the observation catalog : dataframe pandas dataframe with nearby sources from online catalogs with accurate astrometric information wcsprm Wcsprm file verbose : boolean Set to False to supress output to the console Returns ------- wcs, signal, report<|endoftext|>
920694a33ac7d303ca99a5e1a7f5f60ac9b8183223ee7124121023d38e30cd2f
def calculate_rms(observation, catalog, wcsprm): 'Calculate the root mean square deviation of the astrometry fit' on_sky = wcsprm.p2s([[0, 0], [1, 1]], 0)['world'] px_scale = np.sqrt((((on_sky[(0, 0)] - on_sky[(1, 0)]) ** 2) + ((on_sky[(0, 1)] - on_sky[(1, 1)]) ** 2))) px_scale = ((px_scale * 60) * 60) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=3) rms = np.sqrt(np.mean(np.square(distances))) print('Within 3 pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format((px_scale * 3), len(obs_x), rms, (rms * px_scale))) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=5) rms = np.sqrt(np.mean(np.square(distances))) print('Within 5 pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format((px_scale * 5), len(obs_x), rms, (rms * px_scale))) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=s.RMS_PX_THRESHOLD) rms = np.sqrt(np.mean(np.square(distances))) print('Within {} pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format(s.RMS_PX_THRESHOLD, (px_scale * s.RMS_PX_THRESHOLD), len(obs_x), rms, (rms * px_scale))) return {'radius_px': s.RMS_PX_THRESHOLD, 'matches': len(obs_x), 'rms': rms}
Calculate the root mean square deviation of the astrometry fit
get_transformation.py
calculate_rms
fanff/astrometry
9
python
def calculate_rms(observation, catalog, wcsprm): on_sky = wcsprm.p2s([[0, 0], [1, 1]], 0)['world'] px_scale = np.sqrt((((on_sky[(0, 0)] - on_sky[(1, 0)]) ** 2) + ((on_sky[(0, 1)] - on_sky[(1, 1)]) ** 2))) px_scale = ((px_scale * 60) * 60) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=3) rms = np.sqrt(np.mean(np.square(distances))) print('Within 3 pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format((px_scale * 3), len(obs_x), rms, (rms * px_scale))) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=5) rms = np.sqrt(np.mean(np.square(distances))) print('Within 5 pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format((px_scale * 5), len(obs_x), rms, (rms * px_scale))) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=s.RMS_PX_THRESHOLD) rms = np.sqrt(np.mean(np.square(distances))) print('Within {} pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format(s.RMS_PX_THRESHOLD, (px_scale * s.RMS_PX_THRESHOLD), len(obs_x), rms, (rms * px_scale))) return {'radius_px': s.RMS_PX_THRESHOLD, 'matches': len(obs_x), 'rms': rms}
def calculate_rms(observation, catalog, wcsprm): on_sky = wcsprm.p2s([[0, 0], [1, 1]], 0)['world'] px_scale = np.sqrt((((on_sky[(0, 0)] - on_sky[(1, 0)]) ** 2) + ((on_sky[(0, 1)] - on_sky[(1, 1)]) ** 2))) px_scale = ((px_scale * 60) * 60) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=3) rms = np.sqrt(np.mean(np.square(distances))) print('Within 3 pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format((px_scale * 3), len(obs_x), rms, (rms * px_scale))) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=5) rms = np.sqrt(np.mean(np.square(distances))) print('Within 5 pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format((px_scale * 5), len(obs_x), rms, (rms * px_scale))) (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=s.RMS_PX_THRESHOLD) rms = np.sqrt(np.mean(np.square(distances))) print('Within {} pixel or {:.3g} arcsec {} sources where matched. The rms is {:.3g} pixel or {:.3g} arcsec'.format(s.RMS_PX_THRESHOLD, (px_scale * s.RMS_PX_THRESHOLD), len(obs_x), rms, (rms * px_scale))) return {'radius_px': s.RMS_PX_THRESHOLD, 'matches': len(obs_x), 'rms': rms}<|docstring|>Calculate the root mean square deviation of the astrometry fit<|endoftext|>
9a6f41bfca8eec48b833a5a15e2d98ec8e74f9c3eb82a4bd9069888224b8ab6b
def fine_transformation(observation, catalog, wcsprm, threshold=1, verbose=True, compare_threshold=3, skip_rot_scale=False): 'Final improvement of registration. This requires that the wcs is already accurate to a few pixels.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n threshold : float\n maximum separation to consider two sources matches\n verbose : boolean\n print details\n\n Returns\n -------\n wcsprm\n\n ' wcsprm_original = wcsprm wcsprm = copy.copy(wcsprm) if ((threshold == 20) or (threshold == 100)): observation = observation.nlargest(5, 'aperture_sum') (obs_x, obs_y, cat_x, cat_y, _) = find_matches(observation, catalog, wcsprm, threshold=threshold) if (len(obs_x) < 4): return (wcsprm_original, 0) angle_offset = ((- calculate_angles([obs_x], [obs_y])) + calculate_angles([cat_x], [cat_y])) log_distances_obs = calculate_log_dist([obs_x], [obs_y]) log_distances_cat = calculate_log_dist([cat_x], [cat_y]) threshold_min = np.log(20) if (threshold == 10): threshold_min = np.log(200) mask = ((log_distances_obs > threshold_min) & (log_distances_cat > threshold_min)) scale_offset = ((- log_distances_obs) + log_distances_cat) angle_offset = angle_offset[mask] scale_offset = scale_offset[mask] rotation = np.mean(angle_offset) scaling = (np.e ** np.mean(scale_offset)) rot = rotation_matrix(rotation) if (not skip_rot_scale): wcsprm = rotate(wcsprm, rot) if ((scaling > 0.9) and (scaling < 1.1)): wcsprm = scale(wcsprm, scaling) else: return (wcsprm_original, 0) (obs_x, obs_y, cat_x, cat_y, _) = find_matches(observation, catalog, wcsprm, threshold=threshold) if (len(obs_x) < 4): return (wcsprm_original, 0) x_shift = np.mean((obs_x - cat_x)) y_shift = np.mean((obs_y - cat_y)) current_central_pixel = wcsprm.crpix new_central_pixel = [(current_central_pixel[0] + x_shift), 
(current_central_pixel[1] + y_shift)] wcsprm.crpix = new_central_pixel (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=compare_threshold) rms = np.sqrt(np.mean(np.square(distances))) score = (len(obs_x) / (rms + 10)) return (wcsprm, score)
Final improvement of registration. This requires that the wcs is already accurate to a few pixels. Parameters ---------- observation : dataframe pandas dataframe with sources on the observation catalog : dataframe pandas dataframe with nearby sources from online catalogs with accurate astrometric information wcsprm Wcsprm file threshold : float maximum separation to consider two sources matches verbose : boolean print details Returns ------- wcsprm
get_transformation.py
fine_transformation
fanff/astrometry
9
python
def fine_transformation(observation, catalog, wcsprm, threshold=1, verbose=True, compare_threshold=3, skip_rot_scale=False): 'Final improvement of registration. This requires that the wcs is already accurate to a few pixels.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n threshold : float\n maximum separation to consider two sources matches\n verbose : boolean\n print details\n\n Returns\n -------\n wcsprm\n\n ' wcsprm_original = wcsprm wcsprm = copy.copy(wcsprm) if ((threshold == 20) or (threshold == 100)): observation = observation.nlargest(5, 'aperture_sum') (obs_x, obs_y, cat_x, cat_y, _) = find_matches(observation, catalog, wcsprm, threshold=threshold) if (len(obs_x) < 4): return (wcsprm_original, 0) angle_offset = ((- calculate_angles([obs_x], [obs_y])) + calculate_angles([cat_x], [cat_y])) log_distances_obs = calculate_log_dist([obs_x], [obs_y]) log_distances_cat = calculate_log_dist([cat_x], [cat_y]) threshold_min = np.log(20) if (threshold == 10): threshold_min = np.log(200) mask = ((log_distances_obs > threshold_min) & (log_distances_cat > threshold_min)) scale_offset = ((- log_distances_obs) + log_distances_cat) angle_offset = angle_offset[mask] scale_offset = scale_offset[mask] rotation = np.mean(angle_offset) scaling = (np.e ** np.mean(scale_offset)) rot = rotation_matrix(rotation) if (not skip_rot_scale): wcsprm = rotate(wcsprm, rot) if ((scaling > 0.9) and (scaling < 1.1)): wcsprm = scale(wcsprm, scaling) else: return (wcsprm_original, 0) (obs_x, obs_y, cat_x, cat_y, _) = find_matches(observation, catalog, wcsprm, threshold=threshold) if (len(obs_x) < 4): return (wcsprm_original, 0) x_shift = np.mean((obs_x - cat_x)) y_shift = np.mean((obs_y - cat_y)) current_central_pixel = wcsprm.crpix new_central_pixel = [(current_central_pixel[0] + x_shift), 
(current_central_pixel[1] + y_shift)] wcsprm.crpix = new_central_pixel (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=compare_threshold) rms = np.sqrt(np.mean(np.square(distances))) score = (len(obs_x) / (rms + 10)) return (wcsprm, score)
def fine_transformation(observation, catalog, wcsprm, threshold=1, verbose=True, compare_threshold=3, skip_rot_scale=False): 'Final improvement of registration. This requires that the wcs is already accurate to a few pixels.\n\n Parameters\n ----------\n observation : dataframe\n pandas dataframe with sources on the observation\n catalog : dataframe\n pandas dataframe with nearby sources from online catalogs with accurate astrometric information\n wcsprm\n Wcsprm file\n threshold : float\n maximum separation to consider two sources matches\n verbose : boolean\n print details\n\n Returns\n -------\n wcsprm\n\n ' wcsprm_original = wcsprm wcsprm = copy.copy(wcsprm) if ((threshold == 20) or (threshold == 100)): observation = observation.nlargest(5, 'aperture_sum') (obs_x, obs_y, cat_x, cat_y, _) = find_matches(observation, catalog, wcsprm, threshold=threshold) if (len(obs_x) < 4): return (wcsprm_original, 0) angle_offset = ((- calculate_angles([obs_x], [obs_y])) + calculate_angles([cat_x], [cat_y])) log_distances_obs = calculate_log_dist([obs_x], [obs_y]) log_distances_cat = calculate_log_dist([cat_x], [cat_y]) threshold_min = np.log(20) if (threshold == 10): threshold_min = np.log(200) mask = ((log_distances_obs > threshold_min) & (log_distances_cat > threshold_min)) scale_offset = ((- log_distances_obs) + log_distances_cat) angle_offset = angle_offset[mask] scale_offset = scale_offset[mask] rotation = np.mean(angle_offset) scaling = (np.e ** np.mean(scale_offset)) rot = rotation_matrix(rotation) if (not skip_rot_scale): wcsprm = rotate(wcsprm, rot) if ((scaling > 0.9) and (scaling < 1.1)): wcsprm = scale(wcsprm, scaling) else: return (wcsprm_original, 0) (obs_x, obs_y, cat_x, cat_y, _) = find_matches(observation, catalog, wcsprm, threshold=threshold) if (len(obs_x) < 4): return (wcsprm_original, 0) x_shift = np.mean((obs_x - cat_x)) y_shift = np.mean((obs_y - cat_y)) current_central_pixel = wcsprm.crpix new_central_pixel = [(current_central_pixel[0] + x_shift), 
(current_central_pixel[1] + y_shift)] wcsprm.crpix = new_central_pixel (obs_x, obs_y, cat_x, cat_y, distances) = find_matches(observation, catalog, wcsprm, threshold=compare_threshold) rms = np.sqrt(np.mean(np.square(distances))) score = (len(obs_x) / (rms + 10)) return (wcsprm, score)<|docstring|>Final improvement of registration. This requires that the wcs is already accurate to a few pixels. Parameters ---------- observation : dataframe pandas dataframe with sources on the observation catalog : dataframe pandas dataframe with nearby sources from online catalogs with accurate astrometric information wcsprm Wcsprm file threshold : float maximum separation to consider two sources matches verbose : boolean print details Returns ------- wcsprm<|endoftext|>
02eb6782424b03d6f8ea0bb3580961427fb58151f98707b764fbfecc9e4b7267
def wait(wait_fn, wait_complete_fn, timeout=None, spin_cb=None): "Blocks waiting for an event without blocking the thread indefinitely.\n\n See https://github.com/grpc/grpc/issues/19464 for full context. CPython's\n `threading.Event.wait` and `threading.Condition.wait` methods, if invoked\n without a timeout kwarg, may block the calling thread indefinitely. If the\n call is made from the main thread, this means that signal handlers may not\n run for an arbitrarily long period of time.\n\n This wrapper calls the supplied wait function with an arbitrary short\n timeout to ensure that no signal handler has to wait longer than\n MAXIMUM_WAIT_TIMEOUT before executing.\n\n Args:\n wait_fn: A callable acceptable a single float-valued kwarg named\n `timeout`. This function is expected to be one of `threading.Event.wait`\n or `threading.Condition.wait`.\n wait_complete_fn: A callable taking no arguments and returning a bool.\n When this function returns true, it indicates that waiting should cease.\n timeout: An optional float-valued number of seconds after which the wait\n should cease.\n spin_cb: An optional Callable taking no arguments and returning nothing.\n This callback will be called on each iteration of the spin. This may be\n used for, e.g. work related to forking.\n\n Returns:\n True if a timeout was supplied and it was reached. False otherwise.\n " if (timeout is None): while (not wait_complete_fn()): _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) else: end = (time.time() + timeout) while (not wait_complete_fn()): remaining = min((end - time.time()), MAXIMUM_WAIT_TIMEOUT) if (remaining < 0): return True _wait_once(wait_fn, remaining, spin_cb) return False
Blocks waiting for an event without blocking the thread indefinitely. See https://github.com/grpc/grpc/issues/19464 for full context. CPython's `threading.Event.wait` and `threading.Condition.wait` methods, if invoked without a timeout kwarg, may block the calling thread indefinitely. If the call is made from the main thread, this means that signal handlers may not run for an arbitrarily long period of time. This wrapper calls the supplied wait function with an arbitrary short timeout to ensure that no signal handler has to wait longer than MAXIMUM_WAIT_TIMEOUT before executing. Args: wait_fn: A callable acceptable a single float-valued kwarg named `timeout`. This function is expected to be one of `threading.Event.wait` or `threading.Condition.wait`. wait_complete_fn: A callable taking no arguments and returning a bool. When this function returns true, it indicates that waiting should cease. timeout: An optional float-valued number of seconds after which the wait should cease. spin_cb: An optional Callable taking no arguments and returning nothing. This callback will be called on each iteration of the spin. This may be used for, e.g. work related to forking. Returns: True if a timeout was supplied and it was reached. False otherwise.
src/python/grpcio/grpc/_common.py
wait
nextgenadarsh/grpc
9
python
def wait(wait_fn, wait_complete_fn, timeout=None, spin_cb=None): "Blocks waiting for an event without blocking the thread indefinitely.\n\n See https://github.com/grpc/grpc/issues/19464 for full context. CPython's\n `threading.Event.wait` and `threading.Condition.wait` methods, if invoked\n without a timeout kwarg, may block the calling thread indefinitely. If the\n call is made from the main thread, this means that signal handlers may not\n run for an arbitrarily long period of time.\n\n This wrapper calls the supplied wait function with an arbitrary short\n timeout to ensure that no signal handler has to wait longer than\n MAXIMUM_WAIT_TIMEOUT before executing.\n\n Args:\n wait_fn: A callable acceptable a single float-valued kwarg named\n `timeout`. This function is expected to be one of `threading.Event.wait`\n or `threading.Condition.wait`.\n wait_complete_fn: A callable taking no arguments and returning a bool.\n When this function returns true, it indicates that waiting should cease.\n timeout: An optional float-valued number of seconds after which the wait\n should cease.\n spin_cb: An optional Callable taking no arguments and returning nothing.\n This callback will be called on each iteration of the spin. This may be\n used for, e.g. work related to forking.\n\n Returns:\n True if a timeout was supplied and it was reached. False otherwise.\n " if (timeout is None): while (not wait_complete_fn()): _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) else: end = (time.time() + timeout) while (not wait_complete_fn()): remaining = min((end - time.time()), MAXIMUM_WAIT_TIMEOUT) if (remaining < 0): return True _wait_once(wait_fn, remaining, spin_cb) return False
def wait(wait_fn, wait_complete_fn, timeout=None, spin_cb=None): "Blocks waiting for an event without blocking the thread indefinitely.\n\n See https://github.com/grpc/grpc/issues/19464 for full context. CPython's\n `threading.Event.wait` and `threading.Condition.wait` methods, if invoked\n without a timeout kwarg, may block the calling thread indefinitely. If the\n call is made from the main thread, this means that signal handlers may not\n run for an arbitrarily long period of time.\n\n This wrapper calls the supplied wait function with an arbitrary short\n timeout to ensure that no signal handler has to wait longer than\n MAXIMUM_WAIT_TIMEOUT before executing.\n\n Args:\n wait_fn: A callable acceptable a single float-valued kwarg named\n `timeout`. This function is expected to be one of `threading.Event.wait`\n or `threading.Condition.wait`.\n wait_complete_fn: A callable taking no arguments and returning a bool.\n When this function returns true, it indicates that waiting should cease.\n timeout: An optional float-valued number of seconds after which the wait\n should cease.\n spin_cb: An optional Callable taking no arguments and returning nothing.\n This callback will be called on each iteration of the spin. This may be\n used for, e.g. work related to forking.\n\n Returns:\n True if a timeout was supplied and it was reached. False otherwise.\n " if (timeout is None): while (not wait_complete_fn()): _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) else: end = (time.time() + timeout) while (not wait_complete_fn()): remaining = min((end - time.time()), MAXIMUM_WAIT_TIMEOUT) if (remaining < 0): return True _wait_once(wait_fn, remaining, spin_cb) return False<|docstring|>Blocks waiting for an event without blocking the thread indefinitely. See https://github.com/grpc/grpc/issues/19464 for full context. CPython's `threading.Event.wait` and `threading.Condition.wait` methods, if invoked without a timeout kwarg, may block the calling thread indefinitely. 
If the call is made from the main thread, this means that signal handlers may not run for an arbitrarily long period of time. This wrapper calls the supplied wait function with an arbitrary short timeout to ensure that no signal handler has to wait longer than MAXIMUM_WAIT_TIMEOUT before executing. Args: wait_fn: A callable acceptable a single float-valued kwarg named `timeout`. This function is expected to be one of `threading.Event.wait` or `threading.Condition.wait`. wait_complete_fn: A callable taking no arguments and returning a bool. When this function returns true, it indicates that waiting should cease. timeout: An optional float-valued number of seconds after which the wait should cease. spin_cb: An optional Callable taking no arguments and returning nothing. This callback will be called on each iteration of the spin. This may be used for, e.g. work related to forking. Returns: True if a timeout was supplied and it was reached. False otherwise.<|endoftext|>
632987084d13b591ee8773c35211d2d4a585f4a9c7645bea98bbf0d4827ffdea
def make_forcing(ds_trajectory, ds_domain, levels_definition, sampling_method): '\n Make a forcing profiles along ds_trajectory using data in ds_domain.\n\n See domains.utils.levels.LevelsDefinition and domains.era5.SamplingMethodDefinition\n for how to construct levels_definition and sampling_method objects\n ' ds_sampling = _make_latlontime_sampling_points(method=sampling_method.time_sampling_method, ds_trajectory=ds_trajectory, ds_domain=ds_domain) ds_sampling = ds_sampling.drop_vars(['origin_lon', 'origin_lat', 'origin_datetime']) ds_sampling['level'] = make_levels(method=levels_definition.method, n_levels=levels_definition.n_levels, z_top=levels_definition.z_top, dz_min=levels_definition.dz_min) forcing_profiles = [] for time in tqdm(ds_sampling.time): ds_profile_posn = ds_sampling.sel(time=time) ds_forcing_profile = profile_calculation.calculate_timestep(ds_profile_posn=ds_profile_posn, ds_domain=ds_domain, sampling_method=sampling_method) forcing_profiles.append(ds_forcing_profile) ds_forcing = xr.concat(forcing_profiles, dim='time') fix_units(ds_forcing) ds_forcing['origin_lon'] = ds_trajectory['origin_lon'] ds_forcing['origin_lat'] = ds_trajectory['origin_lat'] ds_forcing['origin_datetime'] = ds_trajectory['origin_datetime'] return ds_forcing
Make a forcing profiles along ds_trajectory using data in ds_domain. See domains.utils.levels.LevelsDefinition and domains.era5.SamplingMethodDefinition for how to construct levels_definition and sampling_method objects
lagtraj/forcings/create.py
make_forcing
BuildJet/lagtraj
4
python
def make_forcing(ds_trajectory, ds_domain, levels_definition, sampling_method): '\n Make a forcing profiles along ds_trajectory using data in ds_domain.\n\n See domains.utils.levels.LevelsDefinition and domains.era5.SamplingMethodDefinition\n for how to construct levels_definition and sampling_method objects\n ' ds_sampling = _make_latlontime_sampling_points(method=sampling_method.time_sampling_method, ds_trajectory=ds_trajectory, ds_domain=ds_domain) ds_sampling = ds_sampling.drop_vars(['origin_lon', 'origin_lat', 'origin_datetime']) ds_sampling['level'] = make_levels(method=levels_definition.method, n_levels=levels_definition.n_levels, z_top=levels_definition.z_top, dz_min=levels_definition.dz_min) forcing_profiles = [] for time in tqdm(ds_sampling.time): ds_profile_posn = ds_sampling.sel(time=time) ds_forcing_profile = profile_calculation.calculate_timestep(ds_profile_posn=ds_profile_posn, ds_domain=ds_domain, sampling_method=sampling_method) forcing_profiles.append(ds_forcing_profile) ds_forcing = xr.concat(forcing_profiles, dim='time') fix_units(ds_forcing) ds_forcing['origin_lon'] = ds_trajectory['origin_lon'] ds_forcing['origin_lat'] = ds_trajectory['origin_lat'] ds_forcing['origin_datetime'] = ds_trajectory['origin_datetime'] return ds_forcing
def make_forcing(ds_trajectory, ds_domain, levels_definition, sampling_method): '\n Make a forcing profiles along ds_trajectory using data in ds_domain.\n\n See domains.utils.levels.LevelsDefinition and domains.era5.SamplingMethodDefinition\n for how to construct levels_definition and sampling_method objects\n ' ds_sampling = _make_latlontime_sampling_points(method=sampling_method.time_sampling_method, ds_trajectory=ds_trajectory, ds_domain=ds_domain) ds_sampling = ds_sampling.drop_vars(['origin_lon', 'origin_lat', 'origin_datetime']) ds_sampling['level'] = make_levels(method=levels_definition.method, n_levels=levels_definition.n_levels, z_top=levels_definition.z_top, dz_min=levels_definition.dz_min) forcing_profiles = [] for time in tqdm(ds_sampling.time): ds_profile_posn = ds_sampling.sel(time=time) ds_forcing_profile = profile_calculation.calculate_timestep(ds_profile_posn=ds_profile_posn, ds_domain=ds_domain, sampling_method=sampling_method) forcing_profiles.append(ds_forcing_profile) ds_forcing = xr.concat(forcing_profiles, dim='time') fix_units(ds_forcing) ds_forcing['origin_lon'] = ds_trajectory['origin_lon'] ds_forcing['origin_lat'] = ds_trajectory['origin_lat'] ds_forcing['origin_datetime'] = ds_trajectory['origin_datetime'] return ds_forcing<|docstring|>Make a forcing profiles along ds_trajectory using data in ds_domain. See domains.utils.levels.LevelsDefinition and domains.era5.SamplingMethodDefinition for how to construct levels_definition and sampling_method objects<|endoftext|>
57a7b86fa3f129510de75877eb6bd5a8449d3766ec9f44a627061a0185de882c
def cli(args=None): '\n Function called with arguments passed from the command line when making\n trajectories through the CLI. When `args==None` they will be taken from\n `sys.argv`\n ' import argparse argparser = argparse.ArgumentParser() argparser.add_argument('forcing') argparser.add_argument('-d', '--data-path', default=DEFAULT_ROOT_DATA_PATH, type=Path) available_conversion_targets = conversion.targets.available.keys() argparser.add_argument('-c', '--conversion', help=f"name of output conversion to use, available conversions: {', '.join(available_conversion_targets)}", default=None) argparser.add_argument('--debug', default=False, action='store_true') args = argparser.parse_args(args=args) forcing_defn = load.load_definition(root_data_path=args.data_path, forcing_name=args.forcing) with optional_debugging(args.debug): main(data_path=args.data_path, forcing_defn=forcing_defn, conversion_name=args.conversion)
Function called with arguments passed from the command line when making trajectories through the CLI. When `args==None` they will be taken from `sys.argv`
lagtraj/forcings/create.py
cli
BuildJet/lagtraj
4
python
def cli(args=None): '\n Function called with arguments passed from the command line when making\n trajectories through the CLI. When `args==None` they will be taken from\n `sys.argv`\n ' import argparse argparser = argparse.ArgumentParser() argparser.add_argument('forcing') argparser.add_argument('-d', '--data-path', default=DEFAULT_ROOT_DATA_PATH, type=Path) available_conversion_targets = conversion.targets.available.keys() argparser.add_argument('-c', '--conversion', help=f"name of output conversion to use, available conversions: {', '.join(available_conversion_targets)}", default=None) argparser.add_argument('--debug', default=False, action='store_true') args = argparser.parse_args(args=args) forcing_defn = load.load_definition(root_data_path=args.data_path, forcing_name=args.forcing) with optional_debugging(args.debug): main(data_path=args.data_path, forcing_defn=forcing_defn, conversion_name=args.conversion)
def cli(args=None): '\n Function called with arguments passed from the command line when making\n trajectories through the CLI. When `args==None` they will be taken from\n `sys.argv`\n ' import argparse argparser = argparse.ArgumentParser() argparser.add_argument('forcing') argparser.add_argument('-d', '--data-path', default=DEFAULT_ROOT_DATA_PATH, type=Path) available_conversion_targets = conversion.targets.available.keys() argparser.add_argument('-c', '--conversion', help=f"name of output conversion to use, available conversions: {', '.join(available_conversion_targets)}", default=None) argparser.add_argument('--debug', default=False, action='store_true') args = argparser.parse_args(args=args) forcing_defn = load.load_definition(root_data_path=args.data_path, forcing_name=args.forcing) with optional_debugging(args.debug): main(data_path=args.data_path, forcing_defn=forcing_defn, conversion_name=args.conversion)<|docstring|>Function called with arguments passed from the command line when making trajectories through the CLI. When `args==None` they will be taken from `sys.argv`<|endoftext|>
6a857409926dd0946594b6a56540cdf7578fe7d9407cef30e07644bcc5e3bfe9
def visstd(a, s=0.1): '\n Normalise the image range for visualisation.\n :param a: the array to normalise\n :param s: ?\n :return: the normalised image\n ' return ((((a - a.mean()) / max(a.std(), 0.0001)) * s) + 0.5)
Normalise the image range for visualisation. :param a: the array to normalise :param s: ? :return: the normalised image
multiscale_dreaming.py
visstd
EdCo95/deepdream
2
python
def visstd(a, s=0.1): '\n Normalise the image range for visualisation.\n :param a: the array to normalise\n :param s: ?\n :return: the normalised image\n ' return ((((a - a.mean()) / max(a.std(), 0.0001)) * s) + 0.5)
def visstd(a, s=0.1): '\n Normalise the image range for visualisation.\n :param a: the array to normalise\n :param s: ?\n :return: the normalised image\n ' return ((((a - a.mean()) / max(a.std(), 0.0001)) * s) + 0.5)<|docstring|>Normalise the image range for visualisation. :param a: the array to normalise :param s: ? :return: the normalised image<|endoftext|>
cbeecb4878e1cbe9a5d0e3a6d788730a65362c8f0792ab4e4eb524db563e57e2
def T(layer): "\n Convenience function for getting a layer's output tensor\n :param layer: the layer to get the tensor\n :return: the tensor\n " return graph.get_tensor_by_name(('import/%s:0' % layer))
Convenience function for getting a layer's output tensor :param layer: the layer to get the tensor :return: the tensor
multiscale_dreaming.py
T
EdCo95/deepdream
2
python
def T(layer): "\n Convenience function for getting a layer's output tensor\n :param layer: the layer to get the tensor\n :return: the tensor\n " return graph.get_tensor_by_name(('import/%s:0' % layer))
def T(layer): "\n Convenience function for getting a layer's output tensor\n :param layer: the layer to get the tensor\n :return: the tensor\n " return graph.get_tensor_by_name(('import/%s:0' % layer))<|docstring|>Convenience function for getting a layer's output tensor :param layer: the layer to get the tensor :return: the tensor<|endoftext|>
a24aeb003041c1c5deb909b6680d2493f0575312f224c5b9b8b3bf5f67710c29
def tffunc(*argtypes): '\n Helper function that transforms the TF-graph generating function into a regular one - used to resize the image with\n Tensorflow in combination with the "resize" function below.\n :param argtypes: multiple parameters.\n :return: a normal function\n ' placeholders = list(map(tf.placeholder, argtypes)) def wrap(f): out = f(*placeholders) def wrapper(*args, **kw): return out.eval(dict(zip(placeholders, args)), session=kw.get('session')) return wrapper return wrap
Helper function that transforms the TF-graph generating function into a regular one - used to resize the image with Tensorflow in combination with the "resize" function below. :param argtypes: multiple parameters. :return: a normal function
multiscale_dreaming.py
tffunc
EdCo95/deepdream
2
python
def tffunc(*argtypes): '\n Helper function that transforms the TF-graph generating function into a regular one - used to resize the image with\n Tensorflow in combination with the "resize" function below.\n :param argtypes: multiple parameters.\n :return: a normal function\n ' placeholders = list(map(tf.placeholder, argtypes)) def wrap(f): out = f(*placeholders) def wrapper(*args, **kw): return out.eval(dict(zip(placeholders, args)), session=kw.get('session')) return wrapper return wrap
def tffunc(*argtypes): '\n Helper function that transforms the TF-graph generating function into a regular one - used to resize the image with\n Tensorflow in combination with the "resize" function below.\n :param argtypes: multiple parameters.\n :return: a normal function\n ' placeholders = list(map(tf.placeholder, argtypes)) def wrap(f): out = f(*placeholders) def wrapper(*args, **kw): return out.eval(dict(zip(placeholders, args)), session=kw.get('session')) return wrapper return wrap<|docstring|>Helper function that transforms the TF-graph generating function into a regular one - used to resize the image with Tensorflow in combination with the "resize" function below. :param argtypes: multiple parameters. :return: a normal function<|endoftext|>
578fd17f2a679d1ade1b513215fb00cb579c95567e60384777aba8c084c20ee7
def resize(img, size): '\n Resizes and image using Tensorflow. Works in tandem with tffunc, above.\n :param img: the image to resize.\n :param size: the size to change the image to.\n :return: the resized image.\n ' img = tf.expand_dims(img, 0) return tf.image.resize_bilinear(img, size)[(0, :, :, :)]
Resizes and image using Tensorflow. Works in tandem with tffunc, above. :param img: the image to resize. :param size: the size to change the image to. :return: the resized image.
multiscale_dreaming.py
resize
EdCo95/deepdream
2
python
def resize(img, size): '\n Resizes and image using Tensorflow. Works in tandem with tffunc, above.\n :param img: the image to resize.\n :param size: the size to change the image to.\n :return: the resized image.\n ' img = tf.expand_dims(img, 0) return tf.image.resize_bilinear(img, size)[(0, :, :, :)]
def resize(img, size): '\n Resizes and image using Tensorflow. Works in tandem with tffunc, above.\n :param img: the image to resize.\n :param size: the size to change the image to.\n :return: the resized image.\n ' img = tf.expand_dims(img, 0) return tf.image.resize_bilinear(img, size)[(0, :, :, :)]<|docstring|>Resizes and image using Tensorflow. Works in tandem with tffunc, above. :param img: the image to resize. :param size: the size to change the image to. :return: the resized image.<|endoftext|>
74dd05ea2d065c6a5074cc221ec55e3e49593558ed231de4386c89bbd1e0b482
def calc_grad_tiled(img, t_grad, tile_size=512): '\n Computes the value of tensor t_grad over the image in a tiled way. Random shifts are applied to the image to blur\n tile boundaries over multiple iterations.\n :param img: the image to modify.\n :param t_grad: the gradient to compute, as a TensorFlow operation.\n :param tile_size: the size of each image tile.\n :return: the randomly shifted image.\n ' size = tile_size (height, width) = img.shape[:2] (sx, sy) = np.random.randint(size, size=2) img_shift = np.roll(np.roll(img, sx, 1), sy, 0) grad = np.zeros_like(img) for y in range(0, max((height - (size // 2)), size), size): for x in range(0, max((width - (size // 2)), size), size): sub = img_shift[(y:(y + size), x:(x + size))] g = sess.run(t_grad, {t_input: sub}) grad[(y:(y + size), x:(x + size))] = g return np.roll(np.roll(grad, (- sx), 1), (- sy), 0)
Computes the value of tensor t_grad over the image in a tiled way. Random shifts are applied to the image to blur tile boundaries over multiple iterations. :param img: the image to modify. :param t_grad: the gradient to compute, as a TensorFlow operation. :param tile_size: the size of each image tile. :return: the randomly shifted image.
multiscale_dreaming.py
calc_grad_tiled
EdCo95/deepdream
2
python
def calc_grad_tiled(img, t_grad, tile_size=512): '\n Computes the value of tensor t_grad over the image in a tiled way. Random shifts are applied to the image to blur\n tile boundaries over multiple iterations.\n :param img: the image to modify.\n :param t_grad: the gradient to compute, as a TensorFlow operation.\n :param tile_size: the size of each image tile.\n :return: the randomly shifted image.\n ' size = tile_size (height, width) = img.shape[:2] (sx, sy) = np.random.randint(size, size=2) img_shift = np.roll(np.roll(img, sx, 1), sy, 0) grad = np.zeros_like(img) for y in range(0, max((height - (size // 2)), size), size): for x in range(0, max((width - (size // 2)), size), size): sub = img_shift[(y:(y + size), x:(x + size))] g = sess.run(t_grad, {t_input: sub}) grad[(y:(y + size), x:(x + size))] = g return np.roll(np.roll(grad, (- sx), 1), (- sy), 0)
def calc_grad_tiled(img, t_grad, tile_size=512): '\n Computes the value of tensor t_grad over the image in a tiled way. Random shifts are applied to the image to blur\n tile boundaries over multiple iterations.\n :param img: the image to modify.\n :param t_grad: the gradient to compute, as a TensorFlow operation.\n :param tile_size: the size of each image tile.\n :return: the randomly shifted image.\n ' size = tile_size (height, width) = img.shape[:2] (sx, sy) = np.random.randint(size, size=2) img_shift = np.roll(np.roll(img, sx, 1), sy, 0) grad = np.zeros_like(img) for y in range(0, max((height - (size // 2)), size), size): for x in range(0, max((width - (size // 2)), size), size): sub = img_shift[(y:(y + size), x:(x + size))] g = sess.run(t_grad, {t_input: sub}) grad[(y:(y + size), x:(x + size))] = g return np.roll(np.roll(grad, (- sx), 1), (- sy), 0)<|docstring|>Computes the value of tensor t_grad over the image in a tiled way. Random shifts are applied to the image to blur tile boundaries over multiple iterations. :param img: the image to modify. :param t_grad: the gradient to compute, as a TensorFlow operation. :param tile_size: the size of each image tile. :return: the randomly shifted image.<|endoftext|>
0b8648a3bcea19354ed6f3e560f7aa511a43b6c30fe99750889d2090f96d90f6
def render_multiscale(t_obj, img0, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4): '\n Renders the image at different sizes.\n :param t_obj: the objective to render.\n :param img0: the image to alter.\n :param iter_n: the number of iterations of changes to apply.\n :param step: the step size for each image alteration.\n :param octave_n: the number of different octaves to scale over.\n :param octave_scale: scale up the octaves.\n ' t_score = tf.reduce_mean(t_obj) t_grad = tf.gradients(t_score, t_input)[0] img = img0.copy() for octave in range(octave_n): if (octave > 0): hw = (np.float32(img.shape[:2]) * octave_scale) img = resize(img, np.int32(hw)) for i in range(iter_n): g = calc_grad_tiled(img, t_grad) g /= (g.std() + 1e-08) img += (g * step) print('.', end=' ') showarray(visstd(img))
Renders the image at different sizes. :param t_obj: the objective to render. :param img0: the image to alter. :param iter_n: the number of iterations of changes to apply. :param step: the step size for each image alteration. :param octave_n: the number of different octaves to scale over. :param octave_scale: scale up the octaves.
multiscale_dreaming.py
render_multiscale
EdCo95/deepdream
2
python
def render_multiscale(t_obj, img0, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4): '\n Renders the image at different sizes.\n :param t_obj: the objective to render.\n :param img0: the image to alter.\n :param iter_n: the number of iterations of changes to apply.\n :param step: the step size for each image alteration.\n :param octave_n: the number of different octaves to scale over.\n :param octave_scale: scale up the octaves.\n ' t_score = tf.reduce_mean(t_obj) t_grad = tf.gradients(t_score, t_input)[0] img = img0.copy() for octave in range(octave_n): if (octave > 0): hw = (np.float32(img.shape[:2]) * octave_scale) img = resize(img, np.int32(hw)) for i in range(iter_n): g = calc_grad_tiled(img, t_grad) g /= (g.std() + 1e-08) img += (g * step) print('.', end=' ') showarray(visstd(img))
def render_multiscale(t_obj, img0, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4): '\n Renders the image at different sizes.\n :param t_obj: the objective to render.\n :param img0: the image to alter.\n :param iter_n: the number of iterations of changes to apply.\n :param step: the step size for each image alteration.\n :param octave_n: the number of different octaves to scale over.\n :param octave_scale: scale up the octaves.\n ' t_score = tf.reduce_mean(t_obj) t_grad = tf.gradients(t_score, t_input)[0] img = img0.copy() for octave in range(octave_n): if (octave > 0): hw = (np.float32(img.shape[:2]) * octave_scale) img = resize(img, np.int32(hw)) for i in range(iter_n): g = calc_grad_tiled(img, t_grad) g /= (g.std() + 1e-08) img += (g * step) print('.', end=' ') showarray(visstd(img))<|docstring|>Renders the image at different sizes. :param t_obj: the objective to render. :param img0: the image to alter. :param iter_n: the number of iterations of changes to apply. :param step: the step size for each image alteration. :param octave_n: the number of different octaves to scale over. :param octave_scale: scale up the octaves.<|endoftext|>
184a3270e87ad16f90b1f5b8023b5700711c3b347ff4a043c4b1cad4cfb4cbbf
def concatFreeTimes(currentFreeTimes, userFreeTimes, begin_date, end_date): '\n This function takes two lists of free times and combines them\n Then crops out any resulting free times that are less than a minute long\n Then formats the list to ready it for insertion into the database\n ' updatedFreeTimes = [] index = 0 begin_date = arrow.get(begin_date) end_date = arrow.get(end_date) for day in arrow.Arrow.span_range('day', begin_date, end_date): day_start = day[0] day_end = day[1] for currBlock in currentFreeTimes: if ((arrow.get(currBlock[0]) >= day_start) and (arrow.get(currBlock[1]) <= day_end)): handled = False for userBlock in userFreeTimes: if ((arrow.get(userBlock[0]) >= day_start) and (arrow.get(userBlock[1]) <= day_end)): block_begin = str(day_start) block_end = str(day_end) if (userBlock[0] >= currBlock[0]): block_begin = userBlock[0] if (userBlock[1] <= currBlock[1]): block_end = userBlock[1] else: block_end = currBlock[1] handled = True elif (userBlock[0] < currBlock[0]): block_begin = currBlock[0] if (userBlock[1] <= currBlock[1]): block_end = userBlock[1] handled = True else: block_end = currBlock[1] if handled: newBlock = [arrow.get(block_begin), arrow.get(block_end), 'Available'] updatedFreeTimes.append(newBlock) handled = False updatedFreeTimes = crop(updatedFreeTimes, 1) updatedFreeTimes = getPertinentInfo(updatedFreeTimes) for event in updatedFreeTimes: print(event) return updatedFreeTimes
This function takes two lists of free times and combines them Then crops out any resulting free times that are less than a minute long Then formats the list to ready it for insertion into the database
meetings/calculations.py
concatFreeTimes
kaschaefer/MeetMe
0
python
def concatFreeTimes(currentFreeTimes, userFreeTimes, begin_date, end_date): '\n This function takes two lists of free times and combines them\n Then crops out any resulting free times that are less than a minute long\n Then formats the list to ready it for insertion into the database\n ' updatedFreeTimes = [] index = 0 begin_date = arrow.get(begin_date) end_date = arrow.get(end_date) for day in arrow.Arrow.span_range('day', begin_date, end_date): day_start = day[0] day_end = day[1] for currBlock in currentFreeTimes: if ((arrow.get(currBlock[0]) >= day_start) and (arrow.get(currBlock[1]) <= day_end)): handled = False for userBlock in userFreeTimes: if ((arrow.get(userBlock[0]) >= day_start) and (arrow.get(userBlock[1]) <= day_end)): block_begin = str(day_start) block_end = str(day_end) if (userBlock[0] >= currBlock[0]): block_begin = userBlock[0] if (userBlock[1] <= currBlock[1]): block_end = userBlock[1] else: block_end = currBlock[1] handled = True elif (userBlock[0] < currBlock[0]): block_begin = currBlock[0] if (userBlock[1] <= currBlock[1]): block_end = userBlock[1] handled = True else: block_end = currBlock[1] if handled: newBlock = [arrow.get(block_begin), arrow.get(block_end), 'Available'] updatedFreeTimes.append(newBlock) handled = False updatedFreeTimes = crop(updatedFreeTimes, 1) updatedFreeTimes = getPertinentInfo(updatedFreeTimes) for event in updatedFreeTimes: print(event) return updatedFreeTimes
def concatFreeTimes(currentFreeTimes, userFreeTimes, begin_date, end_date): '\n This function takes two lists of free times and combines them\n Then crops out any resulting free times that are less than a minute long\n Then formats the list to ready it for insertion into the database\n ' updatedFreeTimes = [] index = 0 begin_date = arrow.get(begin_date) end_date = arrow.get(end_date) for day in arrow.Arrow.span_range('day', begin_date, end_date): day_start = day[0] day_end = day[1] for currBlock in currentFreeTimes: if ((arrow.get(currBlock[0]) >= day_start) and (arrow.get(currBlock[1]) <= day_end)): handled = False for userBlock in userFreeTimes: if ((arrow.get(userBlock[0]) >= day_start) and (arrow.get(userBlock[1]) <= day_end)): block_begin = str(day_start) block_end = str(day_end) if (userBlock[0] >= currBlock[0]): block_begin = userBlock[0] if (userBlock[1] <= currBlock[1]): block_end = userBlock[1] else: block_end = currBlock[1] handled = True elif (userBlock[0] < currBlock[0]): block_begin = currBlock[0] if (userBlock[1] <= currBlock[1]): block_end = userBlock[1] handled = True else: block_end = currBlock[1] if handled: newBlock = [arrow.get(block_begin), arrow.get(block_end), 'Available'] updatedFreeTimes.append(newBlock) handled = False updatedFreeTimes = crop(updatedFreeTimes, 1) updatedFreeTimes = getPertinentInfo(updatedFreeTimes) for event in updatedFreeTimes: print(event) return updatedFreeTimes<|docstring|>This function takes two lists of free times and combines them Then crops out any resulting free times that are less than a minute long Then formats the list to ready it for insertion into the database<|endoftext|>
0c6c822759f571f873800f445748b78acc4e7b9b5c2010569eb3e3f12fbb7a0d
def add_item_offset(token, sentence): 'Get the start and end offset of a token in a sentence' s_pattern = re.compile(re.escape(token), re.I) token_offset_list = [] for m in s_pattern.finditer(sentence): token_offset_list.append((m.group(), m.start(), m.end())) return token_offset_list
Get the start and end offset of a token in a sentence
legacy/explore/statistic.py
add_item_offset
zhongyuchen/information-extraction
2
python
def add_item_offset(token, sentence): s_pattern = re.compile(re.escape(token), re.I) token_offset_list = [] for m in s_pattern.finditer(sentence): token_offset_list.append((m.group(), m.start(), m.end())) return token_offset_list
def add_item_offset(token, sentence): s_pattern = re.compile(re.escape(token), re.I) token_offset_list = [] for m in s_pattern.finditer(sentence): token_offset_list.append((m.group(), m.start(), m.end())) return token_offset_list<|docstring|>Get the start and end offset of a token in a sentence<|endoftext|>
aacd3e23befb4300e58d2cd5f115c58dfa3c25b64e12841acbbefdfcce8b098d
def cal_item_pos(target_offset, idx_list): 'Get the index list where the token is located' target_idx = [] for target in target_offset: (start, end) = (target[1], target[2]) cur_idx = [] for (i, idx) in enumerate(idx_list): if ((idx >= start) and (idx < end)): cur_idx.append(i) if (len(cur_idx) > 0): target_idx.append(cur_idx) return target_idx
Get the index list where the token is located
legacy/explore/statistic.py
cal_item_pos
zhongyuchen/information-extraction
2
python
def cal_item_pos(target_offset, idx_list): target_idx = [] for target in target_offset: (start, end) = (target[1], target[2]) cur_idx = [] for (i, idx) in enumerate(idx_list): if ((idx >= start) and (idx < end)): cur_idx.append(i) if (len(cur_idx) > 0): target_idx.append(cur_idx) return target_idx
def cal_item_pos(target_offset, idx_list): target_idx = [] for target in target_offset: (start, end) = (target[1], target[2]) cur_idx = [] for (i, idx) in enumerate(idx_list): if ((idx >= start) and (idx < end)): cur_idx.append(i) if (len(cur_idx) > 0): target_idx.append(cur_idx) return target_idx<|docstring|>Get the index list where the token is located<|endoftext|>
ba90065091e4c9c5a488d21d866adcb9f68103e45ea9155444792f07e65c73e9
def subtract(a, b): 'Subtract a from b and return value' return (b - a)
Subtract a from b and return value
app/deleted_files/calc.py
subtract
simeon-s/recipe-app-api
0
python
def subtract(a, b): return (b - a)
def subtract(a, b): return (b - a)<|docstring|>Subtract a from b and return value<|endoftext|>
2cabb5b43715b0eba8a5faf498ffb780f62cb83a344c576654ab04119d83f25a
def getSettings(): '\n Returns:\n The construct settings object.\n ' return settings
Returns: The construct settings object.
settings/settings.py
getSettings
Unknowncmbk/Pokemon-Go-Locator-Server
0
python
def getSettings(): '\n Returns:\n The construct settings object.\n ' return settings
def getSettings(): '\n Returns:\n The construct settings object.\n ' return settings<|docstring|>Returns: The construct settings object.<|endoftext|>
68118909ae7716531883889a9338e4875e244b71841846b43a5fc3509e0a9278
def getDatabase(): '\n Returns: \n The database connection.\n ' try: return settings.db_cxn except Exception as e: print(('Unable to grab DB connection ' % e))
Returns: The database connection.
settings/settings.py
getDatabase
Unknowncmbk/Pokemon-Go-Locator-Server
0
python
def getDatabase(): '\n Returns: \n The database connection.\n ' try: return settings.db_cxn except Exception as e: print(('Unable to grab DB connection ' % e))
def getDatabase(): '\n Returns: \n The database connection.\n ' try: return settings.db_cxn except Exception as e: print(('Unable to grab DB connection ' % e))<|docstring|>Returns: The database connection.<|endoftext|>
cfad5309110a2213500f809626e9ec16940a8f151b6c18e77f707542a88f6c53
def close(self): '\n Closes the DB connection\n ' try: self.db_cxn.close() except Exception as e: print(('Unable to close DB connection ' % e))
Closes the DB connection
settings/settings.py
close
Unknowncmbk/Pokemon-Go-Locator-Server
0
python
def close(self): '\n \n ' try: self.db_cxn.close() except Exception as e: print(('Unable to close DB connection ' % e))
def close(self): '\n \n ' try: self.db_cxn.close() except Exception as e: print(('Unable to close DB connection ' % e))<|docstring|>Closes the DB connection<|endoftext|>
887fe3c5ba52a551c22bb0f8b6dceafe03889dd2475c20c28c5a22467c26aa08
def __init__(self, est, target=None, transformer_pipeline=None, client=None, booster=None, classes=None, name=None): '\n Construct an ADSModel\n\n Parameters\n ----------\n est: fitted estimator object\n The estimator can be a standard sklearn estimator, a keras, lightgbm, or xgboost estimator, or any other object that implement methods from\n (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification.\n target: PandasSeries\n The target column you are using in your dataset, this is assigned as the "y" attribute.\n transformer_pipeline: TransformerPipeline\n A custom trasnformer pipeline object.\n client: Str\n Currently unused.\n booster: Str\n Currently unused.\n classes: list, optional\n List of target classes. Required for classification problem if the est does not contain classes_ attribute.\n name: str, optional\n Name of the model.\n ' self.est = est if utils.is_same_class(transformer_pipeline, Pipeline): self.transformer_pipeline = TransformerPipeline(transformer_pipeline.steps) elif isinstance(transformer_pipeline, list): self.transformer_pipeline = TransformerPipeline(transformer_pipeline) else: self.transformer_pipeline = transformer_pipeline self.target = target if (classes is not None): self.classes_ = classes self.name = (name if (name is not None) else str(est)) self.client = client self.booster = booster self._get_underlying_model_type()
Construct an ADSModel Parameters ---------- est: fitted estimator object The estimator can be a standard sklearn estimator, a keras, lightgbm, or xgboost estimator, or any other object that implement methods from (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification. target: PandasSeries The target column you are using in your dataset, this is assigned as the "y" attribute. transformer_pipeline: TransformerPipeline A custom trasnformer pipeline object. client: Str Currently unused. booster: Str Currently unused. classes: list, optional List of target classes. Required for classification problem if the est does not contain classes_ attribute. name: str, optional Name of the model.
ads/common/model.py
__init__
oracle/accelerated-data-science
20
python
def __init__(self, est, target=None, transformer_pipeline=None, client=None, booster=None, classes=None, name=None): '\n Construct an ADSModel\n\n Parameters\n ----------\n est: fitted estimator object\n The estimator can be a standard sklearn estimator, a keras, lightgbm, or xgboost estimator, or any other object that implement methods from\n (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification.\n target: PandasSeries\n The target column you are using in your dataset, this is assigned as the "y" attribute.\n transformer_pipeline: TransformerPipeline\n A custom trasnformer pipeline object.\n client: Str\n Currently unused.\n booster: Str\n Currently unused.\n classes: list, optional\n List of target classes. Required for classification problem if the est does not contain classes_ attribute.\n name: str, optional\n Name of the model.\n ' self.est = est if utils.is_same_class(transformer_pipeline, Pipeline): self.transformer_pipeline = TransformerPipeline(transformer_pipeline.steps) elif isinstance(transformer_pipeline, list): self.transformer_pipeline = TransformerPipeline(transformer_pipeline) else: self.transformer_pipeline = transformer_pipeline self.target = target if (classes is not None): self.classes_ = classes self.name = (name if (name is not None) else str(est)) self.client = client self.booster = booster self._get_underlying_model_type()
def __init__(self, est, target=None, transformer_pipeline=None, client=None, booster=None, classes=None, name=None): '\n Construct an ADSModel\n\n Parameters\n ----------\n est: fitted estimator object\n The estimator can be a standard sklearn estimator, a keras, lightgbm, or xgboost estimator, or any other object that implement methods from\n (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification.\n target: PandasSeries\n The target column you are using in your dataset, this is assigned as the "y" attribute.\n transformer_pipeline: TransformerPipeline\n A custom trasnformer pipeline object.\n client: Str\n Currently unused.\n booster: Str\n Currently unused.\n classes: list, optional\n List of target classes. Required for classification problem if the est does not contain classes_ attribute.\n name: str, optional\n Name of the model.\n ' self.est = est if utils.is_same_class(transformer_pipeline, Pipeline): self.transformer_pipeline = TransformerPipeline(transformer_pipeline.steps) elif isinstance(transformer_pipeline, list): self.transformer_pipeline = TransformerPipeline(transformer_pipeline) else: self.transformer_pipeline = transformer_pipeline self.target = target if (classes is not None): self.classes_ = classes self.name = (name if (name is not None) else str(est)) self.client = client self.booster = booster self._get_underlying_model_type()<|docstring|>Construct an ADSModel Parameters ---------- est: fitted estimator object The estimator can be a standard sklearn estimator, a keras, lightgbm, or xgboost estimator, or any other object that implement methods from (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification. target: PandasSeries The target column you are using in your dataset, this is assigned as the "y" attribute. transformer_pipeline: TransformerPipeline A custom trasnformer pipeline object. client: Str Currently unused. booster: Str Currently unused. 
classes: list, optional List of target classes. Required for classification problem if the est does not contain classes_ attribute. name: str, optional Name of the model.<|endoftext|>
814a0742595adc411825a998bf1ac4c7ba0f999941c572f7663db7dc97706621
@staticmethod def from_estimator(est, transformers=None, classes=None, name=None): '\n Build ADSModel from a fitted estimator\n\n Parameters\n ----------\n est: fitted estimator object\n The estimator can be a standard sklearn estimator or any object that implement methods from\n (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification.\n transformers: a scalar or an iterable of objects implementing transform function, optional\n The transform function would be applied on data before calling predict and predict_proba on estimator.\n classes: list, optional\n List of target classes. Required for classification problem if the est does not contain classes_ attribute.\n name: str, optional\n Name of the model.\n\n Returns\n -------\n model: ads.common.model.ADSModel\n Examples\n --------\n >>> model = MyModelClass.train()\n >>> model_ads = from_estimator(model)\n ' if hasattr(est, 'predict'): return ADSModel(est, transformer_pipeline=transformers, classes=classes, name=name) elif callable(est): return ADSModel(est, transformer_pipeline=transformers, classes=classes, name=name)
Build ADSModel from a fitted estimator Parameters ---------- est: fitted estimator object The estimator can be a standard sklearn estimator or any object that implement methods from (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification. transformers: a scalar or an iterable of objects implementing transform function, optional The transform function would be applied on data before calling predict and predict_proba on estimator. classes: list, optional List of target classes. Required for classification problem if the est does not contain classes_ attribute. name: str, optional Name of the model. Returns ------- model: ads.common.model.ADSModel Examples -------- >>> model = MyModelClass.train() >>> model_ads = from_estimator(model)
ads/common/model.py
from_estimator
oracle/accelerated-data-science
20
python
@staticmethod def from_estimator(est, transformers=None, classes=None, name=None): '\n Build ADSModel from a fitted estimator\n\n Parameters\n ----------\n est: fitted estimator object\n The estimator can be a standard sklearn estimator or any object that implement methods from\n (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification.\n transformers: a scalar or an iterable of objects implementing transform function, optional\n The transform function would be applied on data before calling predict and predict_proba on estimator.\n classes: list, optional\n List of target classes. Required for classification problem if the est does not contain classes_ attribute.\n name: str, optional\n Name of the model.\n\n Returns\n -------\n model: ads.common.model.ADSModel\n Examples\n --------\n >>> model = MyModelClass.train()\n >>> model_ads = from_estimator(model)\n ' if hasattr(est, 'predict'): return ADSModel(est, transformer_pipeline=transformers, classes=classes, name=name) elif callable(est): return ADSModel(est, transformer_pipeline=transformers, classes=classes, name=name)
@staticmethod def from_estimator(est, transformers=None, classes=None, name=None): '\n Build ADSModel from a fitted estimator\n\n Parameters\n ----------\n est: fitted estimator object\n The estimator can be a standard sklearn estimator or any object that implement methods from\n (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification.\n transformers: a scalar or an iterable of objects implementing transform function, optional\n The transform function would be applied on data before calling predict and predict_proba on estimator.\n classes: list, optional\n List of target classes. Required for classification problem if the est does not contain classes_ attribute.\n name: str, optional\n Name of the model.\n\n Returns\n -------\n model: ads.common.model.ADSModel\n Examples\n --------\n >>> model = MyModelClass.train()\n >>> model_ads = from_estimator(model)\n ' if hasattr(est, 'predict'): return ADSModel(est, transformer_pipeline=transformers, classes=classes, name=name) elif callable(est): return ADSModel(est, transformer_pipeline=transformers, classes=classes, name=name)<|docstring|>Build ADSModel from a fitted estimator Parameters ---------- est: fitted estimator object The estimator can be a standard sklearn estimator or any object that implement methods from (BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification. transformers: a scalar or an iterable of objects implementing transform function, optional The transform function would be applied on data before calling predict and predict_proba on estimator. classes: list, optional List of target classes. Required for classification problem if the est does not contain classes_ attribute. name: str, optional Name of the model. Returns ------- model: ads.common.model.ADSModel Examples -------- >>> model = MyModelClass.train() >>> model_ads = from_estimator(model)<|endoftext|>
dc5077e92701500584990c5cfd9835a63b87e8e7caa84afe4b2893530794cca1
def rename(self, name): '\n Changes the name of a model\n\n Parameters\n ----------\n name: str\n A string which is supplied for naming a model.\n ' self.name = name
Changes the name of a model Parameters ---------- name: str A string which is supplied for naming a model.
ads/common/model.py
rename
oracle/accelerated-data-science
20
python
def rename(self, name): '\n Changes the name of a model\n\n Parameters\n ----------\n name: str\n A string which is supplied for naming a model.\n ' self.name = name
def rename(self, name): '\n Changes the name of a model\n\n Parameters\n ----------\n name: str\n A string which is supplied for naming a model.\n ' self.name = name<|docstring|>Changes the name of a model Parameters ---------- name: str A string which is supplied for naming a model.<|endoftext|>
6cc3b0e576e18151b7b983b33f91daaed9eeccb5be374b2aff57e8d695ff28cd
def predict(self, X): '\n Runs the models predict function on some data\n\n Parameters\n ----------\n X: MLData\n A MLData object which holds the examples to be predicted on.\n\n Returns\n -------\n Union[List, pandas.Series], depending on the estimator\n Usually a list or PandasSeries of predictions\n ' X = self.transform(X) if (self._underlying_model in ['torch']): return self.est(X) if ((self.client is not None) and (self.booster is not None)): return self.est.predict(self.client, self.booster, X).persist() else: return self.est.predict(X)
Runs the models predict function on some data Parameters ---------- X: MLData A MLData object which holds the examples to be predicted on. Returns ------- Union[List, pandas.Series], depending on the estimator Usually a list or PandasSeries of predictions
ads/common/model.py
predict
oracle/accelerated-data-science
20
python
def predict(self, X): '\n Runs the models predict function on some data\n\n Parameters\n ----------\n X: MLData\n A MLData object which holds the examples to be predicted on.\n\n Returns\n -------\n Union[List, pandas.Series], depending on the estimator\n Usually a list or PandasSeries of predictions\n ' X = self.transform(X) if (self._underlying_model in ['torch']): return self.est(X) if ((self.client is not None) and (self.booster is not None)): return self.est.predict(self.client, self.booster, X).persist() else: return self.est.predict(X)
def predict(self, X): '\n Runs the models predict function on some data\n\n Parameters\n ----------\n X: MLData\n A MLData object which holds the examples to be predicted on.\n\n Returns\n -------\n Union[List, pandas.Series], depending on the estimator\n Usually a list or PandasSeries of predictions\n ' X = self.transform(X) if (self._underlying_model in ['torch']): return self.est(X) if ((self.client is not None) and (self.booster is not None)): return self.est.predict(self.client, self.booster, X).persist() else: return self.est.predict(X)<|docstring|>Runs the models predict function on some data Parameters ---------- X: MLData A MLData object which holds the examples to be predicted on. Returns ------- Union[List, pandas.Series], depending on the estimator Usually a list or PandasSeries of predictions<|endoftext|>
13693e79e9a4e36f342524085450aafab811d8b6a5f159ee31518cd2a4856170
def predict_proba(self, X): '\n Runs the models predict probabilities function on some data\n\n Parameters\n ----------\n X: MLData\n A MLData object which holds the examples to be predicted on.\n\n Returns\n -------\n Union[List, pandas.Series], depending on the estimator\n Usually a list or PandasSeries of predictions\n ' X = self.transform(X) if (self._underlying_model in ['torch']): return self.est(X) if ((self.client is not None) and (self.booster is not None)): return self.est.predict_proba(self.client, self.booster, X).persist() else: return self.est.predict_proba(X)
Runs the models predict probabilities function on some data Parameters ---------- X: MLData A MLData object which holds the examples to be predicted on. Returns ------- Union[List, pandas.Series], depending on the estimator Usually a list or PandasSeries of predictions
ads/common/model.py
predict_proba
oracle/accelerated-data-science
20
python
def predict_proba(self, X): '\n Runs the models predict probabilities function on some data\n\n Parameters\n ----------\n X: MLData\n A MLData object which holds the examples to be predicted on.\n\n Returns\n -------\n Union[List, pandas.Series], depending on the estimator\n Usually a list or PandasSeries of predictions\n ' X = self.transform(X) if (self._underlying_model in ['torch']): return self.est(X) if ((self.client is not None) and (self.booster is not None)): return self.est.predict_proba(self.client, self.booster, X).persist() else: return self.est.predict_proba(X)
def predict_proba(self, X): '\n Runs the models predict probabilities function on some data\n\n Parameters\n ----------\n X: MLData\n A MLData object which holds the examples to be predicted on.\n\n Returns\n -------\n Union[List, pandas.Series], depending on the estimator\n Usually a list or PandasSeries of predictions\n ' X = self.transform(X) if (self._underlying_model in ['torch']): return self.est(X) if ((self.client is not None) and (self.booster is not None)): return self.est.predict_proba(self.client, self.booster, X).persist() else: return self.est.predict_proba(X)<|docstring|>Runs the models predict probabilities function on some data Parameters ---------- X: MLData A MLData object which holds the examples to be predicted on. Returns ------- Union[List, pandas.Series], depending on the estimator Usually a list or PandasSeries of predictions<|endoftext|>
def score(self, X, y_true, score_fn=None):
    """Score the model on ``X`` against ground-truth labels ``y_true``.

    Parameters
    ----------
    X: MLData
        A MLData object which holds the examples to be predicted on.
    y_true: MLData
        A MLData object which holds ground truth labels for the examples
        which are being predicted on.
    score_fn: Scorer (callable)
        A callable object that returns a score, usually created with
        sklearn.metrics.make_scorer().

    Returns
    -------
    float, depending on the estimator
        Almost always a scalar score (usually a float).
    """
    features = self.transform(X)
    # A user-supplied scorer takes precedence over the estimator's own score().
    if score_fn:
        return score_fn(self, features, y_true)
    assert hasattr(self.est, 'score'), f'Could not find a score function for estimator of type: {self._underlying_model}. Pass in your desired scoring function to score_fn '
    # Distributed path: score lazily via client/booster and persist the result.
    if self.client is not None and self.booster is not None:
        return self.est.score(self.client, self.booster, features, y_true).persist()
    return self.est.score(features, y_true)
def summary(self):
    """A summary of the ADSModel.

    Prints the model's string representation; returns nothing.
    """
    # The model's __str__/__repr__ carries the human-readable summary.
    print(self)
def transform(self, X):
    """Process some MLData through the selected ADSModel transformers.

    Parameters
    ----------
    X: MLData
        A MLData object which holds the examples to be transformed.

    Returns
    -------
    The transformed data, with the target column (when present) dropped.
    """
    # Work on a copy so the caller's data is never mutated.
    if hasattr(X, 'copy'):
        X = X.copy()
    if self.transformer_pipeline is not None:
        transformer_pipeline = self.transformer_pipeline
        # A single transformer is wrapped so the loop handles both cases.
        if not isinstance(transformer_pipeline, Iterable):
            transformer_pipeline = [self.transformer_pipeline]
        for transformer in transformer_pipeline:
            try:
                X = transformer.transform(X)
            except Exception:
                # Best-effort by design: a transformer that cannot handle this
                # data is skipped rather than aborting the whole pipeline.
                # NOTE(review): failures are swallowed silently — consider
                # logging them if this ever needs debugging.
                pass
    # Never leak the target column into the feature matrix.
    # NOTE(review): assumes X exposes a ``columns`` attribute here
    # (DataFrame-like) — confirm against callers.
    if self.target is not None and self.target in X.columns:
        X = X.drop(self.target, axis=1)
    return X
def is_classifier(self):
    """Returns True if ADS believes that the model is a classifier.

    Returns
    -------
    Boolean: True if the model is a classifier, False otherwise.
    """
    # Classifiers are identified by a non-None ``classes_`` attribute.
    classes = getattr(self, 'classes_', None)
    return classes is not None
def prepare(self, target_dir=None, data_sample=None, X_sample=None, y_sample=None, include_data_sample=False, force_overwrite=False, fn_artifact_files_included=False, fn_name='model_api', inference_conda_env=None, data_science_env=False, ignore_deployment_error=False, use_case_type=None, inference_python_version=None, imputed_values={}, **kwargs):
    """Prepare a model artifact directory to be published to the model catalog.

    Parameters
    ----------
    target_dir : str, default: model.name[:12]
        Target directory under which the model artifact files are written.
    data_sample : ADSData
        Preferred over ``X_sample``/``y_sample``. A sample of the test data
        provided to the ``predict()`` API of the scoring script; used to
        generate ``schema_input.json`` and ``schema_output.json``.
    X_sample : pandas.DataFrame
        A sample of input data for the ``predict()`` API of the scoring
        script; used to generate the input schema.
    y_sample : pandas.Series
        A sample of output expected from the ``predict()`` API for
        ``X_sample``; used to generate the output schema.
    include_data_sample : bool, default: False
        Deprecated; only triggers a warning, the sample is never saved.
    force_overwrite : bool, default: False
        If True, overwrites ``target_dir`` if it already exists.
    fn_artifact_files_included : bool, default: False
        If True, generates artifacts to export the model as a function
        without an ads dependency.
    fn_name : str, default: 'model_api'
        Required when ``fn_artifact_files_included`` is set.
    inference_conda_env : str, default: None
        Conda environment used by the model deployment service for inference.
    data_science_env : bool, default: False
        If True, the data-science environment represented by the slug in the
        training conda environment is used.
    ignore_deployment_error : bool, default: False
        If True, ignore errors that may impact model deployment.
    use_case_type : str
        The use case type of the model (see ``UseCaseType``), e.g.
        ``UseCaseType.BINARY_CLASSIFICATION`` or ``"binary_classification"``.
    inference_python_version : str, default: None
        If provided, added to the generated runtime yaml.
    imputed_values : dict, default: {}
        Passed through to the onnx data transformer.
        NOTE(review): mutable default — appears to be read-only here, but
        confirm the transformer does not mutate it.

    **kwargs
    --------
    max_col_num: (int, optional). Defaults to utils.DATA_SCHEMA_MAX_COL_NUM.
        The maximum column size of the data that allows to auto generate schema.

    Returns
    -------
    model_artifact : an instance of ``ModelArtifact`` that can be used to
        test the generated scoring script.

    Raises
    ------
    ValueError
        If ``target_dir`` exists and ``force_overwrite`` is False.
    AssertionError
        If neither ``data_sample`` nor ``X_sample`` is provided.
    """
    if include_data_sample:
        # Deprecated flag: data samples are no longer bundled with the artifact.
        logger.warning(f'Parameter `include_data_sample` is deprecated and removed in future releases. Data sample is not saved. You can manually save the data sample to {target_dir}.')
    # Total progress-bar steps differ depending on whether fn artifacts are generated.
    ProgressStepsWFn = (Progress_Steps_W_Fn + 1)
    ProgressStepsWoFn = (Progress_Steps_Wo_Fn + 1)
    if (target_dir is None):
        logger.info(f'Using the default directory {self.name[:12]} to create the model artifact. Use `target_dir` to specify a directory.')
    # fn artifacts are only possible for supported model types.
    can_generate_fn_files = (fn_artifact_files_included and (self._underlying_model not in Unsupported_Model_Types))
    # A data sample is mandatory: it drives schema inference and onnx conversion.
    assert ((data_sample is not None) or (X_sample is not None)), 'You must provide a data sample to infer the input and output data types which are used when converting the the model to an equivalent onnx model. This can be done as an ADSData object with the parameter `data_sample`, or as X and y samples to X_sample and y_sample respectively. '
    with utils.get_progress_bar((ProgressStepsWFn if can_generate_fn_files else ProgressStepsWoFn)) as progress:
        progress.update('Preparing Model Artifact Directory')
        if os.path.exists(target_dir):
            if (not force_overwrite):
                raise ValueError('Directory already exists, set force to overwrite')
        os.makedirs(target_dir, exist_ok=True)
        # Seed the artifact dir with the packaged ignore file.
        shutil.copyfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'artifact/.model-ignore'), os.path.join(target_dir, '.model-ignore'))
        # Reset distutils' created-dir cache so repeated prepare() calls re-copy files.
        distutils.dir_util._path_created = {}
        progress.update('Serializing model')
        # Fall back to the ADSData object's X/y when explicit samples were not given.
        X_sample = (data_sample.X if ((X_sample is None) and (data_sample is not None)) else X_sample)
        y_sample = (data_sample.y if ((y_sample is None) and (data_sample is not None)) else y_sample)
        X_trans = self._onnx_data_transformer(X=X_sample, imputed_values=imputed_values)
        model_kwargs = serialize_model(model=self, target_dir=target_dir, X=X_trans, y=y_sample, model_type=self._underlying_model)
        max_col_num = kwargs.get('max_col_num', utils.DATA_SCHEMA_MAX_COL_NUM)
        if (self._underlying_model not in NoTransformModels):
            try:
                # Persist the fitted data transformer next to the model so
                # inference can replay the same preprocessing.
                self.onnx_data_preprocessor.save(os.path.join(target_dir, 'onnx_data_transformer.json'))
            except Exception as e:
                logger.error(f'Unable to serialize the data transformer due to: {e}.')
                raise e
        # NOTE(review): flattened source is ambiguous about nesting here;
        # reconstruction assumes only these two keys are conditional on a
        # non-onnx serializer — confirm against upstream.
        if (model_kwargs.get('serializer', '') != 'onnx'):
            model_kwargs['model_libs'] = utils.extract_lib_dependencies_from_model(self.est)
            model_kwargs['underlying_model'] = self._underlying_model
        model_kwargs['progress'] = progress
        model_kwargs['inference_conda_env'] = inference_conda_env
        model_kwargs['data_science_env'] = data_science_env
        model_kwargs['ignore_deployment_error'] = ignore_deployment_error
        model_kwargs['use_case_type'] = use_case_type
        model_kwargs['max_col_num'] = max_col_num
        model_artifact = prepare_generic_model(target_dir, model=self.est, data_sample=data_sample, X_sample=X_sample, y_sample=y_sample, fn_artifact_files_included=fn_artifact_files_included, fn_name=fn_name, force_overwrite=force_overwrite, inference_python_version=inference_python_version, **model_kwargs)
        try:
            # Smoke-test the generated scoring script by reloading the artifact.
            model_file_name = ('model.pkl' if (self._underlying_model == 'automl') else 'model.onnx')
            model_artifact.reload(model_file_name=model_file_name)
        except Exception as e:
            # Validation failure is non-fatal: warn and let the user fix score.py.
            print(str(e))
            msg = '\nWARNING: Validation using scoring script failed. Update the inference script(score.py) as required. '
            print((('\x1b[93m' + msg) + '\x1b[0m'))
        if os.path.exists(os.path.join(target_dir, '__pycache__')):
            shutil.rmtree(os.path.join(target_dir, '__pycache__'), ignore_errors=True)
    logger.info(model_artifact.__repr__())
    return model_artifact
def visualize_transforms(self):
    """A graph of the ADSModel transformer pipeline.

    It is only supported in JupyterLabs Notebooks.
    """
    # Rendering is delegated entirely to the pipeline object.
    pipeline = self.transformer_pipeline
    pipeline.visualize()
def show_in_notebook(self):
    """Describe the model by showing its properties.

    Builds a name/value table of model metadata, renders it as HTML when
    running inside a notebook, and returns the raw rows.
    """
    if self._underlying_model == 'automl':
        est = self.est
        # AutoML exposes rich fitted metadata on the estimator itself.
        info = [
            ['Model Name', self.name],
            ['Target Variable', self.target],
            ['Selected Algorithm', est.selected_model_],
            ['Task', est.task],
            ['Training Dataset Size', est.train_shape_],
            ['CV', est.cv_],
            ['Optimization Metric', est.score_metric],
            ['Selected Hyperparameters', est.selected_model_params_],
            ['Initial Number of Features', est.train_shape_[1]],
            ['Initial Features', est.pipeline.orig_feature_names],
            ['Selected Number of Features', len(est.selected_features_names_)],
            ['Selected Features', est.selected_features_names_],
        ]
    else:
        # Generic estimators: derive what we can, tolerating missing attributes.
        target_desc = self.target if self.target is not None else 'not available from estimator'
        hyperparams = self.est.get_params() if hasattr(self.est, 'get_params') else None
        contained = self.est.est.__class__.__name__ if hasattr(self.est, 'est') else None
        info = [
            ['Model Name', self.name],
            ['Target Variable', target_desc],
            ['Selected Hyperparameters', hyperparams],
            ['Framework', self.est.__class__.__module__],
            ['Estimator Class', self.est.__class__.__name__],
            ['Contained Estimator', contained],
        ]
    info_df = pd.DataFrame(info)
    if is_notebook():
        # Widen display limits so long feature lists are not truncated.
        with pd.option_context('display.max_colwidth', 1000, 'display.width', None, 'display.precision', 4):
            display(HTML(info_df.to_html(index=False, header=False)))
    return info
9c1b0370ede2182ebeec9236f95e36aff9ec85cb8c571a665a29228cc1fcf430
def normalize_graph(graph, normalized=True, add_self_loops=True): "Normalized the graph's adjacency matrix in the scipy sparse matrix format.\n\n Args:\n graph: A scipy sparse adjacency matrix of the input graph.\n normalized: If True, uses the normalized Laplacian formulation. Otherwise,\n use the unnormalized Laplacian construction.\n add_self_loops: If True, adds a one-diagonal corresponding to self-loops in\n the graph.\n\n Returns:\n A scipy sparse matrix containing the normalized version of the input graph.\n " if add_self_loops: graph = (graph + scipy.sparse.identity(graph.shape[0])) degree = np.squeeze(np.asarray(graph.sum(axis=1))) if normalized: with np.errstate(divide='ignore'): inverse_sqrt_degree = (1.0 / np.sqrt(degree)) inverse_sqrt_degree[(inverse_sqrt_degree == np.inf)] = 0 inverse_sqrt_degree = scipy.sparse.diags(inverse_sqrt_degree) return ((inverse_sqrt_degree @ graph) @ inverse_sqrt_degree) else: with np.errstate(divide='ignore'): inverse_degree = (1.0 / degree) inverse_degree[(inverse_degree == np.inf)] = 0 inverse_degree = scipy.sparse.diags(inverse_degree) return (inverse_degree @ graph)
Normalized the graph's adjacency matrix in the scipy sparse matrix format. Args: graph: A scipy sparse adjacency matrix of the input graph. normalized: If True, uses the normalized Laplacian formulation. Otherwise, use the unnormalized Laplacian construction. add_self_loops: If True, adds a one-diagonal corresponding to self-loops in the graph. Returns: A scipy sparse matrix containing the normalized version of the input graph.
graph_embedding/dmon/utils.py
normalize_graph
wy-go/google-research
23,901
python
def normalize_graph(graph, normalized=True, add_self_loops=True): "Normalized the graph's adjacency matrix in the scipy sparse matrix format.\n\n Args:\n graph: A scipy sparse adjacency matrix of the input graph.\n normalized: If True, uses the normalized Laplacian formulation. Otherwise,\n use the unnormalized Laplacian construction.\n add_self_loops: If True, adds a one-diagonal corresponding to self-loops in\n the graph.\n\n Returns:\n A scipy sparse matrix containing the normalized version of the input graph.\n " if add_self_loops: graph = (graph + scipy.sparse.identity(graph.shape[0])) degree = np.squeeze(np.asarray(graph.sum(axis=1))) if normalized: with np.errstate(divide='ignore'): inverse_sqrt_degree = (1.0 / np.sqrt(degree)) inverse_sqrt_degree[(inverse_sqrt_degree == np.inf)] = 0 inverse_sqrt_degree = scipy.sparse.diags(inverse_sqrt_degree) return ((inverse_sqrt_degree @ graph) @ inverse_sqrt_degree) else: with np.errstate(divide='ignore'): inverse_degree = (1.0 / degree) inverse_degree[(inverse_degree == np.inf)] = 0 inverse_degree = scipy.sparse.diags(inverse_degree) return (inverse_degree @ graph)
def normalize_graph(graph, normalized=True, add_self_loops=True): "Normalized the graph's adjacency matrix in the scipy sparse matrix format.\n\n Args:\n graph: A scipy sparse adjacency matrix of the input graph.\n normalized: If True, uses the normalized Laplacian formulation. Otherwise,\n use the unnormalized Laplacian construction.\n add_self_loops: If True, adds a one-diagonal corresponding to self-loops in\n the graph.\n\n Returns:\n A scipy sparse matrix containing the normalized version of the input graph.\n " if add_self_loops: graph = (graph + scipy.sparse.identity(graph.shape[0])) degree = np.squeeze(np.asarray(graph.sum(axis=1))) if normalized: with np.errstate(divide='ignore'): inverse_sqrt_degree = (1.0 / np.sqrt(degree)) inverse_sqrt_degree[(inverse_sqrt_degree == np.inf)] = 0 inverse_sqrt_degree = scipy.sparse.diags(inverse_sqrt_degree) return ((inverse_sqrt_degree @ graph) @ inverse_sqrt_degree) else: with np.errstate(divide='ignore'): inverse_degree = (1.0 / degree) inverse_degree[(inverse_degree == np.inf)] = 0 inverse_degree = scipy.sparse.diags(inverse_degree) return (inverse_degree @ graph)<|docstring|>Normalized the graph's adjacency matrix in the scipy sparse matrix format. Args: graph: A scipy sparse adjacency matrix of the input graph. normalized: If True, uses the normalized Laplacian formulation. Otherwise, use the unnormalized Laplacian construction. add_self_loops: If True, adds a one-diagonal corresponding to self-loops in the graph. Returns: A scipy sparse matrix containing the normalized version of the input graph.<|endoftext|>
b60e2692627c1d733f36e0289af4f2398075181ec2eb066845ff9089959e9d8d
def main(argv=None): 'script main.\n\n parses command line options in sys.argv, unless *argv* is given.\n ' if (not argv): argv = sys.argv parser = E.OptionParser(version='%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $', usage=globals()['__doc__']) parser.add_option('-i', '--test-option', dest='test_option', type='string', help='test option [default=%default].') parser.set_defaults(test_option='test') (options, args) = E.Start(parser, argv=argv) files = glob.glob(os.path.join(os.path.dirname(__file__), '*.pyx')) (ninput, nskipped, noutput) = (0, 0, 0) for f in files: E.info(('rebuilding %s' % f)) ninput += 1 (prefix, suffix) = os.path.splitext(f) for ext in ('.c', '.pyxbldc'): try: os.remove((prefix + ext)) except OSError: pass (dirname, basename) = os.path.split(prefix) assert basename.startswith('_') scriptname = (os.path.join(dirname, basename[1:]) + '.py') if (not os.path.exists(scriptname)): E.warn(('script %s does not exist - skipped' % scriptname)) nskipped += 1 continue E.info(('compiling %s' % scriptname)) os.system(('%s %s --help > /dev/null' % (sys.executable, scriptname))) noutput += 1 E.info(('ninput=%i, noutput=%i, nskipped=%i' % (ninput, noutput, nskipped))) E.Stop()
script main. parses command line options in sys.argv, unless *argv* is given.
CGAT/scripts/cgat_rebuild_extensions.py
main
CGATOxford/cgat
87
python
def main(argv=None): 'script main.\n\n parses command line options in sys.argv, unless *argv* is given.\n ' if (not argv): argv = sys.argv parser = E.OptionParser(version='%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $', usage=globals()['__doc__']) parser.add_option('-i', '--test-option', dest='test_option', type='string', help='test option [default=%default].') parser.set_defaults(test_option='test') (options, args) = E.Start(parser, argv=argv) files = glob.glob(os.path.join(os.path.dirname(__file__), '*.pyx')) (ninput, nskipped, noutput) = (0, 0, 0) for f in files: E.info(('rebuilding %s' % f)) ninput += 1 (prefix, suffix) = os.path.splitext(f) for ext in ('.c', '.pyxbldc'): try: os.remove((prefix + ext)) except OSError: pass (dirname, basename) = os.path.split(prefix) assert basename.startswith('_') scriptname = (os.path.join(dirname, basename[1:]) + '.py') if (not os.path.exists(scriptname)): E.warn(('script %s does not exist - skipped' % scriptname)) nskipped += 1 continue E.info(('compiling %s' % scriptname)) os.system(('%s %s --help > /dev/null' % (sys.executable, scriptname))) noutput += 1 E.info(('ninput=%i, noutput=%i, nskipped=%i' % (ninput, noutput, nskipped))) E.Stop()
def main(argv=None): 'script main.\n\n parses command line options in sys.argv, unless *argv* is given.\n ' if (not argv): argv = sys.argv parser = E.OptionParser(version='%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $', usage=globals()['__doc__']) parser.add_option('-i', '--test-option', dest='test_option', type='string', help='test option [default=%default].') parser.set_defaults(test_option='test') (options, args) = E.Start(parser, argv=argv) files = glob.glob(os.path.join(os.path.dirname(__file__), '*.pyx')) (ninput, nskipped, noutput) = (0, 0, 0) for f in files: E.info(('rebuilding %s' % f)) ninput += 1 (prefix, suffix) = os.path.splitext(f) for ext in ('.c', '.pyxbldc'): try: os.remove((prefix + ext)) except OSError: pass (dirname, basename) = os.path.split(prefix) assert basename.startswith('_') scriptname = (os.path.join(dirname, basename[1:]) + '.py') if (not os.path.exists(scriptname)): E.warn(('script %s does not exist - skipped' % scriptname)) nskipped += 1 continue E.info(('compiling %s' % scriptname)) os.system(('%s %s --help > /dev/null' % (sys.executable, scriptname))) noutput += 1 E.info(('ninput=%i, noutput=%i, nskipped=%i' % (ninput, noutput, nskipped))) E.Stop()<|docstring|>script main. parses command line options in sys.argv, unless *argv* is given.<|endoftext|>
c8aafbfc863321c798ad75ec9da334caa77fb38140b5d227d495fa74d114f33e
def receive(self, noparse=0): 'Receives data from Scratch\n Arguments:\n noparse: 0 to pass message through a parser and return the message as a data structure\n 1 to not parse message, but format as a string\n 2 to not parse message and not format as a string (returns raw message)\n ' try: mess = self.connection.recv(4) if (not mess): return None (messlen,) = struct.unpack('!I', mess) messlen += 4 while (len(mess) < messlen): mess += self.connection.recv((messlen - len(mess))) except socket.error as exc: (errno, message) = exc.args raise ScratchConnectionError(errno, message) if (not mess): return None if (noparse == 0): return self._parse_message(repr(mess)) if (noparse == 1): return repr(mess) elif (noparse == 2): return mess else: return self._parse_message(repr(mess))
Receives data from Scratch Arguments: noparse: 0 to pass message through a parser and return the message as a data structure 1 to not parse message, but format as a string 2 to not parse message and not format as a string (returns raw message)
scratch/__init__.py
receive
qihboy/py-scratch
2
python
def receive(self, noparse=0): 'Receives data from Scratch\n Arguments:\n noparse: 0 to pass message through a parser and return the message as a data structure\n 1 to not parse message, but format as a string\n 2 to not parse message and not format as a string (returns raw message)\n ' try: mess = self.connection.recv(4) if (not mess): return None (messlen,) = struct.unpack('!I', mess) messlen += 4 while (len(mess) < messlen): mess += self.connection.recv((messlen - len(mess))) except socket.error as exc: (errno, message) = exc.args raise ScratchConnectionError(errno, message) if (not mess): return None if (noparse == 0): return self._parse_message(repr(mess)) if (noparse == 1): return repr(mess) elif (noparse == 2): return mess else: return self._parse_message(repr(mess))
def receive(self, noparse=0): 'Receives data from Scratch\n Arguments:\n noparse: 0 to pass message through a parser and return the message as a data structure\n 1 to not parse message, but format as a string\n 2 to not parse message and not format as a string (returns raw message)\n ' try: mess = self.connection.recv(4) if (not mess): return None (messlen,) = struct.unpack('!I', mess) messlen += 4 while (len(mess) < messlen): mess += self.connection.recv((messlen - len(mess))) except socket.error as exc: (errno, message) = exc.args raise ScratchConnectionError(errno, message) if (not mess): return None if (noparse == 0): return self._parse_message(repr(mess)) if (noparse == 1): return repr(mess) elif (noparse == 2): return mess else: return self._parse_message(repr(mess))<|docstring|>Receives data from Scratch Arguments: noparse: 0 to pass message through a parser and return the message as a data structure 1 to not parse message, but format as a string 2 to not parse message and not format as a string (returns raw message)<|endoftext|>
b8bfe5612219fcba223c3a3d1ff362590aeeb9482779b81a873863b219340817
def sensorupdate(self, data): 'Takes a dictionary and writes a message using the keys as sensors, and the values as the update values' if (not isinstance(data, dict)): raise TypeError('Expected a dict') message = 'sensor-update' for (k, v) in data.items(): message += (' "%s" %s' % (k, v)) self._send(message)
Takes a dictionary and writes a message using the keys as sensors, and the values as the update values
scratch/__init__.py
sensorupdate
qihboy/py-scratch
2
python
def sensorupdate(self, data): if (not isinstance(data, dict)): raise TypeError('Expected a dict') message = 'sensor-update' for (k, v) in data.items(): message += (' "%s" %s' % (k, v)) self._send(message)
def sensorupdate(self, data): if (not isinstance(data, dict)): raise TypeError('Expected a dict') message = 'sensor-update' for (k, v) in data.items(): message += (' "%s" %s' % (k, v)) self._send(message)<|docstring|>Takes a dictionary and writes a message using the keys as sensors, and the values as the update values<|endoftext|>