body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
6c87e72bb4ec9f50619531de6e35cbe6471441657edf94fdf1f4dfc081189e6e
|
def __actual_send_message(bot, chat_id, text, parse_mode=None, disable_web_page_preview=None, disable_notification=False, reply_to_message_id=None, reply_markup=None, timeout=None):
'\n Try sending markdown and revert to normal text if broken\n :param bot:\n :param chat_id:\n :param text:\n :return:\n '
if (len(text) >= telegram.constants.MAX_MESSAGE_LENGTH):
token = '[...]'
text = (text[:(- len(token))] + token)
try:
bot.sendMessage(chat_id, text=text, parse_mode=ParseMode.MARKDOWN, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)
except TelegramError:
bot.sendMessage(chat_id, text=text, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)
|
Try sending markdown and revert to normal text if broken
:param bot:
:param chat_id:
:param text:
:return:
|
bot_app/messages.py
|
__actual_send_message
|
arthurdk/tinder-telegram-bot
| 16
|
python
|
def __actual_send_message(bot, chat_id, text, parse_mode=None, disable_web_page_preview=None, disable_notification=False, reply_to_message_id=None, reply_markup=None, timeout=None):
'\n Try sending markdown and revert to normal text if broken\n :param bot:\n :param chat_id:\n :param text:\n :return:\n '
if (len(text) >= telegram.constants.MAX_MESSAGE_LENGTH):
token = '[...]'
text = (text[:(- len(token))] + token)
try:
bot.sendMessage(chat_id, text=text, parse_mode=ParseMode.MARKDOWN, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)
except TelegramError:
bot.sendMessage(chat_id, text=text, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)
|
def __actual_send_message(bot, chat_id, text, parse_mode=None, disable_web_page_preview=None, disable_notification=False, reply_to_message_id=None, reply_markup=None, timeout=None):
'\n Try sending markdown and revert to normal text if broken\n :param bot:\n :param chat_id:\n :param text:\n :return:\n '
if (len(text) >= telegram.constants.MAX_MESSAGE_LENGTH):
token = '[...]'
text = (text[:(- len(token))] + token)
try:
bot.sendMessage(chat_id, text=text, parse_mode=ParseMode.MARKDOWN, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)
except TelegramError:
bot.sendMessage(chat_id, text=text, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)<|docstring|>Try sending markdown and revert to normal text if broken
:param bot:
:param chat_id:
:param text:
:return:<|endoftext|>
|
c5fa222b7381ce3cf04c6536a28b75ea00a3cd5b545705b74a85d5b635be7ebd
|
def send_private_message(bot, user_id, text):
'\n Return True if bot was able to actually send private message\n :param bot:\n :param user_id:\n :param text:\n :return:\n '
try:
__actual_send_message(bot=bot, chat_id=user_id, text=text)
return True
except TelegramError as e:
if (e.message == 'Unauthorized'):
return False
|
Return True if bot was able to actually send private message
:param bot:
:param user_id:
:param text:
:return:
|
bot_app/messages.py
|
send_private_message
|
arthurdk/tinder-telegram-bot
| 16
|
python
|
def send_private_message(bot, user_id, text):
'\n Return True if bot was able to actually send private message\n :param bot:\n :param user_id:\n :param text:\n :return:\n '
try:
__actual_send_message(bot=bot, chat_id=user_id, text=text)
return True
except TelegramError as e:
if (e.message == 'Unauthorized'):
return False
|
def send_private_message(bot, user_id, text):
'\n Return True if bot was able to actually send private message\n :param bot:\n :param user_id:\n :param text:\n :return:\n '
try:
__actual_send_message(bot=bot, chat_id=user_id, text=text)
return True
except TelegramError as e:
if (e.message == 'Unauthorized'):
return False<|docstring|>Return True if bot was able to actually send private message
:param bot:
:param user_id:
:param text:
:return:<|endoftext|>
|
14b372e1e7aa5b3738736112c7b40f48050e1326973e634d2733111d113b9f76
|
def send_private_photo(bot, user_id, url, caption):
'\n Return True if bot was able to actually send private photo\n :param caption:\n :return:\n :param bot:\n :param user_id:\n :param url:\n :return:\n '
if (len(caption) >= telegram.constants.MAX_CAPTION_LENGTH):
token = '[...]'
caption = (caption[:(- len(token))] + token)
try:
bot.sendPhoto(user_id, photo=url, caption=caption)
return True
except Unauthorized:
return False
except TelegramError as e:
pass
|
Return True if bot was able to actually send private photo
:param caption:
:return:
:param bot:
:param user_id:
:param url:
:return:
|
bot_app/messages.py
|
send_private_photo
|
arthurdk/tinder-telegram-bot
| 16
|
python
|
def send_private_photo(bot, user_id, url, caption):
'\n Return True if bot was able to actually send private photo\n :param caption:\n :return:\n :param bot:\n :param user_id:\n :param url:\n :return:\n '
if (len(caption) >= telegram.constants.MAX_CAPTION_LENGTH):
token = '[...]'
caption = (caption[:(- len(token))] + token)
try:
bot.sendPhoto(user_id, photo=url, caption=caption)
return True
except Unauthorized:
return False
except TelegramError as e:
pass
|
def send_private_photo(bot, user_id, url, caption):
'\n Return True if bot was able to actually send private photo\n :param caption:\n :return:\n :param bot:\n :param user_id:\n :param url:\n :return:\n '
if (len(caption) >= telegram.constants.MAX_CAPTION_LENGTH):
token = '[...]'
caption = (caption[:(- len(token))] + token)
try:
bot.sendPhoto(user_id, photo=url, caption=caption)
return True
except Unauthorized:
return False
except TelegramError as e:
pass<|docstring|>Return True if bot was able to actually send private photo
:param caption:
:return:
:param bot:
:param user_id:
:param url:
:return:<|endoftext|>
|
0051a61000453e8c752185e49171c85a526b661a325cb18fb06d2094c6f3413d
|
def send_private_link(bot, user_id, url):
'\n Return True if bot was able to actually send private photo\n :param caption:\n :return:\n :param bot:\n :param user_id:\n :param url:\n :return:\n '
try:
__actual_send_message(bot=bot, chat_id=user_id, text=(url + ' '))
return True
except Unauthorized:
return False
except TelegramError as e:
pass
|
Return True if bot was able to actually send private photo
:param caption:
:return:
:param bot:
:param user_id:
:param url:
:return:
|
bot_app/messages.py
|
send_private_link
|
arthurdk/tinder-telegram-bot
| 16
|
python
|
def send_private_link(bot, user_id, url):
'\n Return True if bot was able to actually send private photo\n :param caption:\n :return:\n :param bot:\n :param user_id:\n :param url:\n :return:\n '
try:
__actual_send_message(bot=bot, chat_id=user_id, text=(url + ' '))
return True
except Unauthorized:
return False
except TelegramError as e:
pass
|
def send_private_link(bot, user_id, url):
'\n Return True if bot was able to actually send private photo\n :param caption:\n :return:\n :param bot:\n :param user_id:\n :param url:\n :return:\n '
try:
__actual_send_message(bot=bot, chat_id=user_id, text=(url + ' '))
return True
except Unauthorized:
return False
except TelegramError as e:
pass<|docstring|>Return True if bot was able to actually send private photo
:param caption:
:return:
:param bot:
:param user_id:
:param url:
:return:<|endoftext|>
|
3710802e3c2c66abdaea026de06fe48da7aed4ea0c78bb3257946d8db295c7c4
|
def notify_send_token(bot, chat_id, reply_to_message_id, is_group, group_name, reply_markup=[[]]):
'\n\n :param bot:\n :param chat_id:\n :param reply_to_message_id:\n :param is_group:\n :param group_name:\n :param reply_markup:\n :return:\n '
msg = (messages['send_token'] % bot.name)
if is_group:
msg += (' for the group %s' % group_name)
__actual_send_message(bot=bot, chat_id=chat_id, text=msg, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, parse_mode=ParseMode.MARKDOWN)
|
:param bot:
:param chat_id:
:param reply_to_message_id:
:param is_group:
:param group_name:
:param reply_markup:
:return:
|
bot_app/messages.py
|
notify_send_token
|
arthurdk/tinder-telegram-bot
| 16
|
python
|
def notify_send_token(bot, chat_id, reply_to_message_id, is_group, group_name, reply_markup=[[]]):
'\n\n :param bot:\n :param chat_id:\n :param reply_to_message_id:\n :param is_group:\n :param group_name:\n :param reply_markup:\n :return:\n '
msg = (messages['send_token'] % bot.name)
if is_group:
msg += (' for the group %s' % group_name)
__actual_send_message(bot=bot, chat_id=chat_id, text=msg, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, parse_mode=ParseMode.MARKDOWN)
|
def notify_send_token(bot, chat_id, reply_to_message_id, is_group, group_name, reply_markup=[[]]):
'\n\n :param bot:\n :param chat_id:\n :param reply_to_message_id:\n :param is_group:\n :param group_name:\n :param reply_markup:\n :return:\n '
msg = (messages['send_token'] % bot.name)
if is_group:
msg += (' for the group %s' % group_name)
__actual_send_message(bot=bot, chat_id=chat_id, text=msg, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, parse_mode=ParseMode.MARKDOWN)<|docstring|>:param bot:
:param chat_id:
:param reply_to_message_id:
:param is_group:
:param group_name:
:param reply_markup:
:return:<|endoftext|>
|
1d6272f3be7673a2892bc5b783819c2b79357439c58ee215ad1ca1f8cfa6674f
|
def send_custom_message(bot, chat_id, message, parse_mode=None, disable_web_page_preview=None, disable_notification=False, reply_to_message_id=None, reply_markup=None, timeout=None):
'\n Send a custom message (not predefined)\n :param timeout:\n :param reply_markup:\n :param reply_to_message_id:\n :param parse_mode:\n :param disable_notification:\n :param disable_web_page_preview:\n :param bot:\n :param chat_id:\n :param message:\n :return:\n '
__actual_send_message(bot, chat_id=chat_id, text=message, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)
|
Send a custom message (not predefined)
:param timeout:
:param reply_markup:
:param reply_to_message_id:
:param parse_mode:
:param disable_notification:
:param disable_web_page_preview:
:param bot:
:param chat_id:
:param message:
:return:
|
bot_app/messages.py
|
send_custom_message
|
arthurdk/tinder-telegram-bot
| 16
|
python
|
def send_custom_message(bot, chat_id, message, parse_mode=None, disable_web_page_preview=None, disable_notification=False, reply_to_message_id=None, reply_markup=None, timeout=None):
'\n Send a custom message (not predefined)\n :param timeout:\n :param reply_markup:\n :param reply_to_message_id:\n :param parse_mode:\n :param disable_notification:\n :param disable_web_page_preview:\n :param bot:\n :param chat_id:\n :param message:\n :return:\n '
__actual_send_message(bot, chat_id=chat_id, text=message, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)
|
def send_custom_message(bot, chat_id, message, parse_mode=None, disable_web_page_preview=None, disable_notification=False, reply_to_message_id=None, reply_markup=None, timeout=None):
'\n Send a custom message (not predefined)\n :param timeout:\n :param reply_markup:\n :param reply_to_message_id:\n :param parse_mode:\n :param disable_notification:\n :param disable_web_page_preview:\n :param bot:\n :param chat_id:\n :param message:\n :return:\n '
__actual_send_message(bot, chat_id=chat_id, text=message, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, timeout=timeout)<|docstring|>Send a custom message (not predefined)
:param timeout:
:param reply_markup:
:param reply_to_message_id:
:param parse_mode:
:param disable_notification:
:param disable_web_page_preview:
:param bot:
:param chat_id:
:param message:
:return:<|endoftext|>
|
0a4164e1ae5c1653bb4fc86e144f9b6d0857a2379d32e3f80a038b1e5f34ec94
|
def test_validate(self):
'\n Load and validate a graph from file\n :return:\n '
self.g.validate_graph()
|
Load and validate a graph from file
:return:
|
test/networkxx_pg_disjoint_test.py
|
test_validate
|
fabric-testbed/InformationModel
| 6
|
python
|
def test_validate(self):
'\n Load and validate a graph from file\n :return:\n '
self.g.validate_graph()
|
def test_validate(self):
'\n Load and validate a graph from file\n :return:\n '
self.g.validate_graph()<|docstring|>Load and validate a graph from file
:return:<|endoftext|>
|
a22aee5ac0963089749be58768a69e7f41979db8d7dc7c8251cd8fcb2553f905
|
def test_basic(self):
'\n Basic create/delete tests\n :return:\n '
nx_imp = nx_graph.NetworkXGraphImporter()
nx_pg = nx_graph.NetworkXPropertyGraph(graph_id='beef-beed', importer=nx_imp)
nx_pg.add_node(node_id='dead-beef', label=ABCPropertyGraphConstants.CLASS_NetworkNode)
nx_pg.add_node(node_id='beef-dead', label=ABCPropertyGraphConstants.CLASS_Component, props={'some_property': 'some_value'})
(_, props) = nx_pg.get_node_properties(node_id='beef-dead')
print(props)
assert (props.get('some_property', None) is not None)
nx_pg.unset_node_property(node_id='beef-dead', prop_name='some_property')
(_, props) = nx_pg.get_node_properties(node_id='beef-dead')
print(props)
assert (props.get('some_property', None) is None)
nx_pg.add_link(node_a='dead-beef', node_b='beef-dead', rel=ABCPropertyGraphConstants.REL_HAS, props={'some_prop': 2})
(lt, props) = nx_pg.get_link_properties(node_a='dead-beef', node_b='beef-dead')
assert (lt == ABCPropertyGraph.REL_HAS)
assert ('some_prop' in props.keys())
nx_pg.unset_link_property(node_a='dead-beef', node_b='beef-dead', kind=ABCPropertyGraph.REL_HAS, prop_name='some_prop')
(lt, props) = nx_pg.get_link_properties(node_a='dead-beef', node_b='beef-dead')
assert ('some_prop' not in props.keys())
nx_imp.delete_all_graphs()
|
Basic create/delete tests
:return:
|
test/networkxx_pg_disjoint_test.py
|
test_basic
|
fabric-testbed/InformationModel
| 6
|
python
|
def test_basic(self):
'\n Basic create/delete tests\n :return:\n '
nx_imp = nx_graph.NetworkXGraphImporter()
nx_pg = nx_graph.NetworkXPropertyGraph(graph_id='beef-beed', importer=nx_imp)
nx_pg.add_node(node_id='dead-beef', label=ABCPropertyGraphConstants.CLASS_NetworkNode)
nx_pg.add_node(node_id='beef-dead', label=ABCPropertyGraphConstants.CLASS_Component, props={'some_property': 'some_value'})
(_, props) = nx_pg.get_node_properties(node_id='beef-dead')
print(props)
assert (props.get('some_property', None) is not None)
nx_pg.unset_node_property(node_id='beef-dead', prop_name='some_property')
(_, props) = nx_pg.get_node_properties(node_id='beef-dead')
print(props)
assert (props.get('some_property', None) is None)
nx_pg.add_link(node_a='dead-beef', node_b='beef-dead', rel=ABCPropertyGraphConstants.REL_HAS, props={'some_prop': 2})
(lt, props) = nx_pg.get_link_properties(node_a='dead-beef', node_b='beef-dead')
assert (lt == ABCPropertyGraph.REL_HAS)
assert ('some_prop' in props.keys())
nx_pg.unset_link_property(node_a='dead-beef', node_b='beef-dead', kind=ABCPropertyGraph.REL_HAS, prop_name='some_prop')
(lt, props) = nx_pg.get_link_properties(node_a='dead-beef', node_b='beef-dead')
assert ('some_prop' not in props.keys())
nx_imp.delete_all_graphs()
|
def test_basic(self):
'\n Basic create/delete tests\n :return:\n '
nx_imp = nx_graph.NetworkXGraphImporter()
nx_pg = nx_graph.NetworkXPropertyGraph(graph_id='beef-beed', importer=nx_imp)
nx_pg.add_node(node_id='dead-beef', label=ABCPropertyGraphConstants.CLASS_NetworkNode)
nx_pg.add_node(node_id='beef-dead', label=ABCPropertyGraphConstants.CLASS_Component, props={'some_property': 'some_value'})
(_, props) = nx_pg.get_node_properties(node_id='beef-dead')
print(props)
assert (props.get('some_property', None) is not None)
nx_pg.unset_node_property(node_id='beef-dead', prop_name='some_property')
(_, props) = nx_pg.get_node_properties(node_id='beef-dead')
print(props)
assert (props.get('some_property', None) is None)
nx_pg.add_link(node_a='dead-beef', node_b='beef-dead', rel=ABCPropertyGraphConstants.REL_HAS, props={'some_prop': 2})
(lt, props) = nx_pg.get_link_properties(node_a='dead-beef', node_b='beef-dead')
assert (lt == ABCPropertyGraph.REL_HAS)
assert ('some_prop' in props.keys())
nx_pg.unset_link_property(node_a='dead-beef', node_b='beef-dead', kind=ABCPropertyGraph.REL_HAS, prop_name='some_prop')
(lt, props) = nx_pg.get_link_properties(node_a='dead-beef', node_b='beef-dead')
assert ('some_prop' not in props.keys())
nx_imp.delete_all_graphs()<|docstring|>Basic create/delete tests
:return:<|endoftext|>
|
0dab905332941384073842734258f3ccd942efebe676d2fc246f5ef2eb484bb0
|
def test_node_properties(self):
'\n Test node property manipulation\n :return:\n '
favs = self._find_favorite_nodes()
assert ((favs.get('Worker1'), None) is not None)
worker1 = favs['Worker1']
(worker1_labels, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ('NetworkNode' in worker1_labels)
assert (('Capacities' in worker1_props) and (worker1_props['Type'] == 'Server') and (worker1_props['Model'] == 'Dell R7525'))
with self.assertRaises(nx_graph.PropertyGraphQueryException):
self.g.update_node_property(node_id=worker1, prop_name='Class', prop_val='NewNetworkNode')
with self.assertRaises(nx_graph.PropertyGraphQueryException):
props = dict()
props['Class'] = 'NewClass'
self.g.update_node_properties(node_id=worker1, props=props)
with self.assertRaises(nx_graph.PropertyGraphQueryException):
self.g.update_nodes_property(prop_name='Class', prop_val='NewNetworkNode')
self.g.update_node_property(node_id=worker1, prop_name='Type', prop_val='NewServer')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert (worker1_props['Type'] == 'NewServer')
new_props = dict()
new_props['Type'] = 'Server'
new_props['RandomProp'] = 'RandomVal'
self.g.update_node_properties(node_id=worker1, props=new_props)
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ((worker1_props['Type'] == 'Server') and (worker1_props['RandomProp'] == 'RandomVal'))
self.g.update_nodes_property(prop_name='RandomProp', prop_val='NewRandomVal')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ((worker1_props['Type'] == 'Server') and (worker1_props['RandomProp'] == 'NewRandomVal'))
self.g.unset_node_property(node_id=worker1, prop_name='RandomProp')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert (worker1_props.get('RandomProp', None) is None)
|
Test node property manipulation
:return:
|
test/networkxx_pg_disjoint_test.py
|
test_node_properties
|
fabric-testbed/InformationModel
| 6
|
python
|
def test_node_properties(self):
'\n Test node property manipulation\n :return:\n '
favs = self._find_favorite_nodes()
assert ((favs.get('Worker1'), None) is not None)
worker1 = favs['Worker1']
(worker1_labels, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ('NetworkNode' in worker1_labels)
assert (('Capacities' in worker1_props) and (worker1_props['Type'] == 'Server') and (worker1_props['Model'] == 'Dell R7525'))
with self.assertRaises(nx_graph.PropertyGraphQueryException):
self.g.update_node_property(node_id=worker1, prop_name='Class', prop_val='NewNetworkNode')
with self.assertRaises(nx_graph.PropertyGraphQueryException):
props = dict()
props['Class'] = 'NewClass'
self.g.update_node_properties(node_id=worker1, props=props)
with self.assertRaises(nx_graph.PropertyGraphQueryException):
self.g.update_nodes_property(prop_name='Class', prop_val='NewNetworkNode')
self.g.update_node_property(node_id=worker1, prop_name='Type', prop_val='NewServer')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert (worker1_props['Type'] == 'NewServer')
new_props = dict()
new_props['Type'] = 'Server'
new_props['RandomProp'] = 'RandomVal'
self.g.update_node_properties(node_id=worker1, props=new_props)
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ((worker1_props['Type'] == 'Server') and (worker1_props['RandomProp'] == 'RandomVal'))
self.g.update_nodes_property(prop_name='RandomProp', prop_val='NewRandomVal')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ((worker1_props['Type'] == 'Server') and (worker1_props['RandomProp'] == 'NewRandomVal'))
self.g.unset_node_property(node_id=worker1, prop_name='RandomProp')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert (worker1_props.get('RandomProp', None) is None)
|
def test_node_properties(self):
'\n Test node property manipulation\n :return:\n '
favs = self._find_favorite_nodes()
assert ((favs.get('Worker1'), None) is not None)
worker1 = favs['Worker1']
(worker1_labels, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ('NetworkNode' in worker1_labels)
assert (('Capacities' in worker1_props) and (worker1_props['Type'] == 'Server') and (worker1_props['Model'] == 'Dell R7525'))
with self.assertRaises(nx_graph.PropertyGraphQueryException):
self.g.update_node_property(node_id=worker1, prop_name='Class', prop_val='NewNetworkNode')
with self.assertRaises(nx_graph.PropertyGraphQueryException):
props = dict()
props['Class'] = 'NewClass'
self.g.update_node_properties(node_id=worker1, props=props)
with self.assertRaises(nx_graph.PropertyGraphQueryException):
self.g.update_nodes_property(prop_name='Class', prop_val='NewNetworkNode')
self.g.update_node_property(node_id=worker1, prop_name='Type', prop_val='NewServer')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert (worker1_props['Type'] == 'NewServer')
new_props = dict()
new_props['Type'] = 'Server'
new_props['RandomProp'] = 'RandomVal'
self.g.update_node_properties(node_id=worker1, props=new_props)
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ((worker1_props['Type'] == 'Server') and (worker1_props['RandomProp'] == 'RandomVal'))
self.g.update_nodes_property(prop_name='RandomProp', prop_val='NewRandomVal')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert ((worker1_props['Type'] == 'Server') and (worker1_props['RandomProp'] == 'NewRandomVal'))
self.g.unset_node_property(node_id=worker1, prop_name='RandomProp')
(_, worker1_props) = self.g.get_node_properties(node_id=worker1)
assert (worker1_props.get('RandomProp', None) is None)<|docstring|>Test node property manipulation
:return:<|endoftext|>
|
25155b8db017ffbb1c8ee577d4e1b49c89d5418ba6a6ddf65abe337be331c7d0
|
def list(self, **kwargs):
'Retrieve a list of policies.\n\n :rtype: list of :class:`policy`.\n '
def paginate(params):
'Paginate policies, even if more than API limit.'
current_limit = int((params.get('limit') or 0))
url = ('/policies?%s' % parse.urlencode(params, True))
(policies, resq) = self._list(url, 'policies')
for policy in policies:
(yield policy)
num_policies = len(policies)
remaining_limit = (current_limit - num_policies)
if ((remaining_limit > 0) and (num_policies > 0)):
params['limit'] = remaining_limit
params['marker'] = policy.id
for policy in paginate(params):
(yield policy)
params = {}
if ('filters' in kwargs):
filters = kwargs.pop('filters')
params.update(filters)
for (key, value) in six.iteritems(kwargs):
if value:
params[key] = value
return paginate(params)
|
Retrieve a list of policies.
:rtype: list of :class:`policy`.
|
bileanclient/v1/policies.py
|
list
|
lvdongbing/python-bileanclient-1
| 0
|
python
|
def list(self, **kwargs):
'Retrieve a list of policies.\n\n :rtype: list of :class:`policy`.\n '
def paginate(params):
'Paginate policies, even if more than API limit.'
current_limit = int((params.get('limit') or 0))
url = ('/policies?%s' % parse.urlencode(params, True))
(policies, resq) = self._list(url, 'policies')
for policy in policies:
(yield policy)
num_policies = len(policies)
remaining_limit = (current_limit - num_policies)
if ((remaining_limit > 0) and (num_policies > 0)):
params['limit'] = remaining_limit
params['marker'] = policy.id
for policy in paginate(params):
(yield policy)
params = {}
if ('filters' in kwargs):
filters = kwargs.pop('filters')
params.update(filters)
for (key, value) in six.iteritems(kwargs):
if value:
params[key] = value
return paginate(params)
|
def list(self, **kwargs):
'Retrieve a list of policies.\n\n :rtype: list of :class:`policy`.\n '
def paginate(params):
'Paginate policies, even if more than API limit.'
current_limit = int((params.get('limit') or 0))
url = ('/policies?%s' % parse.urlencode(params, True))
(policies, resq) = self._list(url, 'policies')
for policy in policies:
(yield policy)
num_policies = len(policies)
remaining_limit = (current_limit - num_policies)
if ((remaining_limit > 0) and (num_policies > 0)):
params['limit'] = remaining_limit
params['marker'] = policy.id
for policy in paginate(params):
(yield policy)
params = {}
if ('filters' in kwargs):
filters = kwargs.pop('filters')
params.update(filters)
for (key, value) in six.iteritems(kwargs):
if value:
params[key] = value
return paginate(params)<|docstring|>Retrieve a list of policies.
:rtype: list of :class:`policy`.<|endoftext|>
|
1c6df84a3e447ac30dfdaf0b85ce761c2c1925a144f686c5a3df5adc964d143d
|
def create(self, **kwargs):
'Create a new policy.'
(resq, body) = self.client.post(url, data=kwargs)
return self.resource_class(self, body.get('policy'), loaded=True)
|
Create a new policy.
|
bileanclient/v1/policies.py
|
create
|
lvdongbing/python-bileanclient-1
| 0
|
python
|
def create(self, **kwargs):
(resq, body) = self.client.post(url, data=kwargs)
return self.resource_class(self, body.get('policy'), loaded=True)
|
def create(self, **kwargs):
(resq, body) = self.client.post(url, data=kwargs)
return self.resource_class(self, body.get('policy'), loaded=True)<|docstring|>Create a new policy.<|endoftext|>
|
9cb64b1ad9bee1288b9d8bf565eac44e94cbb56d8d54346d656baa16861be4a5
|
def get(self, policy_id):
'Get a specific policy.'
url = ('/policies/%s' % parse.quote(str(policy_id)))
(resq, body) = self.client.get(url)
return self.resource_class(self, body.get('policy'), loaded=True)
|
Get a specific policy.
|
bileanclient/v1/policies.py
|
get
|
lvdongbing/python-bileanclient-1
| 0
|
python
|
def get(self, policy_id):
url = ('/policies/%s' % parse.quote(str(policy_id)))
(resq, body) = self.client.get(url)
return self.resource_class(self, body.get('policy'), loaded=True)
|
def get(self, policy_id):
url = ('/policies/%s' % parse.quote(str(policy_id)))
(resq, body) = self.client.get(url)
return self.resource_class(self, body.get('policy'), loaded=True)<|docstring|>Get a specific policy.<|endoftext|>
|
f4e2a2ad773e276eb85d6abaaef814ff25f2996ccb83c145f2138f45a8aa0def
|
def action(self, policy_id, **kwargs):
'Perform specified action on a policy.'
url = ('/policies/%s/action' % parse.quote(str(policy_id)))
(resq, body) = self.client.post(url, data=kwargs)
return self.resource_class(self, body.get('policy'), loaded=True)
|
Perform specified action on a policy.
|
bileanclient/v1/policies.py
|
action
|
lvdongbing/python-bileanclient-1
| 0
|
python
|
def action(self, policy_id, **kwargs):
url = ('/policies/%s/action' % parse.quote(str(policy_id)))
(resq, body) = self.client.post(url, data=kwargs)
return self.resource_class(self, body.get('policy'), loaded=True)
|
def action(self, policy_id, **kwargs):
url = ('/policies/%s/action' % parse.quote(str(policy_id)))
(resq, body) = self.client.post(url, data=kwargs)
return self.resource_class(self, body.get('policy'), loaded=True)<|docstring|>Perform specified action on a policy.<|endoftext|>
|
047731ebc3419bfb7e3aceba63221a82542d9925102213050fc0c638b66cbf01
|
def delete(self, policy_id):
'Delete a specific policy.'
return self._delete(('/policies/%s' % parse.quote(str(policy_id))))
|
Delete a specific policy.
|
bileanclient/v1/policies.py
|
delete
|
lvdongbing/python-bileanclient-1
| 0
|
python
|
def delete(self, policy_id):
return self._delete(('/policies/%s' % parse.quote(str(policy_id))))
|
def delete(self, policy_id):
return self._delete(('/policies/%s' % parse.quote(str(policy_id))))<|docstring|>Delete a specific policy.<|endoftext|>
|
dcffb0865af96cb293acb647c18f92c0696ab7886468d5b0b46d68c3beff47e3
|
def paginate(params):
'Paginate policies, even if more than API limit.'
current_limit = int((params.get('limit') or 0))
url = ('/policies?%s' % parse.urlencode(params, True))
(policies, resq) = self._list(url, 'policies')
for policy in policies:
(yield policy)
num_policies = len(policies)
remaining_limit = (current_limit - num_policies)
if ((remaining_limit > 0) and (num_policies > 0)):
params['limit'] = remaining_limit
params['marker'] = policy.id
for policy in paginate(params):
(yield policy)
|
Paginate policies, even if more than API limit.
|
bileanclient/v1/policies.py
|
paginate
|
lvdongbing/python-bileanclient-1
| 0
|
python
|
def paginate(params):
current_limit = int((params.get('limit') or 0))
url = ('/policies?%s' % parse.urlencode(params, True))
(policies, resq) = self._list(url, 'policies')
for policy in policies:
(yield policy)
num_policies = len(policies)
remaining_limit = (current_limit - num_policies)
if ((remaining_limit > 0) and (num_policies > 0)):
params['limit'] = remaining_limit
params['marker'] = policy.id
for policy in paginate(params):
(yield policy)
|
def paginate(params):
current_limit = int((params.get('limit') or 0))
url = ('/policies?%s' % parse.urlencode(params, True))
(policies, resq) = self._list(url, 'policies')
for policy in policies:
(yield policy)
num_policies = len(policies)
remaining_limit = (current_limit - num_policies)
if ((remaining_limit > 0) and (num_policies > 0)):
params['limit'] = remaining_limit
params['marker'] = policy.id
for policy in paginate(params):
(yield policy)<|docstring|>Paginate policies, even if more than API limit.<|endoftext|>
|
dade029aa525b2eb07be5840d7ae6179b1247b8332b4843e12037e75d7216d6f
|
def Article_Summary(news_article, ratio):
'Makes a news article on NPR smaller'
url = news_article
page = requests.get(url).text
soup = BeautifulSoup(page)
headline = soup.find('h1').get_text()
p_tags = soup.find_all('p')
p_tags_text = [tag.get_text().strip() for tag in p_tags]
sentence_list = [sentence for sentence in p_tags_text if (not ('\n' in sentence))]
sentence_list = [sentence for sentence in sentence_list if ('.' in sentence)]
article = ' '.join(sentence_list)
summary = summarize(article, ratio=ratio)
"# A clean output\n print(f'\nLength of original article: {len(article)}')\n print(f'Length of summary: {len(summary)}')\n print(f'Headline: {headline} \n')\n print(f'Article Summary:\n{textwrap.fill(summary, 120)}')"
return summary
|
Makes a news article on NPR smaller
|
scraper_functions.py
|
Article_Summary
|
ethanmjansen/BS4_Scraper
| 0
|
python
|
def Article_Summary(news_article, ratio):
url = news_article
page = requests.get(url).text
soup = BeautifulSoup(page)
headline = soup.find('h1').get_text()
p_tags = soup.find_all('p')
p_tags_text = [tag.get_text().strip() for tag in p_tags]
sentence_list = [sentence for sentence in p_tags_text if (not ('\n' in sentence))]
sentence_list = [sentence for sentence in sentence_list if ('.' in sentence)]
article = ' '.join(sentence_list)
summary = summarize(article, ratio=ratio)
"# A clean output\n print(f'\nLength of original article: {len(article)}')\n print(f'Length of summary: {len(summary)}')\n print(f'Headline: {headline} \n')\n print(f'Article Summary:\n{textwrap.fill(summary, 120)}')"
return summary
|
def Article_Summary(news_article, ratio):
url = news_article
page = requests.get(url).text
soup = BeautifulSoup(page)
headline = soup.find('h1').get_text()
p_tags = soup.find_all('p')
p_tags_text = [tag.get_text().strip() for tag in p_tags]
sentence_list = [sentence for sentence in p_tags_text if (not ('\n' in sentence))]
sentence_list = [sentence for sentence in sentence_list if ('.' in sentence)]
article = ' '.join(sentence_list)
summary = summarize(article, ratio=ratio)
"# A clean output\n print(f'\nLength of original article: {len(article)}')\n print(f'Length of summary: {len(summary)}')\n print(f'Headline: {headline} \n')\n print(f'Article Summary:\n{textwrap.fill(summary, 120)}')"
return summary<|docstring|>Makes a news article on NPR smaller<|endoftext|>
|
b800436fd1dc76ca1403a8788940f6e3823a38aead62237e9813a1a604ba86e0
|
def __init__(self, project: Project, target_name: str=str()):
    """Profile constructor. Mainly just needs an initted Project object.

    Args:
        project (Project): initted project object.
        target_name (str, optional): mainly used in unit testing to
            override the project name; pretty useless in other cases.
            Defaults to str().
    """
    # The profile is keyed by the project's name.
    self.profile_name = project.project_name
    self.target_name = target_name
    # Parsed profile contents; presumably filled by read_profile() — confirm.
    self.profile_dict: Dict[(str, str)] = dict()
    # Profile keys that must not be None for the profile to be usable.
    self.cannot_be_none = {'db_type', 'guser'}
    self.profile_dir: Path = project.profile_dir
    # Google credentials are expected in a 'google' subdirectory of the
    # profile directory.
    self.google_credentials_dir = Path(project.profile_dir, 'google').resolve()
    self.read_profile()
    logger.debug(f'PROFILE_DIR {self.profile_dir}')
    logger.debug(f'PROFILE_NAME: {self.profile_name}')
|
Profile constructor. Mainly just needs an initted Project object.
Args:
project (Project): initted project object
target_name (str, optional): Mainly used in unit testing if you want to override the
project name. Pretty useless in all other practice cases I think.
Defaults to str().
|
sheetwork/core/config/profile.py
|
__init__
|
jflairie/sheetwork
| 1
|
python
|
def __init__(self, project: Project, target_name: str=str()):
'Profile constructor. Mainly just needs an initted Project object.\n\n Args:\n project (Project): initted project object\n target_name (str, optional): Mainly used in unit testing if you want to override the\n project name. Pretty useless in all other practice cases I think.\n Defaults to str().\n '
self.profile_name = project.project_name
self.target_name = target_name
self.profile_dict: Dict[(str, str)] = dict()
self.cannot_be_none = {'db_type', 'guser'}
self.profile_dir: Path = project.profile_dir
self.google_credentials_dir = Path(project.profile_dir, 'google').resolve()
self.read_profile()
logger.debug(f'PROFILE_DIR {self.profile_dir}')
logger.debug(f'PROFILE_NAME: {self.profile_name}')
|
def __init__(self, project: Project, target_name: str=str()):
'Profile constructor. Mainly just needs an initted Project object.\n\n Args:\n project (Project): initted project object\n target_name (str, optional): Mainly used in unit testing if you want to override the\n project name. Pretty useless in all other practice cases I think.\n Defaults to str().\n '
self.profile_name = project.project_name
self.target_name = target_name
self.profile_dict: Dict[(str, str)] = dict()
self.cannot_be_none = {'db_type', 'guser'}
self.profile_dir: Path = project.profile_dir
self.google_credentials_dir = Path(project.profile_dir, 'google').resolve()
self.read_profile()
logger.debug(f'PROFILE_DIR {self.profile_dir}')
logger.debug(f'PROFILE_NAME: {self.profile_name}')<|docstring|>Profile constructor. Mainly just needs an initted Project object.
Args:
project (Project): initted project object
target_name (str, optional): Mainly used in unit testing if you want to override the
project name. Pretty useless in all other practice cases I think.
Defaults to str().<|endoftext|>
|
c659ce2e01322c80f9f0ee298d53a9f7032af8fb5a66d21da9d94cc9cc6d02b0
|
def test_qim():
    """Round-trip test of QIM.embed / QIM.detect on random byte-valued data.

    NOTE(review): `while True` makes this loop forever — it is a manual
    stress test, not a terminating unit test.
    """
    l = 10000
    delta = 8
    qim = QIM(delta)
    while True:
        # Random host signal in [0, 255) and a random binary message.
        x = np.random.randint(0, 255, l).astype(float)
        msg = qim.random_msg(l)
        y = qim.embed(x, msg)
        (z_detected, msg_detected) = qim.detect(y)
        print(x)
        print(y)
        print(z_detected)
        print(msg)
        print(msg_detected)
        # With no channel noise, detection must exactly recover both the
        # message and the quantized (watermarked) signal.
        assert np.allclose(msg, msg_detected)
        assert np.allclose(y, z_detected)
|
tests the embed and detect methods of class QIM
|
qim.py
|
test_qim
|
pl561/QuantizationIndexModulation
| 3
|
python
|
def test_qim():
'\n \n '
l = 10000
delta = 8
qim = QIM(delta)
while True:
x = np.random.randint(0, 255, l).astype(float)
msg = qim.random_msg(l)
y = qim.embed(x, msg)
(z_detected, msg_detected) = qim.detect(y)
print(x)
print(y)
print(z_detected)
print(msg)
print(msg_detected)
assert np.allclose(msg, msg_detected)
assert np.allclose(y, z_detected)
|
def test_qim():
'\n \n '
l = 10000
delta = 8
qim = QIM(delta)
while True:
x = np.random.randint(0, 255, l).astype(float)
msg = qim.random_msg(l)
y = qim.embed(x, msg)
(z_detected, msg_detected) = qim.detect(y)
print(x)
print(y)
print(z_detected)
print(msg)
print(msg_detected)
assert np.allclose(msg, msg_detected)
assert np.allclose(y, z_detected)<|docstring|>tests the embed and detect methods of class QIM<|endoftext|>
|
1188478b4d64f5477043d6044edfcc047a10551302ece08a1c29a94608dcce69
|
def embed(self, x, m):
    """Embed the bit vector *m* into *x* via dither-modulation QIM.

    Each sample of *x* is quantized to the lattice of step ``self.delta``
    and then shifted by +/- delta/4 according to the corresponding bit.

    :param x: vector of values to be quantized individually
    :param m: binary vector of bits to be embedded
    :return: the quantized (watermarked) vector
    """
    step = self.delta
    samples = x.astype(float)
    # Bit 0 shifts by -delta/4, bit 1 by +delta/4.
    dither = ((((- 1) ** (m + 1)) * step) / 4.0)
    return ((np.round((samples / step)) * step) + dither)
|
x is a vector of values to be quantized individually
m is a binary vector of bits to be embeded
returns: a quantized vector y
|
qim.py
|
embed
|
pl561/QuantizationIndexModulation
| 3
|
python
|
def embed(self, x, m):
'\n x is a vector of values to be quantized individually\n m is a binary vector of bits to be embeded\n returns: a quantized vector y\n '
x = x.astype(float)
d = self.delta
y = ((np.round((x / d)) * d) + ((((- 1) ** (m + 1)) * d) / 4.0))
return y
|
def embed(self, x, m):
'\n x is a vector of values to be quantized individually\n m is a binary vector of bits to be embeded\n returns: a quantized vector y\n '
x = x.astype(float)
d = self.delta
y = ((np.round((x / d)) * d) + ((((- 1) ** (m + 1)) * d) / 4.0))
return y<|docstring|>x is a vector of values to be quantized individually
m is a binary vector of bits to be embeded
returns: a quantized vector y<|endoftext|>
|
318aca4b8db3025d0a9b1899b47432a5a5ef4687e13293a60562d0d3c2054f5a
|
def detect(self, z):
    """Nearest-quantizer decoding of a (possibly modified) received vector.

    :param z: received vector, potentially modified by channel noise
    :return: (z_detected, m_detected) — the re-quantized vector and the
        decoded binary message, both with the original shape of *z*
    """
    shape = z.shape
    received = z.flatten()
    # Candidate reconstructions under the bit-0 and bit-1 quantizers.
    cand0 = self.embed(received, 0)
    cand1 = self.embed(received, 1)
    # Pick, per sample, whichever candidate lies closer to the received
    # value (ties decode as 1, matching the original element-wise loop).
    bit_is_one = (np.abs((received - cand0)) >= np.abs((received - cand1)))
    z_detected = np.where(bit_is_one, cand1, cand0).reshape(shape)
    m_detected = bit_is_one.astype(int).reshape(shape)
    return (z_detected, m_detected)
|
z is the received vector, potentially modified
returns: a detected vector z_detected and a detected message m_detected
|
qim.py
|
detect
|
pl561/QuantizationIndexModulation
| 3
|
python
|
def detect(self, z):
'\n z is the received vector, potentially modified\n returns: a detected vector z_detected and a detected message m_detected\n '
shape = z.shape
z = z.flatten()
m_detected = np.zeros_like(z, dtype=float)
z_detected = np.zeros_like(z, dtype=float)
z0 = self.embed(z, 0)
z1 = self.embed(z, 1)
d0 = np.abs((z - z0))
d1 = np.abs((z - z1))
gen = zip(range(len(z_detected)), d0, d1)
for (i, dd0, dd1) in gen:
if (dd0 < dd1):
m_detected[i] = 0
z_detected[i] = z0[i]
else:
m_detected[i] = 1
z_detected[i] = z1[i]
z_detected = z_detected.reshape(shape)
m_detected = m_detected.reshape(shape)
return (z_detected, m_detected.astype(int))
|
def detect(self, z):
'\n z is the received vector, potentially modified\n returns: a detected vector z_detected and a detected message m_detected\n '
shape = z.shape
z = z.flatten()
m_detected = np.zeros_like(z, dtype=float)
z_detected = np.zeros_like(z, dtype=float)
z0 = self.embed(z, 0)
z1 = self.embed(z, 1)
d0 = np.abs((z - z0))
d1 = np.abs((z - z1))
gen = zip(range(len(z_detected)), d0, d1)
for (i, dd0, dd1) in gen:
if (dd0 < dd1):
m_detected[i] = 0
z_detected[i] = z0[i]
else:
m_detected[i] = 1
z_detected[i] = z1[i]
z_detected = z_detected.reshape(shape)
m_detected = m_detected.reshape(shape)
return (z_detected, m_detected.astype(int))<|docstring|>z is the received vector, potentially modified
returns: a detected vector z_detected and a detected message m_detected<|endoftext|>
|
fe6edd3153e69b94e5dda9d1750c66c6357ae73af90414b644bc1255b591d28a
|
def random_msg(self, l):
    """Return a length-*l* NumPy array of uniformly random bits (0/1).

    Uses the global (unseeded) np.random state.
    """
    return np.random.choice((0, 1), l)
|
returns: a random binary sequence of length l
|
qim.py
|
random_msg
|
pl561/QuantizationIndexModulation
| 3
|
python
|
def random_msg(self, l):
'\n \n '
return np.random.choice((0, 1), l)
|
def random_msg(self, l):
'\n \n '
return np.random.choice((0, 1), l)<|docstring|>returns: a random binary sequence of length l<|endoftext|>
|
0efb90f989d09c65430adc23fe298c690176850c743c5208df5304961c78d7d7
|
def __init__(self, root, split='train', is_transform=True, img_size=(480, 640), task='depth'):
    """NYU depth dataset loader.

    :param root: dataset root (stored but unused — see NOTE below).
    :param split: 'train' or 'test'; selects which hard-coded directory to list.
    :param is_transform: whether __getitem__ applies the transform pipeline.
    :param img_size: (H, W) target size; falls back to (480, 640) if not a tuple.
    :param task: one of 'depth', 'all', 'visualize', 'region' (anything else
        takes the generic else-branch); selects which npy channels hold the
        depth and region maps.
    """
    self.root = root
    self.split = split
    self.num = 0
    self.is_transform = is_transform
    self.n_classes = 64
    self.img_size = (img_size if isinstance(img_size, tuple) else (480, 640))
    # ImageNet normalization statistics.
    self.stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    if (self.split == 'train'):
        # NOTE(review): `root` is ignored — data locations are hard-coded
        # absolute paths for a specific machine.
        self.path = os.path.join('/home/dataset/datasets/nyu2_depth/npy_data/')
        self.files = ((os.listdir(self.path) + os.listdir('/home/dataset2/nyu/nyu1/train/')) + os.listdir('/home/dataset2/nyu/nyu2/train/'))
        # Files are named '<int>.npy'; sort numerically by the stem.
        self.files.sort(key=(lambda x: int(x[:(- 4)])))
        if (len(self.files) < 1):
            raise Exception(('No files for %s found in %s' % (split, self.path)))
        print(('Found %d in %s images' % (len(self.files), self.path)))
    if (self.split == 'test'):
        self.path = os.path.join('/home/dataset2/nyu/nyu2/test/')
        self.files = os.listdir(self.path)
        self.files.sort(key=(lambda x: int(x[:(- 4)])))
        if (len(self.files) < 1):
            raise Exception(('No files for %s found in %s' % (split, self.path)))
        print(('Found %d in %s images' % (len(self.files), self.path)))
    self.task = task
    # self.d / self.r are the npy channel indices for depth and region.
    # NOTE(review): order matters — the later `if` blocks override the
    # depth/else result; only 'depth' and the generic else-branch reach
    # the end unchanged.
    if (task == 'depth'):
        self.d = 3
        self.r = 5
    else:
        self.d = 5
        self.r = 7
    if (task == 'all'):
        self.d = 3
        self.r = 7
    if (task == 'visualize'):
        self.d = 3
        self.r = 5
    if (task == 'region'):
        self.d = 3
        self.r = 3
    # Channel index of the segments map.
    self.m = 3
    self.length = self.__len__()
|
__init__
:param root:
:param split:
:param is_transform:
:param img_size:
|
back of code/RSCFN/rsden/loader/NYU # all augment.py
|
__init__
|
lidongyv/Monocular-depth-esitimation-with-region-support-cvpr
| 0
|
python
|
def __init__(self, root, split='train', is_transform=True, img_size=(480, 640), task='depth'):
'__init__\n\n :param root:\n :param split:\n :param is_transform:\n :param img_size:\n '
self.root = root
self.split = split
self.num = 0
self.is_transform = is_transform
self.n_classes = 64
self.img_size = (img_size if isinstance(img_size, tuple) else (480, 640))
self.stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
if (self.split == 'train'):
self.path = os.path.join('/home/dataset/datasets/nyu2_depth/npy_data/')
self.files = ((os.listdir(self.path) + os.listdir('/home/dataset2/nyu/nyu1/train/')) + os.listdir('/home/dataset2/nyu/nyu2/train/'))
self.files.sort(key=(lambda x: int(x[:(- 4)])))
if (len(self.files) < 1):
raise Exception(('No files for %s found in %s' % (split, self.path)))
print(('Found %d in %s images' % (len(self.files), self.path)))
if (self.split == 'test'):
self.path = os.path.join('/home/dataset2/nyu/nyu2/test/')
self.files = os.listdir(self.path)
self.files.sort(key=(lambda x: int(x[:(- 4)])))
if (len(self.files) < 1):
raise Exception(('No files for %s found in %s' % (split, self.path)))
print(('Found %d in %s images' % (len(self.files), self.path)))
self.task = task
if (task == 'depth'):
self.d = 3
self.r = 5
else:
self.d = 5
self.r = 7
if (task == 'all'):
self.d = 3
self.r = 7
if (task == 'visualize'):
self.d = 3
self.r = 5
if (task == 'region'):
self.d = 3
self.r = 3
self.m = 3
self.length = self.__len__()
|
def __init__(self, root, split='train', is_transform=True, img_size=(480, 640), task='depth'):
'__init__\n\n :param root:\n :param split:\n :param is_transform:\n :param img_size:\n '
self.root = root
self.split = split
self.num = 0
self.is_transform = is_transform
self.n_classes = 64
self.img_size = (img_size if isinstance(img_size, tuple) else (480, 640))
self.stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
if (self.split == 'train'):
self.path = os.path.join('/home/dataset/datasets/nyu2_depth/npy_data/')
self.files = ((os.listdir(self.path) + os.listdir('/home/dataset2/nyu/nyu1/train/')) + os.listdir('/home/dataset2/nyu/nyu2/train/'))
self.files.sort(key=(lambda x: int(x[:(- 4)])))
if (len(self.files) < 1):
raise Exception(('No files for %s found in %s' % (split, self.path)))
print(('Found %d in %s images' % (len(self.files), self.path)))
if (self.split == 'test'):
self.path = os.path.join('/home/dataset2/nyu/nyu2/test/')
self.files = os.listdir(self.path)
self.files.sort(key=(lambda x: int(x[:(- 4)])))
if (len(self.files) < 1):
raise Exception(('No files for %s found in %s' % (split, self.path)))
print(('Found %d in %s images' % (len(self.files), self.path)))
self.task = task
if (task == 'depth'):
self.d = 3
self.r = 5
else:
self.d = 5
self.r = 7
if (task == 'all'):
self.d = 3
self.r = 7
if (task == 'visualize'):
self.d = 3
self.r = 5
if (task == 'region'):
self.d = 3
self.r = 3
self.m = 3
self.length = self.__len__()<|docstring|>__init__
:param root:
:param split:
:param is_transform:
:param img_size:<|endoftext|>
|
6f5598e48ad430e6f3a9ad9c07085ccd494fdc316a964ba8500061252f95efef
|
def __len__(self):
    """Return the number of sample files found for this split."""
    return len(self.files)
|
__len__
|
back of code/RSCFN/rsden/loader/NYU # all augment.py
|
__len__
|
lidongyv/Monocular-depth-esitimation-with-region-support-cvpr
| 0
|
python
|
def (self):
return len(self.files)
|
def (self):
return len(self.files)<|docstring|>__len__<|endoftext|>
|
cfc0feb7e569446fef8c3dfecf25e866028c77445cc826c86af9b214148362c9
|
def __getitem__(self, index):
    """Load sample *index* from its .npy file and split out its channels.

    :param index: position in self.files.
    :return: (img, depth, segments, data) for task == 'visualize',
        otherwise (img, depth, region, segments, image) when
        is_transform is True.

    NOTE(review): if is_transform is False and task != 'visualize' this
    falls through and implicitly returns None — confirm callers always
    set is_transform.
    """
    data = np.load(os.path.join(self.path, self.files[index]))
    if (self.task == 'visualize'):
        # Visualization npy files carry an extra leading axis; take frame 0.
        data = data[(0, :, :, :)]
    # Channels 0-2 are RGB; self.d / self.r / self.m select depth, region
    # and segments channels (set per task in __init__).
    img = data[(:, :, 0:3)]
    depth = data[(:, :, self.d)]
    region = data[(:, :, self.r)]
    region = np.reshape(region, [1, region.shape[0], region.shape[1]])
    segments = data[(:, :, self.m)]
    segments = np.reshape(segments, [1, segments.shape[0], segments.shape[1]])
    if (self.task == 'visualize'):
        rgb = img  # NOTE(review): unused local, kept as-is
        # NOTE(review): transform returns 5 values; this 4-way unpack would
        # raise ValueError — confirm a visualize-specific transform exists.
        (img, depth, region, segments) = self.transform(img, depth, region, segments)
        return (img, depth, segments, data)
    if self.is_transform:
        (img, depth, region, segments, image) = self.transform(img, depth, region, segments)
        return (img, depth, region, segments, image)
|
__getitem__
:param index:
|
back of code/RSCFN/rsden/loader/NYU # all augment.py
|
__getitem__
|
lidongyv/Monocular-depth-esitimation-with-region-support-cvpr
| 0
|
python
|
def __getitem__(self, index):
'__getitem__\n\n :param index:\n '
data = np.load(os.path.join(self.path, self.files[index]))
if (self.task == 'visualize'):
data = data[(0, :, :, :)]
img = data[(:, :, 0:3)]
depth = data[(:, :, self.d)]
region = data[(:, :, self.r)]
region = np.reshape(region, [1, region.shape[0], region.shape[1]])
segments = data[(:, :, self.m)]
segments = np.reshape(segments, [1, segments.shape[0], segments.shape[1]])
if (self.task == 'visualize'):
rgb = img
(img, depth, region, segments) = self.transform(img, depth, region, segments)
return (img, depth, segments, data)
if self.is_transform:
(img, depth, region, segments, image) = self.transform(img, depth, region, segments)
return (img, depth, region, segments, image)
|
def __getitem__(self, index):
'__getitem__\n\n :param index:\n '
data = np.load(os.path.join(self.path, self.files[index]))
if (self.task == 'visualize'):
data = data[(0, :, :, :)]
img = data[(:, :, 0:3)]
depth = data[(:, :, self.d)]
region = data[(:, :, self.r)]
region = np.reshape(region, [1, region.shape[0], region.shape[1]])
segments = data[(:, :, self.m)]
segments = np.reshape(segments, [1, segments.shape[0], segments.shape[1]])
if (self.task == 'visualize'):
rgb = img
(img, depth, region, segments) = self.transform(img, depth, region, segments)
return (img, depth, segments, data)
if self.is_transform:
(img, depth, region, segments, image) = self.transform(img, depth, region, segments)
return (img, depth, region, segments, image)<|docstring|>__getitem__
:param index:<|endoftext|>
|
8ea3931b15e99533dfb1c10f95f0bfb35791883ab9633b5167d0687ec3971fbc
|
def transform(self, img, depth, region, segments):
    """Apply train-time augmentation or the fixed test-time resize/crop.

    :param img: HxWx3 float image array.
    :param depth: HxW depth map.
    :param region: 1xHxW region-support map.
    :param segments: 1xHxW segment map.
    :return: (img, depth, region, segments, image) where ``img`` is the
        normalized network input and ``image`` an un-normalized copy.
    """
    img = img[(:, :, :)]
    img = img.astype(np.float32)
    # depth -> (1, 1, H, W); region/segments -> (1, 1, H, W) after unsqueeze.
    depth = torch.from_numpy(depth).float().unsqueeze(0).unsqueeze(0)
    segments = torch.from_numpy(segments).float().unsqueeze(0)
    region = torch.from_numpy(region).float().unsqueeze(0)
    topil = transforms.ToPILImage()
    totensor = transforms.ToTensor()
    # ImageNet statistics, matching self.stats from __init__.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    img = totensor(img)
    # `image` keeps an un-normalized copy of the input, rescaled to [0, 1].
    image = (img.unsqueeze(0) + 0)
    image = (image / torch.max(image))
    # Additive-Gaussian-noise strength used in the train branch below.
    sigma = random.uniform(0, 0.04)
    if (self.split == 'train'):
        # Random zoom: resize to scale*(240, 320) before cropping 228x304.
        scale = random.uniform(1, 1.2)
        h = int((240 * scale))
        w = int((320 * scale))
        # Save maxima so maps can be normalized to [0, 1] for the PIL
        # round-trip and restored afterwards.
        md = torch.max(depth)
        mr = torch.max(region)
        ms = torch.max(segments)
        img = tf.resize(topil(img.squeeze(0)), [h, w])
        image = tf.resize(topil(image.squeeze(0)), [h, w])
        depth = tf.resize(topil((depth.squeeze(0) / md)), [h, w])
        segments = tf.resize(topil((segments.squeeze(0) / ms)), [h, w])
        region = tf.resize(topil((region.squeeze(0) / mr)), [h, w])
        # One shared crop window and rotation angle so all maps stay aligned.
        (i, j, h, w) = transforms.RandomCrop.get_params(img, output_size=[228, 304])
        r = random.uniform((- 5), 5)
        img = tf.rotate(img, r)
        image = tf.rotate(image, r)
        depth = tf.rotate(depth, r)
        segments = tf.rotate(segments, r)
        region = tf.rotate(region, r)
        img = tf.crop(img, i, j, h, w)
        image = tf.crop(image, i, j, h, w)
        depth = tf.crop(depth, i, j, h, w)
        segments = tf.crop(segments, i, j, h, w)
        region = tf.crop(region, i, j, h, w)
        if (random.random() > 0.5):
            img = tf.hflip(img)
            image = tf.hflip(image)
            depth = tf.hflip(depth)
            segments = tf.hflip(segments)
            region = tf.hflip(region)
        # Photometric jitter applies to the RGB input only.
        brightness = random.uniform(0, 0.2)
        contrast = random.uniform(0, 0.2)
        saturation = random.uniform(0, 0.2)
        hue = random.uniform(0, 0.2)
        color = transforms.ColorJitter(brightness, contrast, saturation, hue)
        img = color(img)
        gamma = random.uniform(0.7, 1.5)
        img = tf.adjust_gamma(img, gamma)
        # Random per-channel gain.
        # NOTE(review): img looks like a PIL Image at this point; in-place
        # channel indexing only works on tensors/arrays — confirm the
        # torchvision version in use returns tensors here.
        r = random.uniform(0.8, 1.2)
        g = random.uniform(0.8, 1.2)
        b = random.uniform(0.8, 1.2)
        img[(:, :, 0)] *= r
        img[(:, :, 1)] *= g
        img[(:, :, 2)] *= b
        img = (totensor(img) / 255)
        gaussian = (torch.zeros_like(img).normal_() * sigma)
        img = (img + gaussian)
        img = img.clamp(min=0, max=1)
        image = img
        img = normalize(img)
        # Restore map value ranges; depth is divided by `scale` to
        # compensate for the spatial zoom.
        depth = ((totensor(depth) * md) / scale)
        region = (totensor(region) * mr)
        segments = (totensor(segments) * ms)
    else:
        # Test path: half-resolution downsample, then trim a 6/8-pixel
        # border to reach the network input size.
        img = img.unsqueeze(0)
        img = F.interpolate(img, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(:, 6:(- 6), 8:(- 8))]
        image = F.interpolate(image, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(:, 6:(- 6), 8:(- 8))]
        depth = F.interpolate(depth, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
        region = F.interpolate(region, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
        segments = F.interpolate(segments, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
        img = normalize(img)
    return (img, depth, region, segments, image)
|
transform
:param img:
:param depth:
|
back of code/RSCFN/rsden/loader/NYU # all augment.py
|
transform
|
lidongyv/Monocular-depth-esitimation-with-region-support-cvpr
| 0
|
python
|
def transform(self, img, depth, region, segments):
'transform\n\n :param img:\n :param depth:\n '
img = img[(:, :, :)]
img = img.astype(np.float32)
depth = torch.from_numpy(depth).float().unsqueeze(0).unsqueeze(0)
segments = torch.from_numpy(segments).float().unsqueeze(0)
region = torch.from_numpy(region).float().unsqueeze(0)
topil = transforms.ToPILImage()
totensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
img = totensor(img)
image = (img.unsqueeze(0) + 0)
image = (image / torch.max(image))
sigma = random.uniform(0, 0.04)
if (self.split == 'train'):
scale = random.uniform(1, 1.2)
h = int((240 * scale))
w = int((320 * scale))
md = torch.max(depth)
mr = torch.max(region)
ms = torch.max(segments)
img = tf.resize(topil(img.squeeze(0)), [h, w])
image = tf.resize(topil(image.squeeze(0)), [h, w])
depth = tf.resize(topil((depth.squeeze(0) / md)), [h, w])
segments = tf.resize(topil((segments.squeeze(0) / ms)), [h, w])
region = tf.resize(topil((region.squeeze(0) / mr)), [h, w])
(i, j, h, w) = transforms.RandomCrop.get_params(img, output_size=[228, 304])
r = random.uniform((- 5), 5)
img = tf.rotate(img, r)
image = tf.rotate(image, r)
depth = tf.rotate(depth, r)
segments = tf.rotate(segments, r)
region = tf.rotate(region, r)
img = tf.crop(img, i, j, h, w)
image = tf.crop(image, i, j, h, w)
depth = tf.crop(depth, i, j, h, w)
segments = tf.crop(segments, i, j, h, w)
region = tf.crop(region, i, j, h, w)
if (random.random() > 0.5):
img = tf.hflip(img)
image = tf.hflip(image)
depth = tf.hflip(depth)
segments = tf.hflip(segments)
region = tf.hflip(region)
brightness = random.uniform(0, 0.2)
contrast = random.uniform(0, 0.2)
saturation = random.uniform(0, 0.2)
hue = random.uniform(0, 0.2)
color = transforms.ColorJitter(brightness, contrast, saturation, hue)
img = color(img)
gamma = random.uniform(0.7, 1.5)
img = tf.adjust_gamma(img, gamma)
r = random.uniform(0.8, 1.2)
g = random.uniform(0.8, 1.2)
b = random.uniform(0.8, 1.2)
img[(:, :, 0)] *= r
img[(:, :, 1)] *= g
img[(:, :, 2)] *= b
img = (totensor(img) / 255)
gaussian = (torch.zeros_like(img).normal_() * sigma)
img = (img + gaussian)
img = img.clamp(min=0, max=1)
image = img
img = normalize(img)
depth = ((totensor(depth) * md) / scale)
region = (totensor(region) * mr)
segments = (totensor(segments) * ms)
else:
img = img.unsqueeze(0)
img = F.interpolate(img, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(:, 6:(- 6), 8:(- 8))]
image = F.interpolate(image, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(:, 6:(- 6), 8:(- 8))]
depth = F.interpolate(depth, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
region = F.interpolate(region, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
segments = F.interpolate(segments, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
img = normalize(img)
return (img, depth, region, segments, image)
|
def transform(self, img, depth, region, segments):
'transform\n\n :param img:\n :param depth:\n '
img = img[(:, :, :)]
img = img.astype(np.float32)
depth = torch.from_numpy(depth).float().unsqueeze(0).unsqueeze(0)
segments = torch.from_numpy(segments).float().unsqueeze(0)
region = torch.from_numpy(region).float().unsqueeze(0)
topil = transforms.ToPILImage()
totensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
img = totensor(img)
image = (img.unsqueeze(0) + 0)
image = (image / torch.max(image))
sigma = random.uniform(0, 0.04)
if (self.split == 'train'):
scale = random.uniform(1, 1.2)
h = int((240 * scale))
w = int((320 * scale))
md = torch.max(depth)
mr = torch.max(region)
ms = torch.max(segments)
img = tf.resize(topil(img.squeeze(0)), [h, w])
image = tf.resize(topil(image.squeeze(0)), [h, w])
depth = tf.resize(topil((depth.squeeze(0) / md)), [h, w])
segments = tf.resize(topil((segments.squeeze(0) / ms)), [h, w])
region = tf.resize(topil((region.squeeze(0) / mr)), [h, w])
(i, j, h, w) = transforms.RandomCrop.get_params(img, output_size=[228, 304])
r = random.uniform((- 5), 5)
img = tf.rotate(img, r)
image = tf.rotate(image, r)
depth = tf.rotate(depth, r)
segments = tf.rotate(segments, r)
region = tf.rotate(region, r)
img = tf.crop(img, i, j, h, w)
image = tf.crop(image, i, j, h, w)
depth = tf.crop(depth, i, j, h, w)
segments = tf.crop(segments, i, j, h, w)
region = tf.crop(region, i, j, h, w)
if (random.random() > 0.5):
img = tf.hflip(img)
image = tf.hflip(image)
depth = tf.hflip(depth)
segments = tf.hflip(segments)
region = tf.hflip(region)
brightness = random.uniform(0, 0.2)
contrast = random.uniform(0, 0.2)
saturation = random.uniform(0, 0.2)
hue = random.uniform(0, 0.2)
color = transforms.ColorJitter(brightness, contrast, saturation, hue)
img = color(img)
gamma = random.uniform(0.7, 1.5)
img = tf.adjust_gamma(img, gamma)
r = random.uniform(0.8, 1.2)
g = random.uniform(0.8, 1.2)
b = random.uniform(0.8, 1.2)
img[(:, :, 0)] *= r
img[(:, :, 1)] *= g
img[(:, :, 2)] *= b
img = (totensor(img) / 255)
gaussian = (torch.zeros_like(img).normal_() * sigma)
img = (img + gaussian)
img = img.clamp(min=0, max=1)
image = img
img = normalize(img)
depth = ((totensor(depth) * md) / scale)
region = (totensor(region) * mr)
segments = (totensor(segments) * ms)
else:
img = img.unsqueeze(0)
img = F.interpolate(img, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(:, 6:(- 6), 8:(- 8))]
image = F.interpolate(image, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(:, 6:(- 6), 8:(- 8))]
depth = F.interpolate(depth, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
region = F.interpolate(region, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
segments = F.interpolate(segments, scale_factor=(1 / 2), mode='bilinear', align_corners=False).squeeze()[(6:(- 6), 8:(- 8))]
img = normalize(img)
return (img, depth, region, segments, image)<|docstring|>transform
:param img:
:param depth:<|endoftext|>
|
5cdda4d015d1d50dd1aed173b01a7db5caa8890e1db8a00d38535d4af6e4fdf1
|
def __init__(self, description=None, label=None, name=None, local_vars_configuration=None):
    """WorkflowWorkflowTaskAllOf - a model defined in OpenAPI.

    :param description: description of this task instance in the workflow.
    :param label: user defined label identifier used for UI display.
    :param name: task name (see the generated `name` property).
    :param local_vars_configuration: client Configuration; a default one
        is created when omitted.
    """
    if (local_vars_configuration is None):
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    # Backing fields for the generated properties below.
    self._description = None
    self._label = None
    self._name = None
    self.discriminator = None
    # Assign through the property setters only when a value was provided,
    # so unset attributes remain None.
    if (description is not None):
        self.description = description
    if (label is not None):
        self.label = label
    if (name is not None):
        self.name = name
|
WorkflowWorkflowTaskAllOf - a model defined in OpenAPI
|
intersight/models/workflow_workflow_task_all_of.py
|
__init__
|
sdnit-se/intersight-python
| 21
|
python
|
def __init__(self, description=None, label=None, name=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._description = None
self._label = None
self._name = None
self.discriminator = None
if (description is not None):
self.description = description
if (label is not None):
self.label = label
if (name is not None):
self.name = name
|
def __init__(self, description=None, label=None, name=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._description = None
self._label = None
self._name = None
self.discriminator = None
if (description is not None):
self.description = description
if (label is not None):
self.label = label
if (name is not None):
self.name = name<|docstring|>WorkflowWorkflowTaskAllOf - a model defined in OpenAPI<|endoftext|>
|
a591322f54c58c95c59e4472001697e020d1b7f06e01db1f3dbc2943e6c74711
|
@property
def description(self):
    """Gets the description of this WorkflowWorkflowTaskAllOf.  # noqa: E501

    The description of this task instance in the workflow.  # noqa: E501

    :return: The description of this WorkflowWorkflowTaskAllOf.  # noqa: E501
    :rtype: str
    """
    return self._description
|
Gets the description of this WorkflowWorkflowTaskAllOf. # noqa: E501
The description of this task instance in the workflow. # noqa: E501
:return: The description of this WorkflowWorkflowTaskAllOf. # noqa: E501
:rtype: str
|
intersight/models/workflow_workflow_task_all_of.py
|
description
|
sdnit-se/intersight-python
| 21
|
python
|
@property
def description(self):
'Gets the description of this WorkflowWorkflowTaskAllOf. # noqa: E501\n\n The description of this task instance in the workflow. # noqa: E501\n\n :return: The description of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :rtype: str\n '
return self._description
|
@property
def description(self):
'Gets the description of this WorkflowWorkflowTaskAllOf. # noqa: E501\n\n The description of this task instance in the workflow. # noqa: E501\n\n :return: The description of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :rtype: str\n '
return self._description<|docstring|>Gets the description of this WorkflowWorkflowTaskAllOf. # noqa: E501
The description of this task instance in the workflow. # noqa: E501
:return: The description of this WorkflowWorkflowTaskAllOf. # noqa: E501
:rtype: str<|endoftext|>
|
129a433073048e1e90797369e2e5ebeba8cce4708c41238c258d14d4d0ac90de
|
@description.setter
def description(self, description):
    """Sets the description of this WorkflowWorkflowTaskAllOf.

    The description of this task instance in the workflow.  # noqa: E501

    :param description: The description of this WorkflowWorkflowTaskAllOf.  # noqa: E501
    :type: str
    """
    # No validation — the generated model stores the value as given.
    self._description = description
|
Sets the description of this WorkflowWorkflowTaskAllOf.
The description of this task instance in the workflow. # noqa: E501
:param description: The description of this WorkflowWorkflowTaskAllOf. # noqa: E501
:type: str
|
intersight/models/workflow_workflow_task_all_of.py
|
description
|
sdnit-se/intersight-python
| 21
|
python
|
@description.setter
def description(self, description):
'Sets the description of this WorkflowWorkflowTaskAllOf.\n\n The description of this task instance in the workflow. # noqa: E501\n\n :param description: The description of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :type: str\n '
self._description = description
|
@description.setter
def description(self, description):
'Sets the description of this WorkflowWorkflowTaskAllOf.\n\n The description of this task instance in the workflow. # noqa: E501\n\n :param description: The description of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :type: str\n '
self._description = description<|docstring|>Sets the description of this WorkflowWorkflowTaskAllOf.
The description of this task instance in the workflow. # noqa: E501
:param description: The description of this WorkflowWorkflowTaskAllOf. # noqa: E501
:type: str<|endoftext|>
|
1ed0582ef63d4ae539c03e08ef5206e8d9a154d1d7b9ff4222879d42da50a177
|
@property
def label(self):
    """Gets the label of this WorkflowWorkflowTaskAllOf.  # noqa: E501

    A user defined label identifier of the workflow task used for UI display.  # noqa: E501

    :return: The label of this WorkflowWorkflowTaskAllOf.  # noqa: E501
    :rtype: str
    """
    return self._label
|
Gets the label of this WorkflowWorkflowTaskAllOf. # noqa: E501
A user defined label identifier of the workflow task used for UI display. # noqa: E501
:return: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501
:rtype: str
|
intersight/models/workflow_workflow_task_all_of.py
|
label
|
sdnit-se/intersight-python
| 21
|
python
|
@property
def label(self):
'Gets the label of this WorkflowWorkflowTaskAllOf. # noqa: E501\n\n A user defined label identifier of the workflow task used for UI display. # noqa: E501\n\n :return: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :rtype: str\n '
return self._label
|
@property
def label(self):
'Gets the label of this WorkflowWorkflowTaskAllOf. # noqa: E501\n\n A user defined label identifier of the workflow task used for UI display. # noqa: E501\n\n :return: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :rtype: str\n '
return self._label<|docstring|>Gets the label of this WorkflowWorkflowTaskAllOf. # noqa: E501
A user defined label identifier of the workflow task used for UI display. # noqa: E501
:return: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501
:rtype: str<|endoftext|>
|
02057129ca8083b63e5506a2fe8e44380d6fd8203c311d5062ab11d5f30de37a
|
@label.setter
def label(self, label):
'Sets the label of this WorkflowWorkflowTaskAllOf.\n\n A user defined label identifier of the workflow task used for UI display. # noqa: E501\n\n :param label: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :type: str\n '
self._label = label
|
Sets the label of this WorkflowWorkflowTaskAllOf.
A user defined label identifier of the workflow task used for UI display. # noqa: E501
:param label: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501
:type: str
|
intersight/models/workflow_workflow_task_all_of.py
|
label
|
sdnit-se/intersight-python
| 21
|
python
|
@label.setter
def label(self, label):
'Sets the label of this WorkflowWorkflowTaskAllOf.\n\n A user defined label identifier of the workflow task used for UI display. # noqa: E501\n\n :param label: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :type: str\n '
self._label = label
|
@label.setter
def label(self, label):
'Sets the label of this WorkflowWorkflowTaskAllOf.\n\n A user defined label identifier of the workflow task used for UI display. # noqa: E501\n\n :param label: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :type: str\n '
self._label = label<|docstring|>Sets the label of this WorkflowWorkflowTaskAllOf.
A user defined label identifier of the workflow task used for UI display. # noqa: E501
:param label: The label of this WorkflowWorkflowTaskAllOf. # noqa: E501
:type: str<|endoftext|>
|
9495c6449ca4944d8285642c4c7a225e14e2baacc5c0b7fdbab208da4f2a562a
|
@property
def name(self):
'Gets the name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n\n The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501\n\n :return: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :rtype: str\n '
return self._name
|
Gets the name of this WorkflowWorkflowTaskAllOf. # noqa: E501
The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501
:return: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501
:rtype: str
|
intersight/models/workflow_workflow_task_all_of.py
|
name
|
sdnit-se/intersight-python
| 21
|
python
|
@property
def name(self):
'Gets the name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n\n The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501\n\n :return: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :rtype: str\n '
return self._name
|
@property
def name(self):
'Gets the name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n\n The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501\n\n :return: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :rtype: str\n '
return self._name<|docstring|>Gets the name of this WorkflowWorkflowTaskAllOf. # noqa: E501
The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501
:return: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501
:rtype: str<|endoftext|>
|
1e1da406416b753caa4b64b5e6108f54f7f5e5570fa4ac424ee04992756cd21d
|
@name.setter
def name(self, name):
'Sets the name of this WorkflowWorkflowTaskAllOf.\n\n The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501\n\n :param name: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :type: str\n '
self._name = name
|
Sets the name of this WorkflowWorkflowTaskAllOf.
The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501
:param name: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501
:type: str
|
intersight/models/workflow_workflow_task_all_of.py
|
name
|
sdnit-se/intersight-python
| 21
|
python
|
@name.setter
def name(self, name):
'Sets the name of this WorkflowWorkflowTaskAllOf.\n\n The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501\n\n :param name: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :type: str\n '
self._name = name
|
@name.setter
def name(self, name):
'Sets the name of this WorkflowWorkflowTaskAllOf.\n\n The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501\n\n :param name: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501\n :type: str\n '
self._name = name<|docstring|>Sets the name of this WorkflowWorkflowTaskAllOf.
The name of the task within the workflow and it must be unique among all WorkflowTasks within a workflow definition. This name serves as the internal unique identifier for the task and is used to pick input and output parameters to feed into other tasks. # noqa: E501
:param name: The name of this WorkflowWorkflowTaskAllOf. # noqa: E501
:type: str<|endoftext|>
|
5a4e41bb6a0def746593298cb605df98f1366e957c4ca89b12010ea7db707963
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
Returns the model properties as a dict
|
intersight/models/workflow_workflow_task_all_of.py
|
to_dict
|
sdnit-se/intersight-python
| 21
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|>
|
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
|
Returns the string representation of the model
|
intersight/models/workflow_workflow_task_all_of.py
|
to_str
|
sdnit-se/intersight-python
| 21
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
|
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
|
For `print` and `pprint`
|
intersight/models/workflow_workflow_task_all_of.py
|
__repr__
|
sdnit-se/intersight-python
| 21
|
python
|
def __repr__(self):
return self.to_str()
|
def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
|
b2915479e8739c841382d2fff7b5ef1b452056764533db58e80acc27f53c87e8
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, WorkflowWorkflowTaskAllOf)):
return False
return (self.to_dict() == other.to_dict())
|
Returns true if both objects are equal
|
intersight/models/workflow_workflow_task_all_of.py
|
__eq__
|
sdnit-se/intersight-python
| 21
|
python
|
def __eq__(self, other):
if (not isinstance(other, WorkflowWorkflowTaskAllOf)):
return False
return (self.to_dict() == other.to_dict())
|
def __eq__(self, other):
if (not isinstance(other, WorkflowWorkflowTaskAllOf)):
return False
return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|>
|
06b6b15430ee663823c3ae1f87f20b8f09cdf254f216abd91e2e5a2113b953ff
|
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, WorkflowWorkflowTaskAllOf)):
return True
return (self.to_dict() != other.to_dict())
|
Returns true if both objects are not equal
|
intersight/models/workflow_workflow_task_all_of.py
|
__ne__
|
sdnit-se/intersight-python
| 21
|
python
|
def __ne__(self, other):
if (not isinstance(other, WorkflowWorkflowTaskAllOf)):
return True
return (self.to_dict() != other.to_dict())
|
def __ne__(self, other):
if (not isinstance(other, WorkflowWorkflowTaskAllOf)):
return True
return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|>
|
10818d3105d55491c5eb7e05710ee8bfcf97970e4a44b40ce13ed2b66d805d64
|
def _generate_random_program(n_qubits, length, include_measures=False):
'Randomly sample gates and arguments (qubits, angles)'
if (n_qubits < 3):
raise ValueError('Please request n_qubits >= 3 so we can use 3-qubit gates.')
gates = list(QUANTUM_GATES.values())
prog = Program()
if include_measures:
gates.append(MEASURE)
prog.declare('ro', 'BIT', n_qubits)
for _ in range(length):
gate = random.choice(gates)
possible_qubits = set(range(n_qubits))
sig = inspect.signature(gate)
param_vals = []
for param in sig.parameters:
if (param in ['qubit', 'q1', 'q2', 'control', 'control1', 'control2', 'target', 'target_1', 'target_2']):
param_val = random.choice(list(possible_qubits))
possible_qubits.remove(param_val)
elif (param == 'classical_reg'):
qubit = random.choice(list(possible_qubits))
param_val = MemoryReference('ro', qubit)
possible_qubits.remove(qubit)
elif (param == 'angle'):
param_val = random.uniform(((- 2) * pi), (2 * pi))
else:
raise ValueError('Unknown gate parameter {}'.format(param))
param_vals.append(param_val)
prog += gate(*param_vals)
return prog
|
Randomly sample gates and arguments (qubits, angles)
|
test/unit/test_reference_wavefunction.py
|
_generate_random_program
|
mmehrani/pyquil
| 677
|
python
|
def _generate_random_program(n_qubits, length, include_measures=False):
if (n_qubits < 3):
raise ValueError('Please request n_qubits >= 3 so we can use 3-qubit gates.')
gates = list(QUANTUM_GATES.values())
prog = Program()
if include_measures:
gates.append(MEASURE)
prog.declare('ro', 'BIT', n_qubits)
for _ in range(length):
gate = random.choice(gates)
possible_qubits = set(range(n_qubits))
sig = inspect.signature(gate)
param_vals = []
for param in sig.parameters:
if (param in ['qubit', 'q1', 'q2', 'control', 'control1', 'control2', 'target', 'target_1', 'target_2']):
param_val = random.choice(list(possible_qubits))
possible_qubits.remove(param_val)
elif (param == 'classical_reg'):
qubit = random.choice(list(possible_qubits))
param_val = MemoryReference('ro', qubit)
possible_qubits.remove(qubit)
elif (param == 'angle'):
param_val = random.uniform(((- 2) * pi), (2 * pi))
else:
raise ValueError('Unknown gate parameter {}'.format(param))
param_vals.append(param_val)
prog += gate(*param_vals)
return prog
|
def _generate_random_program(n_qubits, length, include_measures=False):
if (n_qubits < 3):
raise ValueError('Please request n_qubits >= 3 so we can use 3-qubit gates.')
gates = list(QUANTUM_GATES.values())
prog = Program()
if include_measures:
gates.append(MEASURE)
prog.declare('ro', 'BIT', n_qubits)
for _ in range(length):
gate = random.choice(gates)
possible_qubits = set(range(n_qubits))
sig = inspect.signature(gate)
param_vals = []
for param in sig.parameters:
if (param in ['qubit', 'q1', 'q2', 'control', 'control1', 'control2', 'target', 'target_1', 'target_2']):
param_val = random.choice(list(possible_qubits))
possible_qubits.remove(param_val)
elif (param == 'classical_reg'):
qubit = random.choice(list(possible_qubits))
param_val = MemoryReference('ro', qubit)
possible_qubits.remove(qubit)
elif (param == 'angle'):
param_val = random.uniform(((- 2) * pi), (2 * pi))
else:
raise ValueError('Unknown gate parameter {}'.format(param))
param_vals.append(param_val)
prog += gate(*param_vals)
return prog<|docstring|>Randomly sample gates and arguments (qubits, angles)<|endoftext|>
|
da39fb7ddd78014b5d36eb8de086109391bef09abd6580119bb8d148967a7976
|
@site.add(APPLICATION)
def view():
'Returns the FastGridTemplate App'
pn.config.sizing_mode = 'stretch_width'
app = FastGridTemplate(title='FastGridTemplate by awesome-panel.org', row_height=55, prevent_collision=True, save_layout=True)
app.main[(0:9, 0:6)] = APPLICATION.intro_section()
app.main[(0:9, 6:12)] = _create_hvplot()
app.main[(9:16, 0:12)] = EchartsApp().view()
app.main[(16:30, 0:3)] = _create_fast_button_card()
app.main[(16:30, 3:6)] = _create_fast_checkbox_card()
app.main[(16:30, 6:9)] = _create_fast_literal_input_card()
app.main[(16:30, 9:12)] = _create_fast_switch_card()
return app
|
Returns the FastGridTemplate App
|
application/pages/fast/fast_grid_template_app.py
|
view
|
EmanueleCannizzaro/awesome-panel
| 179
|
python
|
@site.add(APPLICATION)
def view():
pn.config.sizing_mode = 'stretch_width'
app = FastGridTemplate(title='FastGridTemplate by awesome-panel.org', row_height=55, prevent_collision=True, save_layout=True)
app.main[(0:9, 0:6)] = APPLICATION.intro_section()
app.main[(0:9, 6:12)] = _create_hvplot()
app.main[(9:16, 0:12)] = EchartsApp().view()
app.main[(16:30, 0:3)] = _create_fast_button_card()
app.main[(16:30, 3:6)] = _create_fast_checkbox_card()
app.main[(16:30, 6:9)] = _create_fast_literal_input_card()
app.main[(16:30, 9:12)] = _create_fast_switch_card()
return app
|
@site.add(APPLICATION)
def view():
pn.config.sizing_mode = 'stretch_width'
app = FastGridTemplate(title='FastGridTemplate by awesome-panel.org', row_height=55, prevent_collision=True, save_layout=True)
app.main[(0:9, 0:6)] = APPLICATION.intro_section()
app.main[(0:9, 6:12)] = _create_hvplot()
app.main[(9:16, 0:12)] = EchartsApp().view()
app.main[(16:30, 0:3)] = _create_fast_button_card()
app.main[(16:30, 3:6)] = _create_fast_checkbox_card()
app.main[(16:30, 6:9)] = _create_fast_literal_input_card()
app.main[(16:30, 9:12)] = _create_fast_switch_card()
return app<|docstring|>Returns the FastGridTemplate App<|endoftext|>
|
0531cfe9a02962f3d0ceec8de7d8fa1d841032b9aa71c6e3fc638308499e3eb5
|
def __init__(self, lst):
'\n Parameters\n ----------\n lst : `object`\n A single instance or an iterable of ``(QueryResponse, client)``\n pairs or ``QueryResponse`` objects with a ``.client`` attribute.\n '
tmplst = []
self._numfile = 0
if isinstance(lst, (QueryResponse, vsoQueryResponse)):
if (not hasattr(lst, 'client')):
raise ValueError('A {} object is only a valid input to UnifiedResponse if it has a client attribute.'.format(type(lst).__name__))
tmplst.append(lst)
self._numfile = len(lst)
else:
for block in lst:
if (isinstance(block, tuple) and (len(block) == 2)):
block[0].client = block[1]
tmplst.append(block[0])
self._numfile += len(block[0])
elif hasattr(block, 'client'):
tmplst.append(block)
self._numfile += len(block)
else:
raise ValueError('{} is not a valid input to UnifiedResponse.'.format(type(lst)))
self._list = tmplst
|
Parameters
----------
lst : `object`
A single instance or an iterable of ``(QueryResponse, client)``
pairs or ``QueryResponse`` objects with a ``.client`` attribute.
|
sunpy/net/fido_factory.py
|
__init__
|
amogh-jrules/sunpy
| 0
|
python
|
def __init__(self, lst):
'\n Parameters\n ----------\n lst : `object`\n A single instance or an iterable of ``(QueryResponse, client)``\n pairs or ``QueryResponse`` objects with a ``.client`` attribute.\n '
tmplst = []
self._numfile = 0
if isinstance(lst, (QueryResponse, vsoQueryResponse)):
if (not hasattr(lst, 'client')):
raise ValueError('A {} object is only a valid input to UnifiedResponse if it has a client attribute.'.format(type(lst).__name__))
tmplst.append(lst)
self._numfile = len(lst)
else:
for block in lst:
if (isinstance(block, tuple) and (len(block) == 2)):
block[0].client = block[1]
tmplst.append(block[0])
self._numfile += len(block[0])
elif hasattr(block, 'client'):
tmplst.append(block)
self._numfile += len(block)
else:
raise ValueError('{} is not a valid input to UnifiedResponse.'.format(type(lst)))
self._list = tmplst
|
def __init__(self, lst):
'\n Parameters\n ----------\n lst : `object`\n A single instance or an iterable of ``(QueryResponse, client)``\n pairs or ``QueryResponse`` objects with a ``.client`` attribute.\n '
tmplst = []
self._numfile = 0
if isinstance(lst, (QueryResponse, vsoQueryResponse)):
if (not hasattr(lst, 'client')):
raise ValueError('A {} object is only a valid input to UnifiedResponse if it has a client attribute.'.format(type(lst).__name__))
tmplst.append(lst)
self._numfile = len(lst)
else:
for block in lst:
if (isinstance(block, tuple) and (len(block) == 2)):
block[0].client = block[1]
tmplst.append(block[0])
self._numfile += len(block[0])
elif hasattr(block, 'client'):
tmplst.append(block)
self._numfile += len(block)
else:
raise ValueError('{} is not a valid input to UnifiedResponse.'.format(type(lst)))
self._list = tmplst<|docstring|>Parameters
----------
lst : `object`
A single instance or an iterable of ``(QueryResponse, client)``
pairs or ``QueryResponse`` objects with a ``.client`` attribute.<|endoftext|>
|
b827681e882e0fce2753af634b9b8b91f6591f6cbcf99eea0680de4d81f3c550
|
def _handle_record_slice(self, client_resp, record_slice):
'\n Given a slice to be applied to the results from a single client, return\n an object of the same type as client_resp.\n '
resp_type = type(client_resp)
if isinstance(record_slice, int):
resp = [client_resp[record_slice]]
else:
resp = client_resp[record_slice]
ret = resp_type(resp)
ret.client = client_resp.client
return ret
|
Given a slice to be applied to the results from a single client, return
an object of the same type as client_resp.
|
sunpy/net/fido_factory.py
|
_handle_record_slice
|
amogh-jrules/sunpy
| 0
|
python
|
def _handle_record_slice(self, client_resp, record_slice):
'\n Given a slice to be applied to the results from a single client, return\n an object of the same type as client_resp.\n '
resp_type = type(client_resp)
if isinstance(record_slice, int):
resp = [client_resp[record_slice]]
else:
resp = client_resp[record_slice]
ret = resp_type(resp)
ret.client = client_resp.client
return ret
|
def _handle_record_slice(self, client_resp, record_slice):
'\n Given a slice to be applied to the results from a single client, return\n an object of the same type as client_resp.\n '
resp_type = type(client_resp)
if isinstance(record_slice, int):
resp = [client_resp[record_slice]]
else:
resp = client_resp[record_slice]
ret = resp_type(resp)
ret.client = client_resp.client
return ret<|docstring|>Given a slice to be applied to the results from a single client, return
an object of the same type as client_resp.<|endoftext|>
|
48d84a3e11d21aefcb6296192947d13e27b9eb0df8101cc6a8561a28c2778981
|
def __getitem__(self, aslice):
'\n Support slicing the UnifiedResponse as a 2D object.\n\n The first index is to the client and the second index is the records\n returned from those clients.\n '
if isinstance(aslice, (int, slice)):
ret = self._list[aslice]
elif isinstance(aslice, tuple):
if (len(aslice) > 2):
raise IndexError('UnifiedResponse objects can only be sliced with one or two indices.')
if isinstance(aslice[0], int):
client_resp = self._list[aslice[0]]
ret = self._handle_record_slice(client_resp, aslice[1])
else:
intermediate = self._list[aslice[0]]
ret = []
for client_resp in intermediate:
resp = self._handle_record_slice(client_resp, aslice[1])
ret.append(resp)
else:
raise IndexError('UnifiedResponse objects must be sliced with integers.')
return UnifiedResponse(ret)
|
Support slicing the UnifiedResponse as a 2D object.
The first index is to the client and the second index is the records
returned from those clients.
|
sunpy/net/fido_factory.py
|
__getitem__
|
amogh-jrules/sunpy
| 0
|
python
|
def __getitem__(self, aslice):
'\n Support slicing the UnifiedResponse as a 2D object.\n\n The first index is to the client and the second index is the records\n returned from those clients.\n '
if isinstance(aslice, (int, slice)):
ret = self._list[aslice]
elif isinstance(aslice, tuple):
if (len(aslice) > 2):
raise IndexError('UnifiedResponse objects can only be sliced with one or two indices.')
if isinstance(aslice[0], int):
client_resp = self._list[aslice[0]]
ret = self._handle_record_slice(client_resp, aslice[1])
else:
intermediate = self._list[aslice[0]]
ret = []
for client_resp in intermediate:
resp = self._handle_record_slice(client_resp, aslice[1])
ret.append(resp)
else:
raise IndexError('UnifiedResponse objects must be sliced with integers.')
return UnifiedResponse(ret)
|
def __getitem__(self, aslice):
'\n Support slicing the UnifiedResponse as a 2D object.\n\n The first index is to the client and the second index is the records\n returned from those clients.\n '
if isinstance(aslice, (int, slice)):
ret = self._list[aslice]
elif isinstance(aslice, tuple):
if (len(aslice) > 2):
raise IndexError('UnifiedResponse objects can only be sliced with one or two indices.')
if isinstance(aslice[0], int):
client_resp = self._list[aslice[0]]
ret = self._handle_record_slice(client_resp, aslice[1])
else:
intermediate = self._list[aslice[0]]
ret = []
for client_resp in intermediate:
resp = self._handle_record_slice(client_resp, aslice[1])
ret.append(resp)
else:
raise IndexError('UnifiedResponse objects must be sliced with integers.')
return UnifiedResponse(ret)<|docstring|>Support slicing the UnifiedResponse as a 2D object.
The first index is to the client and the second index is the records
returned from those clients.<|endoftext|>
|
fdf6b54661cc78230446f2ecb05572992ea3258beb4c62b05a331c73baa0c70c
|
def get_response(self, i):
'\n Get the actual response rather than another UnifiedResponse object.\n '
return self._list[i]
|
Get the actual response rather than another UnifiedResponse object.
|
sunpy/net/fido_factory.py
|
get_response
|
amogh-jrules/sunpy
| 0
|
python
|
def get_response(self, i):
'\n \n '
return self._list[i]
|
def get_response(self, i):
'\n \n '
return self._list[i]<|docstring|>Get the actual response rather than another UnifiedResponse object.<|endoftext|>
|
3bda2ac90c27a9347ec9640929fa93d48086f91385f79a4617f75b7944202785
|
def response_block_properties(self):
'\n Returns a set of class attributes on all the response blocks.\n\n Returns\n -------\n s : list\n List of strings, containing attribute names in the response blocks.\n '
s = self.get_response(0).response_block_properties()
for i in range(1, len(self)):
s.intersection(self.get_response(i).response_block_properties())
return s
|
Returns a set of class attributes on all the response blocks.
Returns
-------
s : list
List of strings, containing attribute names in the response blocks.
|
sunpy/net/fido_factory.py
|
response_block_properties
|
amogh-jrules/sunpy
| 0
|
python
|
def response_block_properties(self):
'\n Returns a set of class attributes on all the response blocks.\n\n Returns\n -------\n s : list\n List of strings, containing attribute names in the response blocks.\n '
s = self.get_response(0).response_block_properties()
for i in range(1, len(self)):
s.intersection(self.get_response(i).response_block_properties())
return s
|
def response_block_properties(self):
'\n Returns a set of class attributes on all the response blocks.\n\n Returns\n -------\n s : list\n List of strings, containing attribute names in the response blocks.\n '
s = self.get_response(0).response_block_properties()
for i in range(1, len(self)):
s.intersection(self.get_response(i).response_block_properties())
return s<|docstring|>Returns a set of class attributes on all the response blocks.
Returns
-------
s : list
List of strings, containing attribute names in the response blocks.<|endoftext|>
|
3dfedcd72c366b9a9013efc1006fd4b7aa3228858a74b5f40ca55cf5b0838fb6
|
@property
def tables(self):
'\n Returns a list of `astropy.table.Table` for all responses present in a specific\n `~sunpy.net.fido_factory.UnifiedResponse` object. They can then be used\n to perform key-based indexing of objects of either type\n `sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or\n `sunpy.net.jsoc.JSOCClient`\n\n Returns\n -------\n `list`\n A list of `astropy.table.Table`, consisting of data either from the\n `sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or\n `sunpy.net.jsoc.JSOCClient`.\n '
tables = []
for block in self.responses:
tables.append(block.build_table())
return tables
|
Returns a list of `astropy.table.Table` for all responses present in a specific
`~sunpy.net.fido_factory.UnifiedResponse` object. They can then be used
to perform key-based indexing of objects of either type
`sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or
`sunpy.net.jsoc.JSOCClient`
Returns
-------
`list`
A list of `astropy.table.Table`, consisting of data either from the
`sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or
`sunpy.net.jsoc.JSOCClient`.
|
sunpy/net/fido_factory.py
|
tables
|
amogh-jrules/sunpy
| 0
|
python
|
@property
def tables(self):
'\n Returns a list of `astropy.table.Table` for all responses present in a specific\n `~sunpy.net.fido_factory.UnifiedResponse` object. They can then be used\n to perform key-based indexing of objects of either type\n `sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or\n `sunpy.net.jsoc.JSOCClient`\n\n Returns\n -------\n `list`\n A list of `astropy.table.Table`, consisting of data either from the\n `sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or\n `sunpy.net.jsoc.JSOCClient`.\n '
tables = []
for block in self.responses:
tables.append(block.build_table())
return tables
|
@property
def tables(self):
'\n Returns a list of `astropy.table.Table` for all responses present in a specific\n `~sunpy.net.fido_factory.UnifiedResponse` object. They can then be used\n to perform key-based indexing of objects of either type\n `sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or\n `sunpy.net.jsoc.JSOCClient`\n\n Returns\n -------\n `list`\n A list of `astropy.table.Table`, consisting of data either from the\n `sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or\n `sunpy.net.jsoc.JSOCClient`.\n '
tables = []
for block in self.responses:
tables.append(block.build_table())
return tables<|docstring|>Returns a list of `astropy.table.Table` for all responses present in a specific
`~sunpy.net.fido_factory.UnifiedResponse` object. They can then be used
to perform key-based indexing of objects of either type
`sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or
`sunpy.net.jsoc.JSOCClient`
Returns
-------
`list`
A list of `astropy.table.Table`, consisting of data either from the
`sunpy.net.dataretriever.client.QueryResponse`, `sunpy.net.vso.QueryResponse` or
`sunpy.net.jsoc.JSOCClient`.<|endoftext|>
|
095bded969ef6079a65f2ac1e066efdf538f661f71cd913d0f7a3e4b522b1275
|
@property
def responses(self):
'\n A generator of all the `sunpy.net.dataretriever.client.QueryResponse`\n objects contained in the `~sunpy.net.fido_factory.UnifiedResponse`\n object.\n '
for i in range(len(self)):
(yield self.get_response(i))
|
A generator of all the `sunpy.net.dataretriever.client.QueryResponse`
objects contained in the `~sunpy.net.fido_factory.UnifiedResponse`
object.
|
sunpy/net/fido_factory.py
|
responses
|
amogh-jrules/sunpy
| 0
|
python
|
@property
def responses(self):
'\n A generator of all the `sunpy.net.dataretriever.client.QueryResponse`\n objects contained in the `~sunpy.net.fido_factory.UnifiedResponse`\n object.\n '
for i in range(len(self)):
(yield self.get_response(i))
|
@property
def responses(self):
'\n A generator of all the `sunpy.net.dataretriever.client.QueryResponse`\n objects contained in the `~sunpy.net.fido_factory.UnifiedResponse`\n object.\n '
for i in range(len(self)):
(yield self.get_response(i))<|docstring|>A generator of all the `sunpy.net.dataretriever.client.QueryResponse`
objects contained in the `~sunpy.net.fido_factory.UnifiedResponse`
object.<|endoftext|>
|
9a57f9bf3f43fa2cf96e80f7ec7f4e7b697d2abb536c9b3a10bf68cf7289c126
|
def search(self, *query):
"\n Query for data in form of multiple parameters.\n\n Examples\n --------\n Query for LYRA timeseries data for the time range ('2012/3/4','2012/3/6')\n\n >>> from sunpy.net import Fido, attrs as a\n >>> import astropy.units as u\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), a.Instrument('lyra')) # doctest: +REMOTE_DATA\n\n Query for data from Nobeyama Radioheliograph and RHESSI\n\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),\n ... (a.Instrument('norh') & a.Wavelength(17*u.GHz)) | a.Instrument('rhessi')) # doctest: +REMOTE_DATA\n\n Query for 304 Angstrom SDO AIA data with a cadence of 10 minutes\n\n >>> import astropy.units as u\n >>> from sunpy.net import Fido, attrs as a\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),\n ... a.Instrument('AIA'),\n ... a.Wavelength(304*u.angstrom, 304*u.angstrom),\n ... a.Sample(10*u.minute)) # doctest: +REMOTE_DATA\n\n Parameters\n ----------\n query : `sunpy.net.vso.attrs`, `sunpy.net.jsoc.attrs`\n A query consisting of multiple parameters which define the\n requested data. The query is specified using attributes from the\n VSO and the JSOC. The query can mix attributes from the VSO and\n the JSOC.\n\n Returns\n -------\n `sunpy.net.fido_factory.UnifiedResponse`\n Container of responses returned by clients servicing query.\n\n Notes\n -----\n The conjunction 'and' transforms query into disjunctive normal form\n ie. query is now of form A & B or ((A & B) | (C & D))\n This helps in modularising query into parts and handling each of the\n parts individually.\n "
query = attr.and_(*query)
return UnifiedResponse(query_walker.create(query, self))
|
Query for data in form of multiple parameters.
Examples
--------
Query for LYRA timeseries data for the time range ('2012/3/4','2012/3/6')
>>> from sunpy.net import Fido, attrs as a
>>> import astropy.units as u
>>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), a.Instrument('lyra')) # doctest: +REMOTE_DATA
Query for data from Nobeyama Radioheliograph and RHESSI
>>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),
... (a.Instrument('norh') & a.Wavelength(17*u.GHz)) | a.Instrument('rhessi')) # doctest: +REMOTE_DATA
Query for 304 Angstrom SDO AIA data with a cadence of 10 minutes
>>> import astropy.units as u
>>> from sunpy.net import Fido, attrs as a
>>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),
... a.Instrument('AIA'),
... a.Wavelength(304*u.angstrom, 304*u.angstrom),
... a.Sample(10*u.minute)) # doctest: +REMOTE_DATA
Parameters
----------
query : `sunpy.net.vso.attrs`, `sunpy.net.jsoc.attrs`
A query consisting of multiple parameters which define the
requested data. The query is specified using attributes from the
VSO and the JSOC. The query can mix attributes from the VSO and
the JSOC.
Returns
-------
`sunpy.net.fido_factory.UnifiedResponse`
Container of responses returned by clients servicing query.
Notes
-----
The conjunction 'and' transforms query into disjunctive normal form
ie. query is now of form A & B or ((A & B) | (C & D))
This helps in modularising query into parts and handling each of the
parts individually.
|
sunpy/net/fido_factory.py
|
search
|
amogh-jrules/sunpy
| 0
|
python
|
def search(self, *query):
"\n Query for data in form of multiple parameters.\n\n Examples\n --------\n Query for LYRA timeseries data for the time range ('2012/3/4','2012/3/6')\n\n >>> from sunpy.net import Fido, attrs as a\n >>> import astropy.units as u\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), a.Instrument('lyra')) # doctest: +REMOTE_DATA\n\n Query for data from Nobeyama Radioheliograph and RHESSI\n\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),\n ... (a.Instrument('norh') & a.Wavelength(17*u.GHz)) | a.Instrument('rhessi')) # doctest: +REMOTE_DATA\n\n Query for 304 Angstrom SDO AIA data with a cadence of 10 minutes\n\n >>> import astropy.units as u\n >>> from sunpy.net import Fido, attrs as a\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),\n ... a.Instrument('AIA'),\n ... a.Wavelength(304*u.angstrom, 304*u.angstrom),\n ... a.Sample(10*u.minute)) # doctest: +REMOTE_DATA\n\n Parameters\n ----------\n query : `sunpy.net.vso.attrs`, `sunpy.net.jsoc.attrs`\n A query consisting of multiple parameters which define the\n requested data. The query is specified using attributes from the\n VSO and the JSOC. The query can mix attributes from the VSO and\n the JSOC.\n\n Returns\n -------\n `sunpy.net.fido_factory.UnifiedResponse`\n Container of responses returned by clients servicing query.\n\n Notes\n -----\n The conjunction 'and' transforms query into disjunctive normal form\n ie. query is now of form A & B or ((A & B) | (C & D))\n This helps in modularising query into parts and handling each of the\n parts individually.\n "
query = attr.and_(*query)
return UnifiedResponse(query_walker.create(query, self))
|
def search(self, *query):
"\n Query for data in form of multiple parameters.\n\n Examples\n --------\n Query for LYRA timeseries data for the time range ('2012/3/4','2012/3/6')\n\n >>> from sunpy.net import Fido, attrs as a\n >>> import astropy.units as u\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), a.Instrument('lyra')) # doctest: +REMOTE_DATA\n\n Query for data from Nobeyama Radioheliograph and RHESSI\n\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),\n ... (a.Instrument('norh') & a.Wavelength(17*u.GHz)) | a.Instrument('rhessi')) # doctest: +REMOTE_DATA\n\n Query for 304 Angstrom SDO AIA data with a cadence of 10 minutes\n\n >>> import astropy.units as u\n >>> from sunpy.net import Fido, attrs as a\n >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),\n ... a.Instrument('AIA'),\n ... a.Wavelength(304*u.angstrom, 304*u.angstrom),\n ... a.Sample(10*u.minute)) # doctest: +REMOTE_DATA\n\n Parameters\n ----------\n query : `sunpy.net.vso.attrs`, `sunpy.net.jsoc.attrs`\n A query consisting of multiple parameters which define the\n requested data. The query is specified using attributes from the\n VSO and the JSOC. The query can mix attributes from the VSO and\n the JSOC.\n\n Returns\n -------\n `sunpy.net.fido_factory.UnifiedResponse`\n Container of responses returned by clients servicing query.\n\n Notes\n -----\n The conjunction 'and' transforms query into disjunctive normal form\n ie. query is now of form A & B or ((A & B) | (C & D))\n This helps in modularising query into parts and handling each of the\n parts individually.\n "
query = attr.and_(*query)
return UnifiedResponse(query_walker.create(query, self))<|docstring|>Query for data in form of multiple parameters.
Examples
--------
Query for LYRA timeseries data for the time range ('2012/3/4','2012/3/6')
>>> from sunpy.net import Fido, attrs as a
>>> import astropy.units as u
>>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), a.Instrument('lyra')) # doctest: +REMOTE_DATA
Query for data from Nobeyama Radioheliograph and RHESSI
>>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),
... (a.Instrument('norh') & a.Wavelength(17*u.GHz)) | a.Instrument('rhessi')) # doctest: +REMOTE_DATA
Query for 304 Angstrom SDO AIA data with a cadence of 10 minutes
>>> import astropy.units as u
>>> from sunpy.net import Fido, attrs as a
>>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),
... a.Instrument('AIA'),
... a.Wavelength(304*u.angstrom, 304*u.angstrom),
... a.Sample(10*u.minute)) # doctest: +REMOTE_DATA
Parameters
----------
query : `sunpy.net.vso.attrs`, `sunpy.net.jsoc.attrs`
A query consisting of multiple parameters which define the
requested data. The query is specified using attributes from the
VSO and the JSOC. The query can mix attributes from the VSO and
the JSOC.
Returns
-------
`sunpy.net.fido_factory.UnifiedResponse`
Container of responses returned by clients servicing query.
Notes
-----
The conjunction 'and' transforms query into disjunctive normal form
ie. query is now of form A & B or ((A & B) | (C & D))
This helps in modularising query into parts and handling each of the
parts individually.<|endoftext|>
|
30047d281a1640065e3669b368c8335e39a2574e6ee400b0d3e9caab1e13d40e
|
def fetch(self, *query_results, path=None, max_conn=5, progress=True, overwrite=False, downloader=None, **kwargs):
'\n Download the records represented by\n `~sunpy.net.fido_factory.UnifiedResponse` objects.\n\n Parameters\n ----------\n query_results : `sunpy.net.fido_factory.UnifiedResponse`\n Container returned by query method, or multiple.\n\n path : `str`\n The directory to retrieve the files into. Can refer to any fields\n in `UnifiedResponse.response_block_properties` via string formatting,\n moreover the file-name of the file downloaded can be referred to as file,\n e.g. "{source}/{instrument}/{time.start}/{file}".\n\n max_conn : `int`, optional\n The number of parallel download slots.\n\n progress : `bool`, optional\n If `True` show a progress bar showing how many of the total files\n have been downloaded. If `False`, no progress bars will be shown at all.\n\n overwrite : `bool` or `str`, optional\n Determine how to handle downloading if a file already exists with the\n same name. If `False` the file download will be skipped and the path\n returned to the existing file, if `True` the file will be downloaded\n and the existing file will be overwritten, if `\'unique\'` the filename\n will be modified to be unique.\n\n downloader : `parfive.Downloader`, optional\n The download manager to use. If specified the ``max_conn``,\n ``progress`` and ``overwrite`` arguments are ignored.\n\n Returns\n -------\n `parfive.Results`\n\n Examples\n --------\n >>> from sunpy.net.attrs import Time, Instrument\n >>> unifresp = Fido.search(Time(\'2012/3/4\',\'2012/3/5\'), Instrument(\'EIT\')) # doctest: +REMOTE_DATA\n >>> filepaths = Fido.fetch(unifresp) # doctest: +SKIP\n\n If any downloads fail, they can be retried by passing the `parfive.Results` object back into ``fetch``.\n\n >>> filepaths = Fido.fetch(filepaths) # doctest: +SKIP\n\n '
if (path is not None):
exists = list(filter((lambda p: p.exists()), Path(path).parents))
if (not os.access(exists[0], os.W_OK)):
raise PermissionError(f'You do not have permission to write to the directory {exists[0]}.')
if ('wait' in kwargs):
raise ValueError('wait is not a valid keyword argument to Fido.fetch.')
if (downloader is None):
downloader = Downloader(max_conn=max_conn, progress=progress, overwrite=overwrite)
elif (not isinstance(downloader, Downloader)):
raise TypeError('The downloader argument must be a parfive.Downloader object.')
retries = [isinstance(arg, Results) for arg in query_results]
if all(retries):
results = Results()
for retry in query_results:
dr = downloader.retry(retry)
results.data += dr.data
results._errors += dr._errors
return results
elif any(retries):
raise TypeError('If any arguments to fetch are `parfive.Results` objects, all arguments must be.')
reslist = []
for query_result in query_results:
for block in query_result.responses:
reslist.append(block.client.fetch(block, path=path, downloader=downloader, wait=False, **kwargs))
results = downloader.download()
for result in reslist:
if (result is None):
continue
if (not isinstance(result, Results)):
raise TypeError('If wait is False a client must return a parfive.Downloader and either None or a parfive.Results object.')
results.data += result.data
results._errors += result.errors
return results
|
Download the records represented by
`~sunpy.net.fido_factory.UnifiedResponse` objects.
Parameters
----------
query_results : `sunpy.net.fido_factory.UnifiedResponse`
Container returned by query method, or multiple.
path : `str`
The directory to retrieve the files into. Can refer to any fields
in `UnifiedResponse.response_block_properties` via string formatting,
moreover the file-name of the file downloaded can be referred to as file,
e.g. "{source}/{instrument}/{time.start}/{file}".
max_conn : `int`, optional
The number of parallel download slots.
progress : `bool`, optional
If `True` show a progress bar showing how many of the total files
have been downloaded. If `False`, no progress bars will be shown at all.
overwrite : `bool` or `str`, optional
Determine how to handle downloading if a file already exists with the
same name. If `False` the file download will be skipped and the path
returned to the existing file, if `True` the file will be downloaded
and the existing file will be overwritten, if `'unique'` the filename
will be modified to be unique.
downloader : `parfive.Downloader`, optional
The download manager to use. If specified the ``max_conn``,
``progress`` and ``overwrite`` arguments are ignored.
Returns
-------
`parfive.Results`
Examples
--------
>>> from sunpy.net.attrs import Time, Instrument
>>> unifresp = Fido.search(Time('2012/3/4','2012/3/5'), Instrument('EIT')) # doctest: +REMOTE_DATA
>>> filepaths = Fido.fetch(unifresp) # doctest: +SKIP
If any downloads fail, they can be retried by passing the `parfive.Results` object back into ``fetch``.
>>> filepaths = Fido.fetch(filepaths) # doctest: +SKIP
|
sunpy/net/fido_factory.py
|
fetch
|
amogh-jrules/sunpy
| 0
|
python
|
def fetch(self, *query_results, path=None, max_conn=5, progress=True, overwrite=False, downloader=None, **kwargs):
'\n Download the records represented by\n `~sunpy.net.fido_factory.UnifiedResponse` objects.\n\n Parameters\n ----------\n query_results : `sunpy.net.fido_factory.UnifiedResponse`\n Container returned by query method, or multiple.\n\n path : `str`\n The directory to retrieve the files into. Can refer to any fields\n in `UnifiedResponse.response_block_properties` via string formatting,\n moreover the file-name of the file downloaded can be referred to as file,\n e.g. "{source}/{instrument}/{time.start}/{file}".\n\n max_conn : `int`, optional\n The number of parallel download slots.\n\n progress : `bool`, optional\n If `True` show a progress bar showing how many of the total files\n have been downloaded. If `False`, no progress bars will be shown at all.\n\n overwrite : `bool` or `str`, optional\n Determine how to handle downloading if a file already exists with the\n same name. If `False` the file download will be skipped and the path\n returned to the existing file, if `True` the file will be downloaded\n and the existing file will be overwritten, if `\'unique\'` the filename\n will be modified to be unique.\n\n downloader : `parfive.Downloader`, optional\n The download manager to use. If specified the ``max_conn``,\n ``progress`` and ``overwrite`` arguments are ignored.\n\n Returns\n -------\n `parfive.Results`\n\n Examples\n --------\n >>> from sunpy.net.attrs import Time, Instrument\n >>> unifresp = Fido.search(Time(\'2012/3/4\',\'2012/3/5\'), Instrument(\'EIT\')) # doctest: +REMOTE_DATA\n >>> filepaths = Fido.fetch(unifresp) # doctest: +SKIP\n\n If any downloads fail, they can be retried by passing the `parfive.Results` object back into ``fetch``.\n\n >>> filepaths = Fido.fetch(filepaths) # doctest: +SKIP\n\n '
if (path is not None):
exists = list(filter((lambda p: p.exists()), Path(path).parents))
if (not os.access(exists[0], os.W_OK)):
raise PermissionError(f'You do not have permission to write to the directory {exists[0]}.')
if ('wait' in kwargs):
raise ValueError('wait is not a valid keyword argument to Fido.fetch.')
if (downloader is None):
downloader = Downloader(max_conn=max_conn, progress=progress, overwrite=overwrite)
elif (not isinstance(downloader, Downloader)):
raise TypeError('The downloader argument must be a parfive.Downloader object.')
retries = [isinstance(arg, Results) for arg in query_results]
if all(retries):
results = Results()
for retry in query_results:
dr = downloader.retry(retry)
results.data += dr.data
results._errors += dr._errors
return results
elif any(retries):
raise TypeError('If any arguments to fetch are `parfive.Results` objects, all arguments must be.')
reslist = []
for query_result in query_results:
for block in query_result.responses:
reslist.append(block.client.fetch(block, path=path, downloader=downloader, wait=False, **kwargs))
results = downloader.download()
for result in reslist:
if (result is None):
continue
if (not isinstance(result, Results)):
raise TypeError('If wait is False a client must return a parfive.Downloader and either None or a parfive.Results object.')
results.data += result.data
results._errors += result.errors
return results
|
def fetch(self, *query_results, path=None, max_conn=5, progress=True, overwrite=False, downloader=None, **kwargs):
'\n Download the records represented by\n `~sunpy.net.fido_factory.UnifiedResponse` objects.\n\n Parameters\n ----------\n query_results : `sunpy.net.fido_factory.UnifiedResponse`\n Container returned by query method, or multiple.\n\n path : `str`\n The directory to retrieve the files into. Can refer to any fields\n in `UnifiedResponse.response_block_properties` via string formatting,\n moreover the file-name of the file downloaded can be referred to as file,\n e.g. "{source}/{instrument}/{time.start}/{file}".\n\n max_conn : `int`, optional\n The number of parallel download slots.\n\n progress : `bool`, optional\n If `True` show a progress bar showing how many of the total files\n have been downloaded. If `False`, no progress bars will be shown at all.\n\n overwrite : `bool` or `str`, optional\n Determine how to handle downloading if a file already exists with the\n same name. If `False` the file download will be skipped and the path\n returned to the existing file, if `True` the file will be downloaded\n and the existing file will be overwritten, if `\'unique\'` the filename\n will be modified to be unique.\n\n downloader : `parfive.Downloader`, optional\n The download manager to use. If specified the ``max_conn``,\n ``progress`` and ``overwrite`` arguments are ignored.\n\n Returns\n -------\n `parfive.Results`\n\n Examples\n --------\n >>> from sunpy.net.attrs import Time, Instrument\n >>> unifresp = Fido.search(Time(\'2012/3/4\',\'2012/3/5\'), Instrument(\'EIT\')) # doctest: +REMOTE_DATA\n >>> filepaths = Fido.fetch(unifresp) # doctest: +SKIP\n\n If any downloads fail, they can be retried by passing the `parfive.Results` object back into ``fetch``.\n\n >>> filepaths = Fido.fetch(filepaths) # doctest: +SKIP\n\n '
if (path is not None):
exists = list(filter((lambda p: p.exists()), Path(path).parents))
if (not os.access(exists[0], os.W_OK)):
raise PermissionError(f'You do not have permission to write to the directory {exists[0]}.')
if ('wait' in kwargs):
raise ValueError('wait is not a valid keyword argument to Fido.fetch.')
if (downloader is None):
downloader = Downloader(max_conn=max_conn, progress=progress, overwrite=overwrite)
elif (not isinstance(downloader, Downloader)):
raise TypeError('The downloader argument must be a parfive.Downloader object.')
retries = [isinstance(arg, Results) for arg in query_results]
if all(retries):
results = Results()
for retry in query_results:
dr = downloader.retry(retry)
results.data += dr.data
results._errors += dr._errors
return results
elif any(retries):
raise TypeError('If any arguments to fetch are `parfive.Results` objects, all arguments must be.')
reslist = []
for query_result in query_results:
for block in query_result.responses:
reslist.append(block.client.fetch(block, path=path, downloader=downloader, wait=False, **kwargs))
results = downloader.download()
for result in reslist:
if (result is None):
continue
if (not isinstance(result, Results)):
raise TypeError('If wait is False a client must return a parfive.Downloader and either None or a parfive.Results object.')
results.data += result.data
results._errors += result.errors
return results<|docstring|>Download the records represented by
`~sunpy.net.fido_factory.UnifiedResponse` objects.
Parameters
----------
query_results : `sunpy.net.fido_factory.UnifiedResponse`
Container returned by query method, or multiple.
path : `str`
The directory to retrieve the files into. Can refer to any fields
in `UnifiedResponse.response_block_properties` via string formatting,
moreover the file-name of the file downloaded can be referred to as file,
e.g. "{source}/{instrument}/{time.start}/{file}".
max_conn : `int`, optional
The number of parallel download slots.
progress : `bool`, optional
If `True` show a progress bar showing how many of the total files
have been downloaded. If `False`, no progress bars will be shown at all.
overwrite : `bool` or `str`, optional
Determine how to handle downloading if a file already exists with the
same name. If `False` the file download will be skipped and the path
returned to the existing file, if `True` the file will be downloaded
and the existing file will be overwritten, if `'unique'` the filename
will be modified to be unique.
downloader : `parfive.Downloader`, optional
The download manager to use. If specified the ``max_conn``,
``progress`` and ``overwrite`` arguments are ignored.
Returns
-------
`parfive.Results`
Examples
--------
>>> from sunpy.net.attrs import Time, Instrument
>>> unifresp = Fido.search(Time('2012/3/4','2012/3/5'), Instrument('EIT')) # doctest: +REMOTE_DATA
>>> filepaths = Fido.fetch(unifresp) # doctest: +SKIP
If any downloads fail, they can be retried by passing the `parfive.Results` object back into ``fetch``.
>>> filepaths = Fido.fetch(filepaths) # doctest: +SKIP<|endoftext|>
|
b67c11edef84544e881ecb3ece1bab95a91d53255e8997eee851a0543ab1dd49
|
def _check_registered_widgets(self, *args):
'Factory helper function'
candidate_widget_types = list()
for key in self.registry:
if self.registry[key](*args):
candidate_widget_types.append(key)
n_matches = len(candidate_widget_types)
if (n_matches == 0):
raise NoMatchError('This query was not understood by any clients. Did you miss an OR?')
elif (n_matches == 2):
if (VSOClient in candidate_widget_types):
candidate_widget_types.remove(VSOClient)
if (len(candidate_widget_types) > 1):
candidate_names = [cls.__name__ for cls in candidate_widget_types]
raise MultipleMatchError('The following clients matched this query. Please make your query more specific.\n{}'.format(candidate_names))
return candidate_widget_types
|
Factory helper function
|
sunpy/net/fido_factory.py
|
_check_registered_widgets
|
amogh-jrules/sunpy
| 0
|
python
|
def _check_registered_widgets(self, *args):
candidate_widget_types = list()
for key in self.registry:
if self.registry[key](*args):
candidate_widget_types.append(key)
n_matches = len(candidate_widget_types)
if (n_matches == 0):
raise NoMatchError('This query was not understood by any clients. Did you miss an OR?')
elif (n_matches == 2):
if (VSOClient in candidate_widget_types):
candidate_widget_types.remove(VSOClient)
if (len(candidate_widget_types) > 1):
candidate_names = [cls.__name__ for cls in candidate_widget_types]
raise MultipleMatchError('The following clients matched this query. Please make your query more specific.\n{}'.format(candidate_names))
return candidate_widget_types
|
def _check_registered_widgets(self, *args):
candidate_widget_types = list()
for key in self.registry:
if self.registry[key](*args):
candidate_widget_types.append(key)
n_matches = len(candidate_widget_types)
if (n_matches == 0):
raise NoMatchError('This query was not understood by any clients. Did you miss an OR?')
elif (n_matches == 2):
if (VSOClient in candidate_widget_types):
candidate_widget_types.remove(VSOClient)
if (len(candidate_widget_types) > 1):
candidate_names = [cls.__name__ for cls in candidate_widget_types]
raise MultipleMatchError('The following clients matched this query. Please make your query more specific.\n{}'.format(candidate_names))
return candidate_widget_types<|docstring|>Factory helper function<|endoftext|>
|
390a202422c18c742da14fe3881d10e6806ee934a0316ead6a4a9c5b2fd5822d
|
def _make_query_to_client(self, *query):
'\n Given a query, look up the client and perform the query.\n\n Parameters\n ----------\n query : collection of `~sunpy.net.vso.attr` objects\n\n Returns\n -------\n response : `~sunpy.net.dataretriever.client.QueryResponse`\n\n client : `object`\n Instance of client class\n '
candidate_widget_types = self._check_registered_widgets(*query)
tmpclient = candidate_widget_types[0]()
return (tmpclient.search(*query), tmpclient)
|
Given a query, look up the client and perform the query.
Parameters
----------
query : collection of `~sunpy.net.vso.attr` objects
Returns
-------
response : `~sunpy.net.dataretriever.client.QueryResponse`
client : `object`
Instance of client class
|
sunpy/net/fido_factory.py
|
_make_query_to_client
|
amogh-jrules/sunpy
| 0
|
python
|
def _make_query_to_client(self, *query):
'\n Given a query, look up the client and perform the query.\n\n Parameters\n ----------\n query : collection of `~sunpy.net.vso.attr` objects\n\n Returns\n -------\n response : `~sunpy.net.dataretriever.client.QueryResponse`\n\n client : `object`\n Instance of client class\n '
candidate_widget_types = self._check_registered_widgets(*query)
tmpclient = candidate_widget_types[0]()
return (tmpclient.search(*query), tmpclient)
|
def _make_query_to_client(self, *query):
'\n Given a query, look up the client and perform the query.\n\n Parameters\n ----------\n query : collection of `~sunpy.net.vso.attr` objects\n\n Returns\n -------\n response : `~sunpy.net.dataretriever.client.QueryResponse`\n\n client : `object`\n Instance of client class\n '
candidate_widget_types = self._check_registered_widgets(*query)
tmpclient = candidate_widget_types[0]()
return (tmpclient.search(*query), tmpclient)<|docstring|>Given a query, look up the client and perform the query.
Parameters
----------
query : collection of `~sunpy.net.vso.attr` objects
Returns
-------
response : `~sunpy.net.dataretriever.client.QueryResponse`
client : `object`
Instance of client class<|endoftext|>
|
d56a9051e9ad7ab132a875c78fe26acf1b64faf6d279e381184afc64f3a37432
|
def __init__(self, filename, abs2prom, abs2meta, prom2abs):
"\n Initialize.\n\n Parameters\n ----------\n filename : str\n The name of the recording file from which to instantiate the case reader.\n abs2prom : {'input': dict, 'output': dict}\n Dictionary mapping absolute names to promoted names.\n abs2meta : dict\n Dictionary mapping absolute variable names to variable metadata.\n prom2abs : {'input': dict, 'output': dict}\n Dictionary mapping promoted names to absolute names.\n "
self._case_keys = ()
self.num_cases = 0
self.filename = filename
self._abs2prom = abs2prom
self._abs2meta = abs2meta
self._prom2abs = prom2abs
|
Initialize.
Parameters
----------
filename : str
The name of the recording file from which to instantiate the case reader.
abs2prom : {'input': dict, 'output': dict}
Dictionary mapping absolute names to promoted names.
abs2meta : dict
Dictionary mapping absolute variable names to variable metadata.
prom2abs : {'input': dict, 'output': dict}
Dictionary mapping promoted names to absolute names.
|
openmdao/recorders/cases.py
|
__init__
|
ardalanghadimi/ATC
| 0
|
python
|
def __init__(self, filename, abs2prom, abs2meta, prom2abs):
"\n Initialize.\n\n Parameters\n ----------\n filename : str\n The name of the recording file from which to instantiate the case reader.\n abs2prom : {'input': dict, 'output': dict}\n Dictionary mapping absolute names to promoted names.\n abs2meta : dict\n Dictionary mapping absolute variable names to variable metadata.\n prom2abs : {'input': dict, 'output': dict}\n Dictionary mapping promoted names to absolute names.\n "
self._case_keys = ()
self.num_cases = 0
self.filename = filename
self._abs2prom = abs2prom
self._abs2meta = abs2meta
self._prom2abs = prom2abs
|
def __init__(self, filename, abs2prom, abs2meta, prom2abs):
"\n Initialize.\n\n Parameters\n ----------\n filename : str\n The name of the recording file from which to instantiate the case reader.\n abs2prom : {'input': dict, 'output': dict}\n Dictionary mapping absolute names to promoted names.\n abs2meta : dict\n Dictionary mapping absolute variable names to variable metadata.\n prom2abs : {'input': dict, 'output': dict}\n Dictionary mapping promoted names to absolute names.\n "
self._case_keys = ()
self.num_cases = 0
self.filename = filename
self._abs2prom = abs2prom
self._abs2meta = abs2meta
self._prom2abs = prom2abs<|docstring|>Initialize.
Parameters
----------
filename : str
The name of the recording file from which to instantiate the case reader.
abs2prom : {'input': dict, 'output': dict}
Dictionary mapping absolute names to promoted names.
abs2meta : dict
Dictionary mapping absolute variable names to variable metadata.
prom2abs : {'input': dict, 'output': dict}
Dictionary mapping promoted names to absolute names.<|endoftext|>
|
9ffefed58df8e491f2dd4de77fe8e33685d654d875783808c03d3ef67a8a19d7
|
@abstractmethod
def get_case(self, case_id):
'\n Get cases.\n\n Parameters\n ----------\n case_id : str or int\n If int, the index of the case to be read in the case iterations.\n If given as a string, it is the identifier of the case.\n\n Returns\n -------\n Case : object\n The case from the recorded file with the given identifier or index.\n\n '
pass
|
Get cases.
Parameters
----------
case_id : str or int
If int, the index of the case to be read in the case iterations.
If given as a string, it is the identifier of the case.
Returns
-------
Case : object
The case from the recorded file with the given identifier or index.
|
openmdao/recorders/cases.py
|
get_case
|
ardalanghadimi/ATC
| 0
|
python
|
@abstractmethod
def get_case(self, case_id):
'\n Get cases.\n\n Parameters\n ----------\n case_id : str or int\n If int, the index of the case to be read in the case iterations.\n If given as a string, it is the identifier of the case.\n\n Returns\n -------\n Case : object\n The case from the recorded file with the given identifier or index.\n\n '
pass
|
@abstractmethod
def get_case(self, case_id):
'\n Get cases.\n\n Parameters\n ----------\n case_id : str or int\n If int, the index of the case to be read in the case iterations.\n If given as a string, it is the identifier of the case.\n\n Returns\n -------\n Case : object\n The case from the recorded file with the given identifier or index.\n\n '
pass<|docstring|>Get cases.
Parameters
----------
case_id : str or int
If int, the index of the case to be read in the case iterations.
If given as a string, it is the identifier of the case.
Returns
-------
Case : object
The case from the recorded file with the given identifier or index.<|endoftext|>
|
15c9f3d6562cf55fd38845a34a8885bb2d0da71cf46feb1f6f0056a989b86986
|
def list_cases(self):
'\n Return a tuple of the case string identifiers available in this instance of the CaseReader.\n\n Returns\n -------\n _case_keys : tuple\n The case string identifiers.\n '
return self._case_keys
|
Return a tuple of the case string identifiers available in this instance of the CaseReader.
Returns
-------
_case_keys : tuple
The case string identifiers.
|
openmdao/recorders/cases.py
|
list_cases
|
ardalanghadimi/ATC
| 0
|
python
|
def list_cases(self):
'\n Return a tuple of the case string identifiers available in this instance of the CaseReader.\n\n Returns\n -------\n _case_keys : tuple\n The case string identifiers.\n '
return self._case_keys
|
def list_cases(self):
'\n Return a tuple of the case string identifiers available in this instance of the CaseReader.\n\n Returns\n -------\n _case_keys : tuple\n The case string identifiers.\n '
return self._case_keys<|docstring|>Return a tuple of the case string identifiers available in this instance of the CaseReader.
Returns
-------
_case_keys : tuple
The case string identifiers.<|endoftext|>
|
42043feda307df50d494f2abce41bc5aac91ecd425bf5b41b6d56266a97145df
|
def get_iteration_coordinate(self, case_id):
'\n Return the iteration coordinate.\n\n Parameters\n ----------\n case_id : int\n The case number that we want the iteration coordinate for.\n\n Returns\n -------\n iteration_coordinate : str\n The iteration coordinate.\n '
if isinstance(case_id, int):
iteration_coordinate = self._case_keys[case_id]
else:
iteration_coordinate = case_id
return iteration_coordinate
|
Return the iteration coordinate.
Parameters
----------
case_id : int
The case number that we want the iteration coordinate for.
Returns
-------
iteration_coordinate : str
The iteration coordinate.
|
openmdao/recorders/cases.py
|
get_iteration_coordinate
|
ardalanghadimi/ATC
| 0
|
python
|
def get_iteration_coordinate(self, case_id):
'\n Return the iteration coordinate.\n\n Parameters\n ----------\n case_id : int\n The case number that we want the iteration coordinate for.\n\n Returns\n -------\n iteration_coordinate : str\n The iteration coordinate.\n '
if isinstance(case_id, int):
iteration_coordinate = self._case_keys[case_id]
else:
iteration_coordinate = case_id
return iteration_coordinate
|
def get_iteration_coordinate(self, case_id):
'\n Return the iteration coordinate.\n\n Parameters\n ----------\n case_id : int\n The case number that we want the iteration coordinate for.\n\n Returns\n -------\n iteration_coordinate : str\n The iteration coordinate.\n '
if isinstance(case_id, int):
iteration_coordinate = self._case_keys[case_id]
else:
iteration_coordinate = case_id
return iteration_coordinate<|docstring|>Return the iteration coordinate.
Parameters
----------
case_id : int
The case number that we want the iteration coordinate for.
Returns
-------
iteration_coordinate : str
The iteration coordinate.<|endoftext|>
|
87e89118ceae48eba54c747b1c67764d3de3c8a4b26e9731fc17699a14422379
|
def sasena(x):
'\n SASENA function (two variables).\n\n Input\n X - (nsamp x nvar) matrix of experimental design.\n\n Output\n Y - (nsamp x 1) vector of responses.\n '
Y = np.zeros(shape=[np.size(x, 0), 1])
for ii in range(0, np.size(x, 0)):
xtemp = x[(ii, :)]
x1 = xtemp[0]
x2 = xtemp[1]
Y[(ii, 0)] = ((((2 + (0.01 * ((x2 - (x1 ** 2)) ** 2))) + ((1 - x1) ** 2)) + (2 * ((2 - x2) ** 2))) + ((7 * np.sin((0.5 * x1))) * np.sin(((0.7 * x1) * x2))))
return Y
|
SASENA function (two variables).
Input
X - (nsamp x nvar) matrix of experimental design.
Output
Y - (nsamp x 1) vector of responses.
|
kadal/testcase/analyticalfcn/cases.py
|
sasena
|
timjim333/KADAL
| 7
|
python
|
def sasena(x):
'\n SASENA function (two variables).\n\n Input\n X - (nsamp x nvar) matrix of experimental design.\n\n Output\n Y - (nsamp x 1) vector of responses.\n '
Y = np.zeros(shape=[np.size(x, 0), 1])
for ii in range(0, np.size(x, 0)):
xtemp = x[(ii, :)]
x1 = xtemp[0]
x2 = xtemp[1]
Y[(ii, 0)] = ((((2 + (0.01 * ((x2 - (x1 ** 2)) ** 2))) + ((1 - x1) ** 2)) + (2 * ((2 - x2) ** 2))) + ((7 * np.sin((0.5 * x1))) * np.sin(((0.7 * x1) * x2))))
return Y
|
def sasena(x):
'\n SASENA function (two variables).\n\n Input\n X - (nsamp x nvar) matrix of experimental design.\n\n Output\n Y - (nsamp x 1) vector of responses.\n '
Y = np.zeros(shape=[np.size(x, 0), 1])
for ii in range(0, np.size(x, 0)):
xtemp = x[(ii, :)]
x1 = xtemp[0]
x2 = xtemp[1]
Y[(ii, 0)] = ((((2 + (0.01 * ((x2 - (x1 ** 2)) ** 2))) + ((1 - x1) ** 2)) + (2 * ((2 - x2) ** 2))) + ((7 * np.sin((0.5 * x1))) * np.sin(((0.7 * x1) * x2))))
return Y<|docstring|>SASENA function (two variables).
Input
X - (nsamp x nvar) matrix of experimental design.
Output
Y - (nsamp x 1) vector of responses.<|endoftext|>
|
e61f13008b23105a8f0b6d1ec30ceffd7da6b6c096e86a55bab26c2e0bf4601a
|
def schaffer(x):
'\n Generalized Schaffer problem\n Reference: "Emmerich, M. T., & Deutz, A. H. (2007, March). Test problems\n based on Lamé superspheres. In International Conference on Evolutionary\n Multi-Criterion Optimization (pp. 922-936). Springer, Berlin, Heidelberg."\n\n Inputs:\n X: Vector of decision variables\n r: Describes the shape of the Pareto front\n\n Output:\n fitness: fitness function value\n\n Written by Kaifeng Yang, 20/1/2016\n '
r = 1
a = (1 / (2 * r))
m = np.size(x, 0)
n = np.size(x, 1)
fitness = np.zeros(shape=[m, 2])
for i in range(0, m):
fitness[(i, 0)] = ((1 / (n ** a)) * (np.sum((x[(i, :)] ** 2)) ** a))
fitness[(i, 1)] = ((1 / (n ** a)) * (np.sum(((1 - x[(i, :)]) ** 2)) ** a))
if (m == 1):
fitness = fitness[(0, :)]
return fitness
|
Generalized Schaffer problem
Reference: "Emmerich, M. T., & Deutz, A. H. (2007, March). Test problems
based on Lamé superspheres. In International Conference on Evolutionary
Multi-Criterion Optimization (pp. 922-936). Springer, Berlin, Heidelberg."
Inputs:
X: Vector of decision variables
r: Describes the shape of the Pareto front
Output:
fitness: fitness function value
Written by Kaifeng Yang, 20/1/2016
|
kadal/testcase/analyticalfcn/cases.py
|
schaffer
|
timjim333/KADAL
| 7
|
python
|
def schaffer(x):
'\n Generalized Schaffer problem\n Reference: "Emmerich, M. T., & Deutz, A. H. (2007, March). Test problems\n based on Lamé superspheres. In International Conference on Evolutionary\n Multi-Criterion Optimization (pp. 922-936). Springer, Berlin, Heidelberg."\n\n Inputs:\n X: Vector of decision variables\n r: Describes the shape of the Pareto front\n\n Output:\n fitness: fitness function value\n\n Written by Kaifeng Yang, 20/1/2016\n '
r = 1
a = (1 / (2 * r))
m = np.size(x, 0)
n = np.size(x, 1)
fitness = np.zeros(shape=[m, 2])
for i in range(0, m):
fitness[(i, 0)] = ((1 / (n ** a)) * (np.sum((x[(i, :)] ** 2)) ** a))
fitness[(i, 1)] = ((1 / (n ** a)) * (np.sum(((1 - x[(i, :)]) ** 2)) ** a))
if (m == 1):
fitness = fitness[(0, :)]
return fitness
|
def schaffer(x):
'\n Generalized Schaffer problem\n Reference: "Emmerich, M. T., & Deutz, A. H. (2007, March). Test problems\n based on Lamé superspheres. In International Conference on Evolutionary\n Multi-Criterion Optimization (pp. 922-936). Springer, Berlin, Heidelberg."\n\n Inputs:\n X: Vector of decision variables\n r: Describes the shape of the Pareto front\n\n Output:\n fitness: fitness function value\n\n Written by Kaifeng Yang, 20/1/2016\n '
r = 1
a = (1 / (2 * r))
m = np.size(x, 0)
n = np.size(x, 1)
fitness = np.zeros(shape=[m, 2])
for i in range(0, m):
fitness[(i, 0)] = ((1 / (n ** a)) * (np.sum((x[(i, :)] ** 2)) ** a))
fitness[(i, 1)] = ((1 / (n ** a)) * (np.sum(((1 - x[(i, :)]) ** 2)) ** a))
if (m == 1):
fitness = fitness[(0, :)]
return fitness<|docstring|>Generalized Schaffer problem
Reference: "Emmerich, M. T., & Deutz, A. H. (2007, March). Test problems
based on Lamé superspheres. In International Conference on Evolutionary
Multi-Criterion Optimization (pp. 922-936). Springer, Berlin, Heidelberg."
Inputs:
X: Vector of decision variables
r: Describes the shape of the Pareto front
Output:
fitness: fitness function value
Written by Kaifeng Yang, 20/1/2016<|endoftext|>
|
d9adb1856edcf56c8a520fb547431afaa0a41f8e69ca84cedad9027cb7995208
|
def __init__(self, expr: str, col: ColumnClause, **kwargs):
"Sqlalchemy class that can be can be used to render native column elements\n respeting engine-specific quoting rules as part of a string-based expression.\n\n :param expr: Sql expression with '{col}' denoting the locations where the col\n object will be rendered.\n :param col: the target column\n "
super().__init__(expr, **kwargs)
self.col = col
|
Sqlalchemy class that can be can be used to render native column elements
respeting engine-specific quoting rules as part of a string-based expression.
:param expr: Sql expression with '{col}' denoting the locations where the col
object will be rendered.
:param col: the target column
|
superset/db_engine_specs.py
|
__init__
|
riskilla/incubator-superset
| 1
|
python
|
def __init__(self, expr: str, col: ColumnClause, **kwargs):
"Sqlalchemy class that can be can be used to render native column elements\n respeting engine-specific quoting rules as part of a string-based expression.\n\n :param expr: Sql expression with '{col}' denoting the locations where the col\n object will be rendered.\n :param col: the target column\n "
super().__init__(expr, **kwargs)
self.col = col
|
def __init__(self, expr: str, col: ColumnClause, **kwargs):
"Sqlalchemy class that can be can be used to render native column elements\n respeting engine-specific quoting rules as part of a string-based expression.\n\n :param expr: Sql expression with '{col}' denoting the locations where the col\n object will be rendered.\n :param col: the target column\n "
super().__init__(expr, **kwargs)
self.col = col<|docstring|>Sqlalchemy class that can be can be used to render native column elements
respeting engine-specific quoting rules as part of a string-based expression.
:param expr: Sql expression with '{col}' denoting the locations where the col
object will be rendered.
:param col: the target column<|endoftext|>
|
4957ee93e500cec5609250527ba591681b5093b4da1870aa22fc2318ba9f9c09
|
@classmethod
def get_timestamp_expr(cls, col: ColumnClause, pdf: Optional[str], time_grain: Optional[str]) -> TimestampExpression:
'\n Construct a TimeExpression to be used in a SQLAlchemy query.\n\n :param col: Target column for the TimeExpression\n :param pdf: date format (seconds or milliseconds)\n :param time_grain: time grain, e.g. P1Y for 1 year\n :return: TimestampExpression object\n '
if time_grain:
time_expr = cls.time_grain_functions.get(time_grain)
if (not time_expr):
raise NotImplementedError(f'No grain spec for {time_grain} for database {cls.engine}')
else:
time_expr = '{col}'
if (pdf == 'epoch_s'):
time_expr = time_expr.replace('{col}', cls.epoch_to_dttm())
elif (pdf == 'epoch_ms'):
time_expr = time_expr.replace('{col}', cls.epoch_ms_to_dttm())
return TimestampExpression(time_expr, col, type_=DateTime)
|
Construct a TimeExpression to be used in a SQLAlchemy query.
:param col: Target column for the TimeExpression
:param pdf: date format (seconds or milliseconds)
:param time_grain: time grain, e.g. P1Y for 1 year
:return: TimestampExpression object
|
superset/db_engine_specs.py
|
get_timestamp_expr
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_timestamp_expr(cls, col: ColumnClause, pdf: Optional[str], time_grain: Optional[str]) -> TimestampExpression:
'\n Construct a TimeExpression to be used in a SQLAlchemy query.\n\n :param col: Target column for the TimeExpression\n :param pdf: date format (seconds or milliseconds)\n :param time_grain: time grain, e.g. P1Y for 1 year\n :return: TimestampExpression object\n '
if time_grain:
time_expr = cls.time_grain_functions.get(time_grain)
if (not time_expr):
raise NotImplementedError(f'No grain spec for {time_grain} for database {cls.engine}')
else:
time_expr = '{col}'
if (pdf == 'epoch_s'):
time_expr = time_expr.replace('{col}', cls.epoch_to_dttm())
elif (pdf == 'epoch_ms'):
time_expr = time_expr.replace('{col}', cls.epoch_ms_to_dttm())
return TimestampExpression(time_expr, col, type_=DateTime)
|
@classmethod
def get_timestamp_expr(cls, col: ColumnClause, pdf: Optional[str], time_grain: Optional[str]) -> TimestampExpression:
'\n Construct a TimeExpression to be used in a SQLAlchemy query.\n\n :param col: Target column for the TimeExpression\n :param pdf: date format (seconds or milliseconds)\n :param time_grain: time grain, e.g. P1Y for 1 year\n :return: TimestampExpression object\n '
if time_grain:
time_expr = cls.time_grain_functions.get(time_grain)
if (not time_expr):
raise NotImplementedError(f'No grain spec for {time_grain} for database {cls.engine}')
else:
time_expr = '{col}'
if (pdf == 'epoch_s'):
time_expr = time_expr.replace('{col}', cls.epoch_to_dttm())
elif (pdf == 'epoch_ms'):
time_expr = time_expr.replace('{col}', cls.epoch_ms_to_dttm())
return TimestampExpression(time_expr, col, type_=DateTime)<|docstring|>Construct a TimeExpression to be used in a SQLAlchemy query.
:param col: Target column for the TimeExpression
:param pdf: date format (seconds or milliseconds)
:param time_grain: time grain, e.g. P1Y for 1 year
:return: TimestampExpression object<|endoftext|>
|
dc04632344397f772431e8169c93d5c22d24d884e5d95065f55982bb7ab708b9
|
@classmethod
def alter_new_orm_column(cls, orm_col):
'Allow altering default column attributes when first detected/added\n\n For instance special column like `__time` for Druid can be\n set to is_dttm=True. Note that this only gets called when new\n columns are detected/created'
pass
|
Allow altering default column attributes when first detected/added
For instance special column like `__time` for Druid can be
set to is_dttm=True. Note that this only gets called when new
columns are detected/created
|
superset/db_engine_specs.py
|
alter_new_orm_column
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def alter_new_orm_column(cls, orm_col):
'Allow altering default column attributes when first detected/added\n\n For instance special column like `__time` for Druid can be\n set to is_dttm=True. Note that this only gets called when new\n columns are detected/created'
pass
|
@classmethod
def alter_new_orm_column(cls, orm_col):
'Allow altering default column attributes when first detected/added\n\n For instance special column like `__time` for Druid can be\n set to is_dttm=True. Note that this only gets called when new\n columns are detected/created'
pass<|docstring|>Allow altering default column attributes when first detected/added
For instance special column like `__time` for Druid can be
set to is_dttm=True. Note that this only gets called when new
columns are detected/created<|endoftext|>
|
e171c46b2c515a00945bda3a6fcacdf12e364cacfcc033ae22bf8cebe6c21b15
|
@classmethod
def extra_table_metadata(cls, database, table_name, schema_name):
'Returns engine-specific table metadata'
return {}
|
Returns engine-specific table metadata
|
superset/db_engine_specs.py
|
extra_table_metadata
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def extra_table_metadata(cls, database, table_name, schema_name):
return {}
|
@classmethod
def extra_table_metadata(cls, database, table_name, schema_name):
return {}<|docstring|>Returns engine-specific table metadata<|endoftext|>
|
a0d1d48559d8f8c711bae2df29ff816ca60c3d727bd471cf392e9ffc4be38460
|
@classmethod
def apply_limit_to_sql(cls, sql, limit, database):
'Alters the SQL statement to apply a LIMIT clause'
if (cls.limit_method == LimitMethod.WRAP_SQL):
sql = sql.strip('\t\n ;')
qry = select('*').select_from(TextAsFrom(text(sql), ['*']).alias('inner_qry')).limit(limit)
return database.compile_sqla_query(qry)
elif LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.get_query_with_new_limit(limit)
return sql
|
Alters the SQL statement to apply a LIMIT clause
|
superset/db_engine_specs.py
|
apply_limit_to_sql
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def apply_limit_to_sql(cls, sql, limit, database):
if (cls.limit_method == LimitMethod.WRAP_SQL):
sql = sql.strip('\t\n ;')
qry = select('*').select_from(TextAsFrom(text(sql), ['*']).alias('inner_qry')).limit(limit)
return database.compile_sqla_query(qry)
elif LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.get_query_with_new_limit(limit)
return sql
|
@classmethod
def apply_limit_to_sql(cls, sql, limit, database):
if (cls.limit_method == LimitMethod.WRAP_SQL):
sql = sql.strip('\t\n ;')
qry = select('*').select_from(TextAsFrom(text(sql), ['*']).alias('inner_qry')).limit(limit)
return database.compile_sqla_query(qry)
elif LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.get_query_with_new_limit(limit)
return sql<|docstring|>Alters the SQL statement to apply a LIMIT clause<|endoftext|>
|
8acec19044cb4eb41696b5e8ac7a3f74b66de6efbbd45d4e8865f700e7b908a5
|
@classmethod
def get_all_datasource_names(cls, db, datasource_type: str) -> List[utils.DatasourceName]:
"Returns a list of all tables or views in database.\n\n :param db: Database instance\n :param datasource_type: Datasource_type can be 'table' or 'view'\n :return: List of all datasources in database or schema\n "
schemas = db.get_all_schema_names(cache=db.schema_cache_enabled, cache_timeout=db.schema_cache_timeout, force=True)
all_datasources: List[utils.DatasourceName] = []
for schema in schemas:
if (datasource_type == 'table'):
all_datasources += db.get_all_table_names_in_schema(schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout)
elif (datasource_type == 'view'):
all_datasources += db.get_all_view_names_in_schema(schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout)
else:
raise Exception(f'Unsupported datasource_type: {datasource_type}')
return all_datasources
|
Returns a list of all tables or views in database.
:param db: Database instance
:param datasource_type: Datasource_type can be 'table' or 'view'
:return: List of all datasources in database or schema
|
superset/db_engine_specs.py
|
get_all_datasource_names
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_all_datasource_names(cls, db, datasource_type: str) -> List[utils.DatasourceName]:
"Returns a list of all tables or views in database.\n\n :param db: Database instance\n :param datasource_type: Datasource_type can be 'table' or 'view'\n :return: List of all datasources in database or schema\n "
schemas = db.get_all_schema_names(cache=db.schema_cache_enabled, cache_timeout=db.schema_cache_timeout, force=True)
all_datasources: List[utils.DatasourceName] = []
for schema in schemas:
if (datasource_type == 'table'):
all_datasources += db.get_all_table_names_in_schema(schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout)
elif (datasource_type == 'view'):
all_datasources += db.get_all_view_names_in_schema(schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout)
else:
raise Exception(f'Unsupported datasource_type: {datasource_type}')
return all_datasources
|
@classmethod
def get_all_datasource_names(cls, db, datasource_type: str) -> List[utils.DatasourceName]:
"Returns a list of all tables or views in database.\n\n :param db: Database instance\n :param datasource_type: Datasource_type can be 'table' or 'view'\n :return: List of all datasources in database or schema\n "
schemas = db.get_all_schema_names(cache=db.schema_cache_enabled, cache_timeout=db.schema_cache_timeout, force=True)
all_datasources: List[utils.DatasourceName] = []
for schema in schemas:
if (datasource_type == 'table'):
all_datasources += db.get_all_table_names_in_schema(schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout)
elif (datasource_type == 'view'):
all_datasources += db.get_all_view_names_in_schema(schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout)
else:
raise Exception(f'Unsupported datasource_type: {datasource_type}')
return all_datasources<|docstring|>Returns a list of all tables or views in database.
:param db: Database instance
:param datasource_type: Datasource_type can be 'table' or 'view'
:return: List of all datasources in database or schema<|endoftext|>
|
b4895ec87cca3c6c75b895d4321bfa59abcb7b81ec6e73293b68df684c6bde7a
|
@classmethod
def handle_cursor(cls, cursor, query, session):
'Handle a live cursor between the execute and fetchall calls\n\n The flow works without this method doing anything, but it allows\n for handling the cursor and updating progress information in the\n query object'
pass
|
Handle a live cursor between the execute and fetchall calls
The flow works without this method doing anything, but it allows
for handling the cursor and updating progress information in the
query object
|
superset/db_engine_specs.py
|
handle_cursor
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def handle_cursor(cls, cursor, query, session):
'Handle a live cursor between the execute and fetchall calls\n\n The flow works without this method doing anything, but it allows\n for handling the cursor and updating progress information in the\n query object'
pass
|
@classmethod
def handle_cursor(cls, cursor, query, session):
'Handle a live cursor between the execute and fetchall calls\n\n The flow works without this method doing anything, but it allows\n for handling the cursor and updating progress information in the\n query object'
pass<|docstring|>Handle a live cursor between the execute and fetchall calls
The flow works without this method doing anything, but it allows
for handling the cursor and updating progress information in the
query object<|endoftext|>
|
533004d372638d1b3311bade2f9528d4fa8f523450391c9bd81b06dd6175265a
|
@classmethod
def extract_error_message(cls, e):
'Extract error message for queries'
return utils.error_msg_from_exception(e)
|
Extract error message for queries
|
superset/db_engine_specs.py
|
extract_error_message
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def extract_error_message(cls, e):
return utils.error_msg_from_exception(e)
|
@classmethod
def extract_error_message(cls, e):
return utils.error_msg_from_exception(e)<|docstring|>Extract error message for queries<|endoftext|>
|
0242077610d59c74d2ce782eb325334b8fdc68d041c915ec57bf6e77598a3777
|
@classmethod
def adjust_database_uri(cls, uri, selected_schema):
"Based on a URI and selected schema, return a new URI\n\n The URI here represents the URI as entered when saving the database,\n ``selected_schema`` is the schema currently active presumably in\n the SQL Lab dropdown. Based on that, for some database engine,\n we can return a new altered URI that connects straight to the\n active schema, meaning the users won't have to prefix the object\n names by the schema name.\n\n Some databases engines have 2 level of namespacing: database and\n schema (postgres, oracle, mssql, ...)\n For those it's probably better to not alter the database\n component of the URI with the schema name, it won't work.\n\n Some database drivers like presto accept '{catalog}/{schema}' in\n the database component of the URL, that can be handled here.\n "
return uri
|
Based on a URI and selected schema, return a new URI
The URI here represents the URI as entered when saving the database,
``selected_schema`` is the schema currently active presumably in
the SQL Lab dropdown. Based on that, for some database engine,
we can return a new altered URI that connects straight to the
active schema, meaning the users won't have to prefix the object
names by the schema name.
Some databases engines have 2 level of namespacing: database and
schema (postgres, oracle, mssql, ...)
For those it's probably better to not alter the database
component of the URI with the schema name, it won't work.
Some database drivers like presto accept '{catalog}/{schema}' in
the database component of the URL, that can be handled here.
|
superset/db_engine_specs.py
|
adjust_database_uri
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def adjust_database_uri(cls, uri, selected_schema):
"Based on a URI and selected schema, return a new URI\n\n The URI here represents the URI as entered when saving the database,\n ``selected_schema`` is the schema currently active presumably in\n the SQL Lab dropdown. Based on that, for some database engine,\n we can return a new altered URI that connects straight to the\n active schema, meaning the users won't have to prefix the object\n names by the schema name.\n\n Some databases engines have 2 level of namespacing: database and\n schema (postgres, oracle, mssql, ...)\n For those it's probably better to not alter the database\n component of the URI with the schema name, it won't work.\n\n Some database drivers like presto accept '{catalog}/{schema}' in\n the database component of the URL, that can be handled here.\n "
return uri
|
@classmethod
def adjust_database_uri(cls, uri, selected_schema):
"Based on a URI and selected schema, return a new URI\n\n The URI here represents the URI as entered when saving the database,\n ``selected_schema`` is the schema currently active presumably in\n the SQL Lab dropdown. Based on that, for some database engine,\n we can return a new altered URI that connects straight to the\n active schema, meaning the users won't have to prefix the object\n names by the schema name.\n\n Some databases engines have 2 level of namespacing: database and\n schema (postgres, oracle, mssql, ...)\n For those it's probably better to not alter the database\n component of the URI with the schema name, it won't work.\n\n Some database drivers like presto accept '{catalog}/{schema}' in\n the database component of the URL, that can be handled here.\n "
return uri<|docstring|>Based on a URI and selected schema, return a new URI
The URI here represents the URI as entered when saving the database,
``selected_schema`` is the schema currently active presumably in
the SQL Lab dropdown. Based on that, for some database engine,
we can return a new altered URI that connects straight to the
active schema, meaning the users won't have to prefix the object
names by the schema name.
Some databases engines have 2 level of namespacing: database and
schema (postgres, oracle, mssql, ...)
For those it's probably better to not alter the database
component of the URI with the schema name, it won't work.
Some database drivers like presto accept '{catalog}/{schema}' in
the database component of the URL, that can be handled here.<|endoftext|>
|
fa169524ae9d0637fb7bf9c505453ca7c6f5dfd21eaffdf2ee69eeb1035f3df1
|
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
'\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n '
if ((impersonate_user is not None) and (username is not None)):
url.username = username
|
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
|
superset/db_engine_specs.py
|
modify_url_for_impersonation
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
'\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n '
if ((impersonate_user is not None) and (username is not None)):
url.username = username
|
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
'\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n '
if ((impersonate_user is not None) and (username is not None)):
url.username = username<|docstring|>Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username<|endoftext|>
|
e0a6996a59e705d52736df3ab0141189533a0061ac60602e0d78cb8fadafdf65
|
@classmethod
def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
'\n Return a configuration dictionary that can be merged with other configs\n that can set the correct properties for impersonating users\n :param uri: URI string\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n :return: Dictionary with configs required for impersonation\n '
return {}
|
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
:return: Dictionary with configs required for impersonation
|
superset/db_engine_specs.py
|
get_configuration_for_impersonation
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
'\n Return a configuration dictionary that can be merged with other configs\n that can set the correct properties for impersonating users\n :param uri: URI string\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n :return: Dictionary with configs required for impersonation\n '
return {}
|
@classmethod
def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
'\n Return a configuration dictionary that can be merged with other configs\n that can set the correct properties for impersonating users\n :param uri: URI string\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n :return: Dictionary with configs required for impersonation\n '
return {}<|docstring|>Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
:return: Dictionary with configs required for impersonation<|endoftext|>
|
92311832f21dae18ef55fb0df23d679005c25222b78a399f796386b616d8551a
|
@classmethod
def make_label_compatible(cls, label):
'\n Conditionally mutate and/or quote a sql column/expression label. If\n force_column_alias_quotes is set to True, return the label as a\n sqlalchemy.sql.elements.quoted_name object to ensure that the select query\n and query results have same case. Otherwise return the mutated label as a\n regular string. If maxmimum supported column name length is exceeded,\n generate a truncated label by calling truncate_label().\n '
label_mutated = cls.mutate_label(label)
if (cls.max_column_name_length and (len(label_mutated) > cls.max_column_name_length)):
label_mutated = cls.truncate_label(label)
if cls.force_column_alias_quotes:
label_mutated = quoted_name(label_mutated, True)
return label_mutated
|
Conditionally mutate and/or quote a sql column/expression label. If
force_column_alias_quotes is set to True, return the label as a
sqlalchemy.sql.elements.quoted_name object to ensure that the select query
and query results have same case. Otherwise return the mutated label as a
regular string. If maxmimum supported column name length is exceeded,
generate a truncated label by calling truncate_label().
|
superset/db_engine_specs.py
|
make_label_compatible
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def make_label_compatible(cls, label):
'\n Conditionally mutate and/or quote a sql column/expression label. If\n force_column_alias_quotes is set to True, return the label as a\n sqlalchemy.sql.elements.quoted_name object to ensure that the select query\n and query results have same case. Otherwise return the mutated label as a\n regular string. If maxmimum supported column name length is exceeded,\n generate a truncated label by calling truncate_label().\n '
label_mutated = cls.mutate_label(label)
if (cls.max_column_name_length and (len(label_mutated) > cls.max_column_name_length)):
label_mutated = cls.truncate_label(label)
if cls.force_column_alias_quotes:
label_mutated = quoted_name(label_mutated, True)
return label_mutated
|
@classmethod
def make_label_compatible(cls, label):
'\n Conditionally mutate and/or quote a sql column/expression label. If\n force_column_alias_quotes is set to True, return the label as a\n sqlalchemy.sql.elements.quoted_name object to ensure that the select query\n and query results have same case. Otherwise return the mutated label as a\n regular string. If maxmimum supported column name length is exceeded,\n generate a truncated label by calling truncate_label().\n '
label_mutated = cls.mutate_label(label)
if (cls.max_column_name_length and (len(label_mutated) > cls.max_column_name_length)):
label_mutated = cls.truncate_label(label)
if cls.force_column_alias_quotes:
label_mutated = quoted_name(label_mutated, True)
return label_mutated<|docstring|>Conditionally mutate and/or quote a sql column/expression label. If
force_column_alias_quotes is set to True, return the label as a
sqlalchemy.sql.elements.quoted_name object to ensure that the select query
and query results have same case. Otherwise return the mutated label as a
regular string. If maxmimum supported column name length is exceeded,
generate a truncated label by calling truncate_label().<|endoftext|>
|
21d1d569a340fa177aeda22c1bd45ec64bb5f474964c31507164d8725c8d8db2
|
@classmethod
def get_sqla_column_type(cls, type_):
'\n Return a sqlalchemy native column type that corresponds to the column type\n defined in the data source (optional). Needs to be overridden if column requires\n special handling (see MSSQL for example of NCHAR/NVARCHAR handling).\n '
return None
|
Return a sqlalchemy native column type that corresponds to the column type
defined in the data source (optional). Needs to be overridden if column requires
special handling (see MSSQL for example of NCHAR/NVARCHAR handling).
|
superset/db_engine_specs.py
|
get_sqla_column_type
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_sqla_column_type(cls, type_):
'\n Return a sqlalchemy native column type that corresponds to the column type\n defined in the data source (optional). Needs to be overridden if column requires\n special handling (see MSSQL for example of NCHAR/NVARCHAR handling).\n '
return None
|
@classmethod
def get_sqla_column_type(cls, type_):
'\n Return a sqlalchemy native column type that corresponds to the column type\n defined in the data source (optional). Needs to be overridden if column requires\n special handling (see MSSQL for example of NCHAR/NVARCHAR handling).\n '
return None<|docstring|>Return a sqlalchemy native column type that corresponds to the column type
defined in the data source (optional). Needs to be overridden if column requires
special handling (see MSSQL for example of NCHAR/NVARCHAR handling).<|endoftext|>
|
2f0ff1992a32dca912d56fd6cb7f6c9a43b1b0cc50e852aadcb02721e09a2b10
|
@staticmethod
def mutate_label(label):
"\n Most engines support mixed case aliases that can include numbers\n and special characters, like commas, parentheses etc. For engines that\n have restrictions on what types of aliases are supported, this method\n can be overridden to ensure that labels conform to the engine's\n limitations. Mutated labels should be deterministic (input label A always\n yields output label X) and unique (input labels A and B don't yield the same\n output label X).\n "
return label
|
Most engines support mixed case aliases that can include numbers
and special characters, like commas, parentheses etc. For engines that
have restrictions on what types of aliases are supported, this method
can be overridden to ensure that labels conform to the engine's
limitations. Mutated labels should be deterministic (input label A always
yields output label X) and unique (input labels A and B don't yield the same
output label X).
|
superset/db_engine_specs.py
|
mutate_label
|
riskilla/incubator-superset
| 1
|
python
|
@staticmethod
def mutate_label(label):
"\n Most engines support mixed case aliases that can include numbers\n and special characters, like commas, parentheses etc. For engines that\n have restrictions on what types of aliases are supported, this method\n can be overridden to ensure that labels conform to the engine's\n limitations. Mutated labels should be deterministic (input label A always\n yields output label X) and unique (input labels A and B don't yield the same\n output label X).\n "
return label
|
@staticmethod
def mutate_label(label):
"\n Most engines support mixed case aliases that can include numbers\n and special characters, like commas, parentheses etc. For engines that\n have restrictions on what types of aliases are supported, this method\n can be overridden to ensure that labels conform to the engine's\n limitations. Mutated labels should be deterministic (input label A always\n yields output label X) and unique (input labels A and B don't yield the same\n output label X).\n "
return label<|docstring|>Most engines support mixed case aliases that can include numbers
and special characters, like commas, parentheses etc. For engines that
have restrictions on what types of aliases are supported, this method
can be overridden to ensure that labels conform to the engine's
limitations. Mutated labels should be deterministic (input label A always
yields output label X) and unique (input labels A and B don't yield the same
output label X).<|endoftext|>
|
78b577a4304b9bc1f70352f44400700a8af0097d33201ed540c272581769da2c
|
@classmethod
def truncate_label(cls, label):
'\n In the case that a label exceeds the max length supported by the engine,\n this method is used to construct a deterministic and unique label based on\n an md5 hash.\n '
label = hashlib.md5(label.encode('utf-8')).hexdigest()
if (cls.max_column_name_length and (len(label) > cls.max_column_name_length)):
label = label[:cls.max_column_name_length]
return label
|
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash.
|
superset/db_engine_specs.py
|
truncate_label
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def truncate_label(cls, label):
'\n In the case that a label exceeds the max length supported by the engine,\n this method is used to construct a deterministic and unique label based on\n an md5 hash.\n '
label = hashlib.md5(label.encode('utf-8')).hexdigest()
if (cls.max_column_name_length and (len(label) > cls.max_column_name_length)):
label = label[:cls.max_column_name_length]
return label
|
@classmethod
def truncate_label(cls, label):
'\n In the case that a label exceeds the max length supported by the engine,\n this method is used to construct a deterministic and unique label based on\n an md5 hash.\n '
label = hashlib.md5(label.encode('utf-8')).hexdigest()
if (cls.max_column_name_length and (len(label) > cls.max_column_name_length)):
label = label[:cls.max_column_name_length]
return label<|docstring|>In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash.<|endoftext|>
|
3af84d915b2e8f63fb199212530595b971198b0ebb53ad390482496922d48b00
|
@classmethod
def get_table_names(cls, inspector, schema):
'Need to consider foreign tables for PostgreSQL'
tables = inspector.get_table_names(schema)
tables.extend(inspector.get_foreign_table_names(schema))
return sorted(tables)
|
Need to consider foreign tables for PostgreSQL
|
superset/db_engine_specs.py
|
get_table_names
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_table_names(cls, inspector, schema):
tables = inspector.get_table_names(schema)
tables.extend(inspector.get_foreign_table_names(schema))
return sorted(tables)
|
@classmethod
def get_table_names(cls, inspector, schema):
tables = inspector.get_table_names(schema)
tables.extend(inspector.get_foreign_table_names(schema))
return sorted(tables)<|docstring|>Need to consider foreign tables for PostgreSQL<|endoftext|>
|
4adba396d9c5f8267174efa19a5bcc65084e0164dac4313b0749c1f3ca54ce93
|
@staticmethod
def mutate_label(label):
'\n Redshift only supports lowercase column names and aliases.\n :param str label: Original label which might include uppercase letters\n :return: String that is supported by the database\n '
return label.lower()
|
Redshift only supports lowercase column names and aliases.
:param str label: Original label which might include uppercase letters
:return: String that is supported by the database
|
superset/db_engine_specs.py
|
mutate_label
|
riskilla/incubator-superset
| 1
|
python
|
@staticmethod
def mutate_label(label):
'\n Redshift only supports lowercase column names and aliases.\n :param str label: Original label which might include uppercase letters\n :return: String that is supported by the database\n '
return label.lower()
|
@staticmethod
def mutate_label(label):
'\n Redshift only supports lowercase column names and aliases.\n :param str label: Original label which might include uppercase letters\n :return: String that is supported by the database\n '
return label.lower()<|docstring|>Redshift only supports lowercase column names and aliases.
:param str label: Original label which might include uppercase letters
:return: String that is supported by the database<|endoftext|>
|
d3d2b41cb2aa4e0544857f8a8fe25de1868fff82f8ada3973362f1d8349edbad
|
@classmethod
def get_table_names(cls, inspector, schema):
'Need to disregard the schema for Sqlite'
return sorted(inspector.get_table_names())
|
Need to disregard the schema for Sqlite
|
superset/db_engine_specs.py
|
get_table_names
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_table_names(cls, inspector, schema):
return sorted(inspector.get_table_names())
|
@classmethod
def get_table_names(cls, inspector, schema):
return sorted(inspector.get_table_names())<|docstring|>Need to disregard the schema for Sqlite<|endoftext|>
|
7d16e4f07f6b1d08a29104cbd26e707ba44f7c6781ef7d14b1c1a53443f59d50
|
@classmethod
def extract_error_message(cls, e):
'Extract error message for queries'
message = str(e)
try:
if (isinstance(e.args, tuple) and (len(e.args) > 1)):
message = e.args[1]
except Exception:
pass
return message
|
Extract error message for queries
|
superset/db_engine_specs.py
|
extract_error_message
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def extract_error_message(cls, e):
message = str(e)
try:
if (isinstance(e.args, tuple) and (len(e.args) > 1)):
message = e.args[1]
except Exception:
pass
return message
|
@classmethod
def extract_error_message(cls, e):
message = str(e)
try:
if (isinstance(e.args, tuple) and (len(e.args) > 1)):
message = e.args[1]
except Exception:
pass
return message<|docstring|>Extract error message for queries<|endoftext|>
|
20ce1291c2ad89c09e6e5bcd96c19c3edd5ea8d2fbaf7cbe9b3f072d1a72fa73
|
@classmethod
def get_view_names(cls, inspector, schema):
'Returns an empty list\n\n get_table_names() function returns all table names and view names,\n and get_view_names() is not implemented in sqlalchemy_presto.py\n https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py\n '
return []
|
Returns an empty list
get_table_names() function returns all table names and view names,
and get_view_names() is not implemented in sqlalchemy_presto.py
https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py
|
superset/db_engine_specs.py
|
get_view_names
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_view_names(cls, inspector, schema):
'Returns an empty list\n\n get_table_names() function returns all table names and view names,\n and get_view_names() is not implemented in sqlalchemy_presto.py\n https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py\n '
return []
|
@classmethod
def get_view_names(cls, inspector, schema):
'Returns an empty list\n\n get_table_names() function returns all table names and view names,\n and get_view_names() is not implemented in sqlalchemy_presto.py\n https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py\n '
return []<|docstring|>Returns an empty list
get_table_names() function returns all table names and view names,
and get_view_names() is not implemented in sqlalchemy_presto.py
https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py<|endoftext|>
|
47bc79e603f1b7ece90b7a4cbb2ec6c99bf82eeac35c34c10a674708c7e4c009
|
@classmethod
def _create_column_info(cls, name: str, data_type: str) -> dict:
'\n Create column info object\n :param name: column name\n :param data_type: column data type\n :return: column info object\n '
return {'name': name, 'type': f'{data_type}'}
|
Create column info object
:param name: column name
:param data_type: column data type
:return: column info object
|
superset/db_engine_specs.py
|
_create_column_info
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _create_column_info(cls, name: str, data_type: str) -> dict:
'\n Create column info object\n :param name: column name\n :param data_type: column data type\n :return: column info object\n '
return {'name': name, 'type': f'{data_type}'}
|
@classmethod
def _create_column_info(cls, name: str, data_type: str) -> dict:
'\n Create column info object\n :param name: column name\n :param data_type: column data type\n :return: column info object\n '
return {'name': name, 'type': f'{data_type}'}<|docstring|>Create column info object
:param name: column name
:param data_type: column data type
:return: column info object<|endoftext|>
|
1921ef55c292f16acfb750f606065b003fc8e095965e5df4dfcf6189576d7239
|
@classmethod
def _get_full_name(cls, names: List[Tuple[(str, str)]]) -> str:
'\n Get the full column name\n :param names: list of all individual column names\n :return: full column name\n '
return '.'.join((column[0] for column in names if column[0]))
|
Get the full column name
:param names: list of all individual column names
:return: full column name
|
superset/db_engine_specs.py
|
_get_full_name
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _get_full_name(cls, names: List[Tuple[(str, str)]]) -> str:
'\n Get the full column name\n :param names: list of all individual column names\n :return: full column name\n '
return '.'.join((column[0] for column in names if column[0]))
|
@classmethod
def _get_full_name(cls, names: List[Tuple[(str, str)]]) -> str:
'\n Get the full column name\n :param names: list of all individual column names\n :return: full column name\n '
return '.'.join((column[0] for column in names if column[0]))<|docstring|>Get the full column name
:param names: list of all individual column names
:return: full column name<|endoftext|>
|
774a76b00cda8591634cbc828609a6fc6952d18b5b95e8af712cd66eefc29e72
|
@classmethod
def _has_nested_data_types(cls, component_type: str) -> bool:
'\n Check if string contains a data type. We determine if there is a data type by\n whitespace or multiple data types by commas\n :param component_type: data type\n :return: boolean\n '
comma_regex = ',(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
white_space_regex = '\\s(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
return ((re.search(comma_regex, component_type) is not None) or (re.search(white_space_regex, component_type) is not None))
|
Check if string contains a data type. We determine if there is a data type by
whitespace or multiple data types by commas
:param component_type: data type
:return: boolean
|
superset/db_engine_specs.py
|
_has_nested_data_types
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _has_nested_data_types(cls, component_type: str) -> bool:
'\n Check if string contains a data type. We determine if there is a data type by\n whitespace or multiple data types by commas\n :param component_type: data type\n :return: boolean\n '
comma_regex = ',(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
white_space_regex = '\\s(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
return ((re.search(comma_regex, component_type) is not None) or (re.search(white_space_regex, component_type) is not None))
|
@classmethod
def _has_nested_data_types(cls, component_type: str) -> bool:
'\n Check if string contains a data type. We determine if there is a data type by\n whitespace or multiple data types by commas\n :param component_type: data type\n :return: boolean\n '
comma_regex = ',(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
white_space_regex = '\\s(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
return ((re.search(comma_regex, component_type) is not None) or (re.search(white_space_regex, component_type) is not None))<|docstring|>Check if string contains a data type. We determine if there is a data type by
whitespace or multiple data types by commas
:param component_type: data type
:return: boolean<|endoftext|>
|
b727fbd05472024fc3eeca9a46b4ad251d2170de9b9bd3c9de4e3656b817f255
|
@classmethod
def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
'\n Split data type based on given delimiter. Do not split the string if the\n delimiter is enclosed in quotes\n :param data_type: data type\n :param delimiter: string separator (i.e. open parenthesis, closed parenthesis,\n comma, whitespace)\n :return: list of strings after breaking it by the delimiter\n '
return re.split('{}(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'.format(delimiter), data_type)
|
Split data type based on given delimiter. Do not split the string if the
delimiter is enclosed in quotes
:param data_type: data type
:param delimiter: string separator (i.e. open parenthesis, closed parenthesis,
comma, whitespace)
:return: list of strings after breaking it by the delimiter
|
superset/db_engine_specs.py
|
_split_data_type
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
'\n Split data type based on given delimiter. Do not split the string if the\n delimiter is enclosed in quotes\n :param data_type: data type\n :param delimiter: string separator (i.e. open parenthesis, closed parenthesis,\n comma, whitespace)\n :return: list of strings after breaking it by the delimiter\n '
return re.split('{}(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'.format(delimiter), data_type)
|
@classmethod
def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
'\n Split data type based on given delimiter. Do not split the string if the\n delimiter is enclosed in quotes\n :param data_type: data type\n :param delimiter: string separator (i.e. open parenthesis, closed parenthesis,\n comma, whitespace)\n :return: list of strings after breaking it by the delimiter\n '
return re.split('{}(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'.format(delimiter), data_type)<|docstring|>Split data type based on given delimiter. Do not split the string if the
delimiter is enclosed in quotes
:param data_type: data type
:param delimiter: string separator (i.e. open parenthesis, closed parenthesis,
comma, whitespace)
:return: list of strings after breaking it by the delimiter<|endoftext|>
|
8d25771adba0f0e0973122cd7f36242e1034f141e25515255161fceac41d56c8
|
@classmethod
def _parse_structural_column(cls, parent_column_name: str, parent_data_type: str, result: List[dict]) -> None:
'\n Parse a row or array column\n :param result: list tracking the results\n '
formatted_parent_column_name = parent_column_name
if (' ' in parent_column_name):
formatted_parent_column_name = f'"{parent_column_name}"'
full_data_type = f'{formatted_parent_column_name} {parent_data_type}'
original_result_len = len(result)
data_types = cls._split_data_type(full_data_type, '\\(')
stack: List[Tuple[(str, str)]] = []
for data_type in data_types:
inner_types = cls._split_data_type(data_type, '\\)')
for inner_type in inner_types:
if ((not inner_type) and (len(stack) > 0)):
stack.pop()
elif cls._has_nested_data_types(inner_type):
single_fields = cls._split_data_type(inner_type, ',')
for single_field in single_fields:
single_field = single_field.strip()
if (not single_field):
continue
field_info = cls._split_data_type(single_field, '\\s')
if ((field_info[1] == 'array') or (field_info[1] == 'row')):
stack.append((field_info[0], field_info[1]))
full_parent_path = cls._get_full_name(stack)
result.append(cls._create_column_info(full_parent_path, presto_type_map[field_info[1]]()))
else:
full_parent_path = cls._get_full_name(stack)
column_name = '{}.{}'.format(full_parent_path, field_info[0])
result.append(cls._create_column_info(column_name, presto_type_map[field_info[1]]()))
if (not (inner_type.endswith('array') or inner_type.endswith('row'))):
stack.pop()
elif (('array' == inner_type) or ('row' == inner_type)):
stack.append(('', inner_type))
elif (len(stack) > 0):
stack.pop()
if (formatted_parent_column_name != parent_column_name):
for index in range(original_result_len, len(result)):
result[index]['name'] = result[index]['name'].replace(formatted_parent_column_name, parent_column_name)
|
Parse a row or array column
:param result: list tracking the results
|
superset/db_engine_specs.py
|
_parse_structural_column
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _parse_structural_column(cls, parent_column_name: str, parent_data_type: str, result: List[dict]) -> None:
'\n Parse a row or array column\n :param result: list tracking the results\n '
formatted_parent_column_name = parent_column_name
if (' ' in parent_column_name):
formatted_parent_column_name = f'"{parent_column_name}"'
full_data_type = f'{formatted_parent_column_name} {parent_data_type}'
original_result_len = len(result)
data_types = cls._split_data_type(full_data_type, '\\(')
stack: List[Tuple[(str, str)]] = []
for data_type in data_types:
inner_types = cls._split_data_type(data_type, '\\)')
for inner_type in inner_types:
if ((not inner_type) and (len(stack) > 0)):
stack.pop()
elif cls._has_nested_data_types(inner_type):
single_fields = cls._split_data_type(inner_type, ',')
for single_field in single_fields:
single_field = single_field.strip()
if (not single_field):
continue
field_info = cls._split_data_type(single_field, '\\s')
if ((field_info[1] == 'array') or (field_info[1] == 'row')):
stack.append((field_info[0], field_info[1]))
full_parent_path = cls._get_full_name(stack)
result.append(cls._create_column_info(full_parent_path, presto_type_map[field_info[1]]()))
else:
full_parent_path = cls._get_full_name(stack)
column_name = '{}.{}'.format(full_parent_path, field_info[0])
result.append(cls._create_column_info(column_name, presto_type_map[field_info[1]]()))
if (not (inner_type.endswith('array') or inner_type.endswith('row'))):
stack.pop()
elif (('array' == inner_type) or ('row' == inner_type)):
stack.append((, inner_type))
elif (len(stack) > 0):
stack.pop()
if (formatted_parent_column_name != parent_column_name):
for index in range(original_result_len, len(result)):
result[index]['name'] = result[index]['name'].replace(formatted_parent_column_name, parent_column_name)
|
@classmethod
def _parse_structural_column(cls, parent_column_name: str, parent_data_type: str, result: List[dict]) -> None:
'\n Parse a row or array column\n :param result: list tracking the results\n '
formatted_parent_column_name = parent_column_name
if (' ' in parent_column_name):
formatted_parent_column_name = f'"{parent_column_name}"'
full_data_type = f'{formatted_parent_column_name} {parent_data_type}'
original_result_len = len(result)
data_types = cls._split_data_type(full_data_type, '\\(')
stack: List[Tuple[(str, str)]] = []
for data_type in data_types:
inner_types = cls._split_data_type(data_type, '\\)')
for inner_type in inner_types:
if ((not inner_type) and (len(stack) > 0)):
stack.pop()
elif cls._has_nested_data_types(inner_type):
single_fields = cls._split_data_type(inner_type, ',')
for single_field in single_fields:
single_field = single_field.strip()
if (not single_field):
continue
field_info = cls._split_data_type(single_field, '\\s')
if ((field_info[1] == 'array') or (field_info[1] == 'row')):
stack.append((field_info[0], field_info[1]))
full_parent_path = cls._get_full_name(stack)
result.append(cls._create_column_info(full_parent_path, presto_type_map[field_info[1]]()))
else:
full_parent_path = cls._get_full_name(stack)
column_name = '{}.{}'.format(full_parent_path, field_info[0])
result.append(cls._create_column_info(column_name, presto_type_map[field_info[1]]()))
if (not (inner_type.endswith('array') or inner_type.endswith('row'))):
stack.pop()
elif (('array' == inner_type) or ('row' == inner_type)):
stack.append((, inner_type))
elif (len(stack) > 0):
stack.pop()
if (formatted_parent_column_name != parent_column_name):
for index in range(original_result_len, len(result)):
result[index]['name'] = result[index]['name'].replace(formatted_parent_column_name, parent_column_name)<|docstring|>Parse a row or array column
:param result: list tracking the results<|endoftext|>
|
923599e05a30e2faaad50c37a9ddc1262701ad8144b881d19b7f9cbc21bf67a6
|
@classmethod
def _show_columns(cls, inspector: Inspector, table_name: str, schema: str) -> List[RowProxy]:
'\n Show presto column names\n :param inspector: object that performs database schema inspection\n :param table_name: table name\n :param schema: schema name\n :return: list of column objects\n '
quote = inspector.engine.dialect.identifier_preparer.quote_identifier
full_table = quote(table_name)
if schema:
full_table = '{}.{}'.format(quote(schema), full_table)
columns = inspector.bind.execute('SHOW COLUMNS FROM {}'.format(full_table))
return columns
|
Show presto column names
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: list of column objects
|
superset/db_engine_specs.py
|
_show_columns
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _show_columns(cls, inspector: Inspector, table_name: str, schema: str) -> List[RowProxy]:
'\n Show presto column names\n :param inspector: object that performs database schema inspection\n :param table_name: table name\n :param schema: schema name\n :return: list of column objects\n '
quote = inspector.engine.dialect.identifier_preparer.quote_identifier
full_table = quote(table_name)
if schema:
full_table = '{}.{}'.format(quote(schema), full_table)
columns = inspector.bind.execute('SHOW COLUMNS FROM {}'.format(full_table))
return columns
|
@classmethod
def _show_columns(cls, inspector: Inspector, table_name: str, schema: str) -> List[RowProxy]:
'\n Show presto column names\n :param inspector: object that performs database schema inspection\n :param table_name: table name\n :param schema: schema name\n :return: list of column objects\n '
quote = inspector.engine.dialect.identifier_preparer.quote_identifier
full_table = quote(table_name)
if schema:
full_table = '{}.{}'.format(quote(schema), full_table)
columns = inspector.bind.execute('SHOW COLUMNS FROM {}'.format(full_table))
return columns<|docstring|>Show presto column names
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: list of column objects<|endoftext|>
|
b091afe8a9079d6cd902332f663891679d4fa86e0378e4619c96f5673f3ed043
|
@classmethod
def get_columns(cls, inspector: Inspector, table_name: str, schema: str) -> List[dict]:
'\n Get columns from a Presto data source. This includes handling row and\n array data types\n :param inspector: object that performs database schema inspection\n :param table_name: table name\n :param schema: schema name\n :return: a list of results that contain column info\n (i.e. column name and data type)\n '
columns = cls._show_columns(inspector, table_name, schema)
result: List[dict] = []
for column in columns:
try:
if (('array' in column.Type) or ('row' in column.Type)):
structural_column_index = len(result)
cls._parse_structural_column(column.Column, column.Type, result)
result[structural_column_index]['nullable'] = getattr(column, 'Null', True)
result[structural_column_index]['default'] = None
continue
else:
column_type = presto_type_map[column.Type]()
except KeyError:
logging.info('Did not recognize type {} of column {}'.format(column.Type, column.Column))
column_type = types.NullType
column_info = cls._create_column_info(column.Column, column_type)
column_info['nullable'] = getattr(column, 'Null', True)
column_info['default'] = None
result.append(column_info)
return result
|
Get columns from a Presto data source. This includes handling row and
array data types
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: a list of results that contain column info
(i.e. column name and data type)
|
superset/db_engine_specs.py
|
get_columns
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_columns(cls, inspector: Inspector, table_name: str, schema: str) -> List[dict]:
'\n Get columns from a Presto data source. This includes handling row and\n array data types\n :param inspector: object that performs database schema inspection\n :param table_name: table name\n :param schema: schema name\n :return: a list of results that contain column info\n (i.e. column name and data type)\n '
columns = cls._show_columns(inspector, table_name, schema)
result: List[dict] = []
for column in columns:
try:
if (('array' in column.Type) or ('row' in column.Type)):
structural_column_index = len(result)
cls._parse_structural_column(column.Column, column.Type, result)
result[structural_column_index]['nullable'] = getattr(column, 'Null', True)
result[structural_column_index]['default'] = None
continue
else:
column_type = presto_type_map[column.Type]()
except KeyError:
logging.info('Did not recognize type {} of column {}'.format(column.Type, column.Column))
column_type = types.NullType
column_info = cls._create_column_info(column.Column, column_type)
column_info['nullable'] = getattr(column, 'Null', True)
column_info['default'] = None
result.append(column_info)
return result
|
@classmethod
def get_columns(cls, inspector: Inspector, table_name: str, schema: str) -> List[dict]:
'\n Get columns from a Presto data source. This includes handling row and\n array data types\n :param inspector: object that performs database schema inspection\n :param table_name: table name\n :param schema: schema name\n :return: a list of results that contain column info\n (i.e. column name and data type)\n '
columns = cls._show_columns(inspector, table_name, schema)
result: List[dict] = []
for column in columns:
try:
if (('array' in column.Type) or ('row' in column.Type)):
structural_column_index = len(result)
cls._parse_structural_column(column.Column, column.Type, result)
result[structural_column_index]['nullable'] = getattr(column, 'Null', True)
result[structural_column_index]['default'] = None
continue
else:
column_type = presto_type_map[column.Type]()
except KeyError:
logging.info('Did not recognize type {} of column {}'.format(column.Type, column.Column))
column_type = types.NullType
column_info = cls._create_column_info(column.Column, column_type)
column_info['nullable'] = getattr(column, 'Null', True)
column_info['default'] = None
result.append(column_info)
return result<|docstring|>Get columns from a Presto data source. This includes handling row and
array data types
:param inspector: object that performs database schema inspection
:param table_name: table name
:param schema: schema name
:return: a list of results that contain column info
(i.e. column name and data type)<|endoftext|>
|
0bbae5db79744fa731c1af758e04eb2b887e72b1269435f25a3dabf7ca8b13d1
|
@classmethod
def _is_column_name_quoted(cls, column_name: str) -> bool:
'\n Check if column name is in quotes\n :param column_name: column name\n :return: boolean\n '
return (column_name.startswith('"') and column_name.endswith('"'))
|
Check if column name is in quotes
:param column_name: column name
:return: boolean
|
superset/db_engine_specs.py
|
_is_column_name_quoted
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _is_column_name_quoted(cls, column_name: str) -> bool:
'\n Check if column name is in quotes\n :param column_name: column name\n :return: boolean\n '
return (column_name.startswith('"') and column_name.endswith('"'))
|
@classmethod
def _is_column_name_quoted(cls, column_name: str) -> bool:
'\n Check if column name is in quotes\n :param column_name: column name\n :return: boolean\n '
return (column_name.startswith('"') and column_name.endswith('"'))<|docstring|>Check if column name is in quotes
:param column_name: column name
:return: boolean<|endoftext|>
|
3733ca544aad9ff98378bfd315e8f7d74c2f439ed6ea1d738e32f7d3e1be1878
|
@classmethod
def _get_fields(cls, cols: List[dict]) -> List[ColumnClause]:
'\n Format column clauses where names are in quotes and labels are specified\n :param cols: columns\n :return: column clauses\n '
column_clauses = []
dot_pattern = '\\. # split on period\n (?= # look ahead\n (?: # create non-capture group\n [^\\"]*\\"[^\\"]*\\" # two quotes\n )*[^\\"]*$) # end regex'
dot_regex = re.compile(dot_pattern, re.VERBOSE)
for col in cols:
col_names = re.split(dot_regex, col['name'])
for (index, col_name) in enumerate(col_names):
if (not cls._is_column_name_quoted(col_name)):
col_names[index] = '"{}"'.format(col_name)
quoted_col_name = '.'.join(((col_name if cls._is_column_name_quoted(col_name) else f'"{col_name}"') for col_name in col_names))
column_clause = sqla.literal_column(quoted_col_name).label(col['name'])
column_clauses.append(column_clause)
return column_clauses
|
Format column clauses where names are in quotes and labels are specified
:param cols: columns
:return: column clauses
|
superset/db_engine_specs.py
|
_get_fields
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _get_fields(cls, cols: List[dict]) -> List[ColumnClause]:
'\n Format column clauses where names are in quotes and labels are specified\n :param cols: columns\n :return: column clauses\n '
column_clauses = []
dot_pattern = '\\. # split on period\n (?= # look ahead\n (?: # create non-capture group\n [^\\"]*\\"[^\\"]*\\" # two quotes\n )*[^\\"]*$) # end regex'
dot_regex = re.compile(dot_pattern, re.VERBOSE)
for col in cols:
col_names = re.split(dot_regex, col['name'])
for (index, col_name) in enumerate(col_names):
if (not cls._is_column_name_quoted(col_name)):
col_names[index] = '"{}"'.format(col_name)
quoted_col_name = '.'.join(((col_name if cls._is_column_name_quoted(col_name) else f'"{col_name}"') for col_name in col_names))
column_clause = sqla.literal_column(quoted_col_name).label(col['name'])
column_clauses.append(column_clause)
return column_clauses
|
@classmethod
def _get_fields(cls, cols: List[dict]) -> List[ColumnClause]:
'\n Format column clauses where names are in quotes and labels are specified\n :param cols: columns\n :return: column clauses\n '
column_clauses = []
dot_pattern = '\\. # split on period\n (?= # look ahead\n (?: # create non-capture group\n [^\\"]*\\"[^\\"]*\\" # two quotes\n )*[^\\"]*$) # end regex'
dot_regex = re.compile(dot_pattern, re.VERBOSE)
for col in cols:
col_names = re.split(dot_regex, col['name'])
for (index, col_name) in enumerate(col_names):
if (not cls._is_column_name_quoted(col_name)):
col_names[index] = '"{}"'.format(col_name)
quoted_col_name = '.'.join(((col_name if cls._is_column_name_quoted(col_name) else f'"{col_name}"') for col_name in col_names))
column_clause = sqla.literal_column(quoted_col_name).label(col['name'])
column_clauses.append(column_clause)
return column_clauses<|docstring|>Format column clauses where names are in quotes and labels are specified
:param cols: columns
:return: column clauses<|endoftext|>
|
79b8c677df44db1bc2c0bb85cae1bd1f03de0fa2eab4c0a35aec2a6ba0758664
|
@classmethod
def _filter_out_array_nested_cols(cls, cols: List[dict]) -> Tuple[(List[dict], List[dict])]:
'\n Filter out columns that correspond to array content. We know which columns to\n skip because cols is a list provided to us in a specific order where a structural\n column is positioned right before its content.\n\n Example: Column Name: ColA, Column Data Type: array(row(nest_obj int))\n cols = [ ..., ColA, ColA.nest_obj, ... ]\n\n When we run across an array, check if subsequent column names start with the\n array name and skip them.\n :param cols: columns\n :return: filtered list of columns and list of array columns and its nested fields\n '
filtered_cols = []
array_cols = []
curr_array_col_name = None
for col in cols:
if (curr_array_col_name and col['name'].startswith(curr_array_col_name)):
array_cols.append(col)
continue
elif (str(col['type']) == 'ARRAY'):
curr_array_col_name = col['name']
array_cols.append(col)
filtered_cols.append(col)
else:
curr_array_col_name = None
filtered_cols.append(col)
return (filtered_cols, array_cols)
|
Filter out columns that correspond to array content. We know which columns to
skip because cols is a list provided to us in a specific order where a structural
column is positioned right before its content.
Example: Column Name: ColA, Column Data Type: array(row(nest_obj int))
cols = [ ..., ColA, ColA.nest_obj, ... ]
When we run across an array, check if subsequent column names start with the
array name and skip them.
:param cols: columns
:return: filtered list of columns and list of array columns and its nested fields
|
superset/db_engine_specs.py
|
_filter_out_array_nested_cols
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _filter_out_array_nested_cols(cls, cols: List[dict]) -> Tuple[(List[dict], List[dict])]:
'\n Filter out columns that correspond to array content. We know which columns to\n skip because cols is a list provided to us in a specific order where a structural\n column is positioned right before its content.\n\n Example: Column Name: ColA, Column Data Type: array(row(nest_obj int))\n cols = [ ..., ColA, ColA.nest_obj, ... ]\n\n When we run across an array, check if subsequent column names start with the\n array name and skip them.\n :param cols: columns\n :return: filtered list of columns and list of array columns and its nested fields\n '
filtered_cols = []
array_cols = []
curr_array_col_name = None
for col in cols:
if (curr_array_col_name and col['name'].startswith(curr_array_col_name)):
array_cols.append(col)
continue
elif (str(col['type']) == 'ARRAY'):
curr_array_col_name = col['name']
array_cols.append(col)
filtered_cols.append(col)
else:
curr_array_col_name = None
filtered_cols.append(col)
return (filtered_cols, array_cols)
|
@classmethod
def _filter_out_array_nested_cols(cls, cols: List[dict]) -> Tuple[(List[dict], List[dict])]:
'\n Filter out columns that correspond to array content. We know which columns to\n skip because cols is a list provided to us in a specific order where a structural\n column is positioned right before its content.\n\n Example: Column Name: ColA, Column Data Type: array(row(nest_obj int))\n cols = [ ..., ColA, ColA.nest_obj, ... ]\n\n When we run across an array, check if subsequent column names start with the\n array name and skip them.\n :param cols: columns\n :return: filtered list of columns and list of array columns and its nested fields\n '
filtered_cols = []
array_cols = []
curr_array_col_name = None
for col in cols:
if (curr_array_col_name and col['name'].startswith(curr_array_col_name)):
array_cols.append(col)
continue
elif (str(col['type']) == 'ARRAY'):
curr_array_col_name = col['name']
array_cols.append(col)
filtered_cols.append(col)
else:
curr_array_col_name = None
filtered_cols.append(col)
return (filtered_cols, array_cols)<|docstring|>Filter out columns that correspond to array content. We know which columns to
skip because cols is a list provided to us in a specific order where a structural
column is positioned right before its content.
Example: Column Name: ColA, Column Data Type: array(row(nest_obj int))
cols = [ ..., ColA, ColA.nest_obj, ... ]
When we run across an array, check if subsequent column names start with the
array name and skip them.
:param cols: columns
:return: filtered list of columns and list of array columns and its nested fields<|endoftext|>
|
305cecac9809325379957358160a4fbcefc7c732109242c70a36defd8c71e873
|
@classmethod
def select_star(cls, my_db, table_name: str, engine: Engine, schema: str=None, limit: int=100, show_cols: bool=False, indent: bool=True, latest_partition: bool=True, cols: List[dict]=[]) -> str:
"\n Include selecting properties of row objects. We cannot easily break arrays into\n rows, so render the whole array in its own row and skip columns that correspond\n to an array's contents.\n "
presto_cols = cols
if show_cols:
dot_regex = '\\.(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
presto_cols = [col for col in presto_cols if (not re.search(dot_regex, col['name']))]
return super(PrestoEngineSpec, cls).select_star(my_db, table_name, engine, schema, limit, show_cols, indent, latest_partition, presto_cols)
|
Include selecting properties of row objects. We cannot easily break arrays into
rows, so render the whole array in its own row and skip columns that correspond
to an array's contents.
|
superset/db_engine_specs.py
|
select_star
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def select_star(cls, my_db, table_name: str, engine: Engine, schema: str=None, limit: int=100, show_cols: bool=False, indent: bool=True, latest_partition: bool=True, cols: List[dict]=[]) -> str:
"\n Include selecting properties of row objects. We cannot easily break arrays into\n rows, so render the whole array in its own row and skip columns that correspond\n to an array's contents.\n "
presto_cols = cols
if show_cols:
dot_regex = '\\.(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
presto_cols = [col for col in presto_cols if (not re.search(dot_regex, col['name']))]
return super(PrestoEngineSpec, cls).select_star(my_db, table_name, engine, schema, limit, show_cols, indent, latest_partition, presto_cols)
|
@classmethod
def select_star(cls, my_db, table_name: str, engine: Engine, schema: str=None, limit: int=100, show_cols: bool=False, indent: bool=True, latest_partition: bool=True, cols: List[dict]=[]) -> str:
"\n Include selecting properties of row objects. We cannot easily break arrays into\n rows, so render the whole array in its own row and skip columns that correspond\n to an array's contents.\n "
presto_cols = cols
if show_cols:
dot_regex = '\\.(?=(?:[^\\"]*\\"[^\\"]*\\")*[^\\"]*$)'
presto_cols = [col for col in presto_cols if (not re.search(dot_regex, col['name']))]
return super(PrestoEngineSpec, cls).select_star(my_db, table_name, engine, schema, limit, show_cols, indent, latest_partition, presto_cols)<|docstring|>Include selecting properties of row objects. We cannot easily break arrays into
rows, so render the whole array in its own row and skip columns that correspond
to an array's contents.<|endoftext|>
|
2ec08e2d4f1e7bb777a0d77a847986128de976c477b87cc14cedc0825746070b
|
@classmethod
def _build_column_hierarchy(cls, columns: List[dict], parent_column_types: List[str], column_hierarchy: dict) -> None:
"\n Build a graph where the root node represents a column whose data type is in\n parent_column_types. A node's children represent that column's nested fields\n :param columns: list of columns\n :param parent_column_types: list of data types that decide what columns can\n be root nodes\n :param column_hierarchy: dictionary representing the graph\n "
if (len(columns) == 0):
return
root = columns.pop(0)
root_info = {'type': root['type'], 'children': []}
column_hierarchy[root['name']] = root_info
while columns:
column = columns[0]
if (not column['name'].startswith(f"{root['name']}.")):
break
if (str(column['type']) in parent_column_types):
cls._build_column_hierarchy(columns, parent_column_types, column_hierarchy)
root_info['children'].append(column['name'])
continue
else:
root_info['children'].append(column['name'])
columns.pop(0)
|
Build a graph where the root node represents a column whose data type is in
parent_column_types. A node's children represent that column's nested fields
:param columns: list of columns
:param parent_column_types: list of data types that decide what columns can
be root nodes
:param column_hierarchy: dictionary representing the graph
|
superset/db_engine_specs.py
|
_build_column_hierarchy
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _build_column_hierarchy(cls, columns: List[dict], parent_column_types: List[str], column_hierarchy: dict) -> None:
"\n Build a graph where the root node represents a column whose data type is in\n parent_column_types. A node's children represent that column's nested fields\n :param columns: list of columns\n :param parent_column_types: list of data types that decide what columns can\n be root nodes\n :param column_hierarchy: dictionary representing the graph\n "
if (len(columns) == 0):
return
root = columns.pop(0)
root_info = {'type': root['type'], 'children': []}
column_hierarchy[root['name']] = root_info
while columns:
column = columns[0]
if (not column['name'].startswith(f"{root['name']}.")):
break
if (str(column['type']) in parent_column_types):
cls._build_column_hierarchy(columns, parent_column_types, column_hierarchy)
root_info['children'].append(column['name'])
continue
else:
root_info['children'].append(column['name'])
columns.pop(0)
|
@classmethod
def _build_column_hierarchy(cls, columns: List[dict], parent_column_types: List[str], column_hierarchy: dict) -> None:
"\n Build a graph where the root node represents a column whose data type is in\n parent_column_types. A node's children represent that column's nested fields\n :param columns: list of columns\n :param parent_column_types: list of data types that decide what columns can\n be root nodes\n :param column_hierarchy: dictionary representing the graph\n "
if (len(columns) == 0):
return
root = columns.pop(0)
root_info = {'type': root['type'], 'children': []}
column_hierarchy[root['name']] = root_info
while columns:
column = columns[0]
if (not column['name'].startswith(f"{root['name']}.")):
break
if (str(column['type']) in parent_column_types):
cls._build_column_hierarchy(columns, parent_column_types, column_hierarchy)
root_info['children'].append(column['name'])
continue
else:
root_info['children'].append(column['name'])
columns.pop(0)<|docstring|>Build a graph where the root node represents a column whose data type is in
parent_column_types. A node's children represent that column's nested fields
:param columns: list of columns
:param parent_column_types: list of data types that decide what columns can
be root nodes
:param column_hierarchy: dictionary representing the graph<|endoftext|>
|
76d2ea2b341c82f91dc1690dc857951237e261795e4a21e979254b780038f1cd
|
@classmethod
def _create_row_and_array_hierarchy(cls, selected_columns: List[dict]) -> Tuple[(dict, dict, List[dict])]:
"\n Build graphs where the root node represents a row or array and its children\n are that column's nested fields\n :param selected_columns: columns selected in a query\n :return: graph representing a row, graph representing an array, and a list\n of all the nested fields\n "
row_column_hierarchy: OrderedDict = OrderedDict()
array_column_hierarchy: OrderedDict = OrderedDict()
expanded_columns: List[dict] = []
for column in selected_columns:
if column['type'].startswith('ROW'):
parsed_row_columns: List[dict] = []
cls._parse_structural_column(column['name'], column['type'].lower(), parsed_row_columns)
expanded_columns = (expanded_columns + parsed_row_columns[1:])
(filtered_row_columns, array_columns) = cls._filter_out_array_nested_cols(parsed_row_columns)
cls._build_column_hierarchy(filtered_row_columns, ['ROW'], row_column_hierarchy)
cls._build_column_hierarchy(array_columns, ['ROW', 'ARRAY'], array_column_hierarchy)
elif column['type'].startswith('ARRAY'):
parsed_array_columns: List[dict] = []
cls._parse_structural_column(column['name'], column['type'].lower(), parsed_array_columns)
expanded_columns = (expanded_columns + parsed_array_columns[1:])
cls._build_column_hierarchy(parsed_array_columns, ['ROW', 'ARRAY'], array_column_hierarchy)
return (row_column_hierarchy, array_column_hierarchy, expanded_columns)
|
Build graphs where the root node represents a row or array and its children
are that column's nested fields
:param selected_columns: columns selected in a query
:return: graph representing a row, graph representing an array, and a list
of all the nested fields
|
superset/db_engine_specs.py
|
_create_row_and_array_hierarchy
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _create_row_and_array_hierarchy(cls, selected_columns: List[dict]) -> Tuple[(dict, dict, List[dict])]:
"\n Build graphs where the root node represents a row or array and its children\n are that column's nested fields\n :param selected_columns: columns selected in a query\n :return: graph representing a row, graph representing an array, and a list\n of all the nested fields\n "
row_column_hierarchy: OrderedDict = OrderedDict()
array_column_hierarchy: OrderedDict = OrderedDict()
expanded_columns: List[dict] = []
for column in selected_columns:
if column['type'].startswith('ROW'):
parsed_row_columns: List[dict] = []
cls._parse_structural_column(column['name'], column['type'].lower(), parsed_row_columns)
expanded_columns = (expanded_columns + parsed_row_columns[1:])
(filtered_row_columns, array_columns) = cls._filter_out_array_nested_cols(parsed_row_columns)
cls._build_column_hierarchy(filtered_row_columns, ['ROW'], row_column_hierarchy)
cls._build_column_hierarchy(array_columns, ['ROW', 'ARRAY'], array_column_hierarchy)
elif column['type'].startswith('ARRAY'):
parsed_array_columns: List[dict] = []
cls._parse_structural_column(column['name'], column['type'].lower(), parsed_array_columns)
expanded_columns = (expanded_columns + parsed_array_columns[1:])
cls._build_column_hierarchy(parsed_array_columns, ['ROW', 'ARRAY'], array_column_hierarchy)
return (row_column_hierarchy, array_column_hierarchy, expanded_columns)
|
@classmethod
def _create_row_and_array_hierarchy(cls, selected_columns: List[dict]) -> Tuple[(dict, dict, List[dict])]:
"\n Build graphs where the root node represents a row or array and its children\n are that column's nested fields\n :param selected_columns: columns selected in a query\n :return: graph representing a row, graph representing an array, and a list\n of all the nested fields\n "
row_column_hierarchy: OrderedDict = OrderedDict()
array_column_hierarchy: OrderedDict = OrderedDict()
expanded_columns: List[dict] = []
for column in selected_columns:
if column['type'].startswith('ROW'):
parsed_row_columns: List[dict] = []
cls._parse_structural_column(column['name'], column['type'].lower(), parsed_row_columns)
expanded_columns = (expanded_columns + parsed_row_columns[1:])
(filtered_row_columns, array_columns) = cls._filter_out_array_nested_cols(parsed_row_columns)
cls._build_column_hierarchy(filtered_row_columns, ['ROW'], row_column_hierarchy)
cls._build_column_hierarchy(array_columns, ['ROW', 'ARRAY'], array_column_hierarchy)
elif column['type'].startswith('ARRAY'):
parsed_array_columns: List[dict] = []
cls._parse_structural_column(column['name'], column['type'].lower(), parsed_array_columns)
expanded_columns = (expanded_columns + parsed_array_columns[1:])
cls._build_column_hierarchy(parsed_array_columns, ['ROW', 'ARRAY'], array_column_hierarchy)
return (row_column_hierarchy, array_column_hierarchy, expanded_columns)<|docstring|>Build graphs where the root node represents a row or array and its children
are that column's nested fields
:param selected_columns: columns selected in a query
:return: graph representing a row, graph representing an array, and a list
of all the nested fields<|endoftext|>
|
0c45c540a2b209502bdb458012425f4b3fc8acbbe03a507c79ec8e0c88cf793f
|
@classmethod
def _create_empty_row_of_data(cls, columns: List[dict]) -> dict:
'\n Create an empty row of data\n :param columns: list of columns\n :return: dictionary representing an empty row of data\n '
return {column['name']: '' for column in columns}
|
Create an empty row of data
:param columns: list of columns
:return: dictionary representing an empty row of data
|
superset/db_engine_specs.py
|
_create_empty_row_of_data
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _create_empty_row_of_data(cls, columns: List[dict]) -> dict:
'\n Create an empty row of data\n :param columns: list of columns\n :return: dictionary representing an empty row of data\n '
return {column['name']: for column in columns}
|
@classmethod
def _create_empty_row_of_data(cls, columns: List[dict]) -> dict:
'\n Create an empty row of data\n :param columns: list of columns\n :return: dictionary representing an empty row of data\n '
return {column['name']: for column in columns}<|docstring|>Create an empty row of data
:param columns: list of columns
:return: dictionary representing an empty row of data<|endoftext|>
|
007a7cfa70f4a17d1b8821fe3a419ea4fa0e42165b5515d729db3cd31f38d077
|
@classmethod
def _expand_row_data(cls, datum: dict, column: str, column_hierarchy: dict) -> None:
'\n Separate out nested fields and its value in a row of data\n :param datum: row of data\n :param column: row column name\n :param column_hierarchy: dictionary tracking structural columns and its\n nested fields\n '
if (column in datum):
row_data = datum[column]
row_children = column_hierarchy[column]['children']
if (row_data and (len(row_data) != len(row_children))):
raise Exception('The number of data values and number of nestedfields are not equal')
elif row_data:
for (index, data_value) in enumerate(row_data):
datum[row_children[index]] = data_value
else:
for row_child in row_children:
datum[row_child] = ''
|
Separate out nested fields and its value in a row of data
:param datum: row of data
:param column: row column name
:param column_hierarchy: dictionary tracking structural columns and its
nested fields
|
superset/db_engine_specs.py
|
_expand_row_data
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _expand_row_data(cls, datum: dict, column: str, column_hierarchy: dict) -> None:
'\n Separate out nested fields and its value in a row of data\n :param datum: row of data\n :param column: row column name\n :param column_hierarchy: dictionary tracking structural columns and its\n nested fields\n '
if (column in datum):
row_data = datum[column]
row_children = column_hierarchy[column]['children']
if (row_data and (len(row_data) != len(row_children))):
raise Exception('The number of data values and number of nestedfields are not equal')
elif row_data:
for (index, data_value) in enumerate(row_data):
datum[row_children[index]] = data_value
else:
for row_child in row_children:
datum[row_child] =
|
@classmethod
def _expand_row_data(cls, datum: dict, column: str, column_hierarchy: dict) -> None:
'\n Separate out nested fields and its value in a row of data\n :param datum: row of data\n :param column: row column name\n :param column_hierarchy: dictionary tracking structural columns and its\n nested fields\n '
if (column in datum):
row_data = datum[column]
row_children = column_hierarchy[column]['children']
if (row_data and (len(row_data) != len(row_children))):
raise Exception('The number of data values and number of nestedfields are not equal')
elif row_data:
for (index, data_value) in enumerate(row_data):
datum[row_children[index]] = data_value
else:
for row_child in row_children:
datum[row_child] = <|docstring|>Separate out nested fields and its value in a row of data
:param datum: row of data
:param column: row column name
:param column_hierarchy: dictionary tracking structural columns and its
nested fields<|endoftext|>
|
b92f84be9b0444c6c078b993c87e35fb4d74e9821fa6c756256a6c55566ceb1d
|
@classmethod
def _split_array_columns_by_process_state(cls, array_columns: List[str], array_column_hierarchy: dict, datum: dict) -> Tuple[(List[str], Set[str])]:
'\n Take a list of array columns and split them according to whether or not we are\n ready to process them from a data set\n :param array_columns: list of array columns\n :param array_column_hierarchy: graph representing array columns\n :param datum: row of data\n :return: list of array columns ready to be processed and set of array columns\n not ready to be processed\n '
array_columns_to_process = []
unprocessed_array_columns = set()
child_array = None
for array_column in array_columns:
if (array_column in datum):
array_columns_to_process.append(array_column)
elif (str(array_column_hierarchy[array_column]['type']) == 'ARRAY'):
child_array = array_column
unprocessed_array_columns.add(child_array)
elif (child_array and array_column.startswith(child_array)):
unprocessed_array_columns.add(array_column)
return (array_columns_to_process, unprocessed_array_columns)
|
Take a list of array columns and split them according to whether or not we are
ready to process them from a data set
:param array_columns: list of array columns
:param array_column_hierarchy: graph representing array columns
:param datum: row of data
:return: list of array columns ready to be processed and set of array columns
not ready to be processed
|
superset/db_engine_specs.py
|
_split_array_columns_by_process_state
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _split_array_columns_by_process_state(cls, array_columns: List[str], array_column_hierarchy: dict, datum: dict) -> Tuple[(List[str], Set[str])]:
'\n Take a list of array columns and split them according to whether or not we are\n ready to process them from a data set\n :param array_columns: list of array columns\n :param array_column_hierarchy: graph representing array columns\n :param datum: row of data\n :return: list of array columns ready to be processed and set of array columns\n not ready to be processed\n '
array_columns_to_process = []
unprocessed_array_columns = set()
child_array = None
for array_column in array_columns:
if (array_column in datum):
array_columns_to_process.append(array_column)
elif (str(array_column_hierarchy[array_column]['type']) == 'ARRAY'):
child_array = array_column
unprocessed_array_columns.add(child_array)
elif (child_array and array_column.startswith(child_array)):
unprocessed_array_columns.add(array_column)
return (array_columns_to_process, unprocessed_array_columns)
|
@classmethod
def _split_array_columns_by_process_state(cls, array_columns: List[str], array_column_hierarchy: dict, datum: dict) -> Tuple[(List[str], Set[str])]:
'\n Take a list of array columns and split them according to whether or not we are\n ready to process them from a data set\n :param array_columns: list of array columns\n :param array_column_hierarchy: graph representing array columns\n :param datum: row of data\n :return: list of array columns ready to be processed and set of array columns\n not ready to be processed\n '
array_columns_to_process = []
unprocessed_array_columns = set()
child_array = None
for array_column in array_columns:
if (array_column in datum):
array_columns_to_process.append(array_column)
elif (str(array_column_hierarchy[array_column]['type']) == 'ARRAY'):
child_array = array_column
unprocessed_array_columns.add(child_array)
elif (child_array and array_column.startswith(child_array)):
unprocessed_array_columns.add(array_column)
return (array_columns_to_process, unprocessed_array_columns)<|docstring|>Take a list of array columns and split them according to whether or not we are
ready to process them from a data set
:param array_columns: list of array columns
:param array_column_hierarchy: graph representing array columns
:param datum: row of data
:return: list of array columns ready to be processed and set of array columns
not ready to be processed<|endoftext|>
|
1184be7123b665b1406baa35e637c8d172b6dcfb7b6809ec96f5291d23b99024
|
@classmethod
def _convert_data_list_to_array_data_dict(cls, data: List[dict], array_columns_to_process: List[str]) -> dict:
"\n Pull out array data from rows of data into a dictionary where the key represents\n the index in the data list and the value is the array data values\n Example:\n data = [\n {'ColumnA': [1, 2], 'ColumnB': 3},\n {'ColumnA': [11, 22], 'ColumnB': 3}\n ]\n data dictionary = {\n 0: [{'ColumnA': [1, 2]],\n 1: [{'ColumnA': [11, 22]]\n }\n :param data: rows of data\n :param array_columns_to_process: array columns we want to pull out\n :return: data dictionary\n "
array_data_dict = {}
for (data_index, datum) in enumerate(data):
all_array_datum = {}
for array_column in array_columns_to_process:
all_array_datum[array_column] = datum[array_column]
array_data_dict[data_index] = [all_array_datum]
return array_data_dict
|
Pull out array data from rows of data into a dictionary where the key represents
the index in the data list and the value is the array data values
Example:
data = [
{'ColumnA': [1, 2], 'ColumnB': 3},
{'ColumnA': [11, 22], 'ColumnB': 3}
]
data dictionary = {
0: [{'ColumnA': [1, 2]],
1: [{'ColumnA': [11, 22]]
}
:param data: rows of data
:param array_columns_to_process: array columns we want to pull out
:return: data dictionary
|
superset/db_engine_specs.py
|
_convert_data_list_to_array_data_dict
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _convert_data_list_to_array_data_dict(cls, data: List[dict], array_columns_to_process: List[str]) -> dict:
"\n Pull out array data from rows of data into a dictionary where the key represents\n the index in the data list and the value is the array data values\n Example:\n data = [\n {'ColumnA': [1, 2], 'ColumnB': 3},\n {'ColumnA': [11, 22], 'ColumnB': 3}\n ]\n data dictionary = {\n 0: [{'ColumnA': [1, 2]],\n 1: [{'ColumnA': [11, 22]]\n }\n :param data: rows of data\n :param array_columns_to_process: array columns we want to pull out\n :return: data dictionary\n "
array_data_dict = {}
for (data_index, datum) in enumerate(data):
all_array_datum = {}
for array_column in array_columns_to_process:
all_array_datum[array_column] = datum[array_column]
array_data_dict[data_index] = [all_array_datum]
return array_data_dict
|
@classmethod
def _convert_data_list_to_array_data_dict(cls, data: List[dict], array_columns_to_process: List[str]) -> dict:
"\n Pull out array data from rows of data into a dictionary where the key represents\n the index in the data list and the value is the array data values\n Example:\n data = [\n {'ColumnA': [1, 2], 'ColumnB': 3},\n {'ColumnA': [11, 22], 'ColumnB': 3}\n ]\n data dictionary = {\n 0: [{'ColumnA': [1, 2]],\n 1: [{'ColumnA': [11, 22]]\n }\n :param data: rows of data\n :param array_columns_to_process: array columns we want to pull out\n :return: data dictionary\n "
array_data_dict = {}
for (data_index, datum) in enumerate(data):
all_array_datum = {}
for array_column in array_columns_to_process:
all_array_datum[array_column] = datum[array_column]
array_data_dict[data_index] = [all_array_datum]
return array_data_dict<|docstring|>Pull out array data from rows of data into a dictionary where the key represents
the index in the data list and the value is the array data values
Example:
data = [
{'ColumnA': [1, 2], 'ColumnB': 3},
{'ColumnA': [11, 22], 'ColumnB': 3}
]
data dictionary = {
0: [{'ColumnA': [1, 2]],
1: [{'ColumnA': [11, 22]]
}
:param data: rows of data
:param array_columns_to_process: array columns we want to pull out
:return: data dictionary<|endoftext|>
|
add35f2da7725984956e8fa81029977db1a7cf43188a981ecab62f9a75ea7b8b
|
@classmethod
def _process_array_data(cls, data: List[dict], all_columns: List[dict], array_column_hierarchy: dict) -> dict:
"\n Pull out array data that is ready to be processed into a dictionary.\n The key refers to the index in the original data set. The value is\n a list of data values. Initially this list will contain just one value,\n the row of data that corresponds to the index in the original data set.\n As we process arrays, we will pull out array values into separate rows\n and append them to the list of data values.\n Example:\n Original data set = [\n {'ColumnA': [1, 2], 'ColumnB': [3]},\n {'ColumnA': [11, 22], 'ColumnB': [33]}\n ]\n all_array_data (intially) = {\n 0: [{'ColumnA': [1, 2], 'ColumnB': [3}],\n 1: [{'ColumnA': [11, 22], 'ColumnB': [33]}]\n }\n all_array_data (after processing) = {\n 0: [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': ''},\n ],\n 1: [\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': ''},\n ],\n }\n :param data: rows of data\n :param all_columns: list of columns\n :param array_column_hierarchy: graph representing array columns\n :return: dictionary representing processed array data\n "
array_columns = list(array_column_hierarchy.keys())
(array_columns_to_process, unprocessed_array_columns) = cls._split_array_columns_by_process_state(array_columns, array_column_hierarchy, data[0])
all_array_data = cls._convert_data_list_to_array_data_dict(data, array_columns_to_process)
for (original_data_index, expanded_array_data) in all_array_data.items():
for array_column in array_columns:
if (array_column in unprocessed_array_columns):
continue
if (str(array_column_hierarchy[array_column]['type']) == 'ROW'):
for array_value in expanded_array_data:
cls._expand_row_data(array_value, array_column, array_column_hierarchy)
continue
array_data = expanded_array_data[0][array_column]
array_children = array_column_hierarchy[array_column]
if ((not array_data) and (not array_children['children'])):
continue
elif (array_data and array_children['children']):
for (array_index, data_value) in enumerate(array_data):
if (array_index >= len(expanded_array_data)):
empty_data = cls._create_empty_row_of_data(all_columns)
expanded_array_data.append(empty_data)
for (index, datum_value) in enumerate(data_value):
array_child = array_children['children'][index]
expanded_array_data[array_index][array_child] = datum_value
elif array_data:
for (array_index, data_value) in enumerate(array_data):
if (array_index >= len(expanded_array_data)):
empty_data = cls._create_empty_row_of_data(all_columns)
expanded_array_data.append(empty_data)
expanded_array_data[array_index][array_column] = data_value
else:
for (index, array_child) in enumerate(array_children['children']):
for array_value in expanded_array_data:
array_value[array_child] = ''
return all_array_data
|
Pull out array data that is ready to be processed into a dictionary.
The key refers to the index in the original data set. The value is
a list of data values. Initially this list will contain just one value,
the row of data that corresponds to the index in the original data set.
As we process arrays, we will pull out array values into separate rows
and append them to the list of data values.
Example:
Original data set = [
{'ColumnA': [1, 2], 'ColumnB': [3]},
{'ColumnA': [11, 22], 'ColumnB': [33]}
]
all_array_data (intially) = {
0: [{'ColumnA': [1, 2], 'ColumnB': [3}],
1: [{'ColumnA': [11, 22], 'ColumnB': [33]}]
}
all_array_data (after processing) = {
0: [
{'ColumnA': 1, 'ColumnB': 3},
{'ColumnA': 2, 'ColumnB': ''},
],
1: [
{'ColumnA': 11, 'ColumnB': 33},
{'ColumnA': 22, 'ColumnB': ''},
],
}
:param data: rows of data
:param all_columns: list of columns
:param array_column_hierarchy: graph representing array columns
:return: dictionary representing processed array data
|
superset/db_engine_specs.py
|
_process_array_data
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _process_array_data(cls, data: List[dict], all_columns: List[dict], array_column_hierarchy: dict) -> dict:
"\n Pull out array data that is ready to be processed into a dictionary.\n The key refers to the index in the original data set. The value is\n a list of data values. Initially this list will contain just one value,\n the row of data that corresponds to the index in the original data set.\n As we process arrays, we will pull out array values into separate rows\n and append them to the list of data values.\n Example:\n Original data set = [\n {'ColumnA': [1, 2], 'ColumnB': [3]},\n {'ColumnA': [11, 22], 'ColumnB': [33]}\n ]\n all_array_data (intially) = {\n 0: [{'ColumnA': [1, 2], 'ColumnB': [3}],\n 1: [{'ColumnA': [11, 22], 'ColumnB': [33]}]\n }\n all_array_data (after processing) = {\n 0: [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': },\n ],\n 1: [\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': },\n ],\n }\n :param data: rows of data\n :param all_columns: list of columns\n :param array_column_hierarchy: graph representing array columns\n :return: dictionary representing processed array data\n "
array_columns = list(array_column_hierarchy.keys())
(array_columns_to_process, unprocessed_array_columns) = cls._split_array_columns_by_process_state(array_columns, array_column_hierarchy, data[0])
all_array_data = cls._convert_data_list_to_array_data_dict(data, array_columns_to_process)
for (original_data_index, expanded_array_data) in all_array_data.items():
for array_column in array_columns:
if (array_column in unprocessed_array_columns):
continue
if (str(array_column_hierarchy[array_column]['type']) == 'ROW'):
for array_value in expanded_array_data:
cls._expand_row_data(array_value, array_column, array_column_hierarchy)
continue
array_data = expanded_array_data[0][array_column]
array_children = array_column_hierarchy[array_column]
if ((not array_data) and (not array_children['children'])):
continue
elif (array_data and array_children['children']):
for (array_index, data_value) in enumerate(array_data):
if (array_index >= len(expanded_array_data)):
empty_data = cls._create_empty_row_of_data(all_columns)
expanded_array_data.append(empty_data)
for (index, datum_value) in enumerate(data_value):
array_child = array_children['children'][index]
expanded_array_data[array_index][array_child] = datum_value
elif array_data:
for (array_index, data_value) in enumerate(array_data):
if (array_index >= len(expanded_array_data)):
empty_data = cls._create_empty_row_of_data(all_columns)
expanded_array_data.append(empty_data)
expanded_array_data[array_index][array_column] = data_value
else:
for (index, array_child) in enumerate(array_children['children']):
for array_value in expanded_array_data:
array_value[array_child] =
return all_array_data
|
@classmethod
def _process_array_data(cls, data: List[dict], all_columns: List[dict], array_column_hierarchy: dict) -> dict:
"\n Pull out array data that is ready to be processed into a dictionary.\n The key refers to the index in the original data set. The value is\n a list of data values. Initially this list will contain just one value,\n the row of data that corresponds to the index in the original data set.\n As we process arrays, we will pull out array values into separate rows\n and append them to the list of data values.\n Example:\n Original data set = [\n {'ColumnA': [1, 2], 'ColumnB': [3]},\n {'ColumnA': [11, 22], 'ColumnB': [33]}\n ]\n all_array_data (intially) = {\n 0: [{'ColumnA': [1, 2], 'ColumnB': [3}],\n 1: [{'ColumnA': [11, 22], 'ColumnB': [33]}]\n }\n all_array_data (after processing) = {\n 0: [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': },\n ],\n 1: [\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': },\n ],\n }\n :param data: rows of data\n :param all_columns: list of columns\n :param array_column_hierarchy: graph representing array columns\n :return: dictionary representing processed array data\n "
array_columns = list(array_column_hierarchy.keys())
(array_columns_to_process, unprocessed_array_columns) = cls._split_array_columns_by_process_state(array_columns, array_column_hierarchy, data[0])
all_array_data = cls._convert_data_list_to_array_data_dict(data, array_columns_to_process)
for (original_data_index, expanded_array_data) in all_array_data.items():
for array_column in array_columns:
if (array_column in unprocessed_array_columns):
continue
if (str(array_column_hierarchy[array_column]['type']) == 'ROW'):
for array_value in expanded_array_data:
cls._expand_row_data(array_value, array_column, array_column_hierarchy)
continue
array_data = expanded_array_data[0][array_column]
array_children = array_column_hierarchy[array_column]
if ((not array_data) and (not array_children['children'])):
continue
elif (array_data and array_children['children']):
for (array_index, data_value) in enumerate(array_data):
if (array_index >= len(expanded_array_data)):
empty_data = cls._create_empty_row_of_data(all_columns)
expanded_array_data.append(empty_data)
for (index, datum_value) in enumerate(data_value):
array_child = array_children['children'][index]
expanded_array_data[array_index][array_child] = datum_value
elif array_data:
for (array_index, data_value) in enumerate(array_data):
if (array_index >= len(expanded_array_data)):
empty_data = cls._create_empty_row_of_data(all_columns)
expanded_array_data.append(empty_data)
expanded_array_data[array_index][array_column] = data_value
else:
for (index, array_child) in enumerate(array_children['children']):
for array_value in expanded_array_data:
array_value[array_child] =
return all_array_data<|docstring|>Pull out array data that is ready to be processed into a dictionary.
The key refers to the index in the original data set. The value is
a list of data values. Initially this list will contain just one value,
the row of data that corresponds to the index in the original data set.
As we process arrays, we will pull out array values into separate rows
and append them to the list of data values.
Example:
Original data set = [
{'ColumnA': [1, 2], 'ColumnB': [3]},
{'ColumnA': [11, 22], 'ColumnB': [33]}
]
all_array_data (intially) = {
0: [{'ColumnA': [1, 2], 'ColumnB': [3}],
1: [{'ColumnA': [11, 22], 'ColumnB': [33]}]
}
all_array_data (after processing) = {
0: [
{'ColumnA': 1, 'ColumnB': 3},
{'ColumnA': 2, 'ColumnB': ''},
],
1: [
{'ColumnA': 11, 'ColumnB': 33},
{'ColumnA': 22, 'ColumnB': ''},
],
}
:param data: rows of data
:param all_columns: list of columns
:param array_column_hierarchy: graph representing array columns
:return: dictionary representing processed array data<|endoftext|>
|
aacca6e4e2328628b766ef3a0dae685862daaeb1d13285d97168042e1f6df2d3
|
@classmethod
def _consolidate_array_data_into_data(cls, data: List[dict], array_data: dict) -> None:
"\n Consolidate data given a list representing rows of data and a dictionary\n representing expanded array data\n Example:\n Original data set = [\n {'ColumnA': [1, 2], 'ColumnB': [3]},\n {'ColumnA': [11, 22], 'ColumnB': [33]}\n ]\n array_data = {\n 0: [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': ''},\n ],\n 1: [\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': ''},\n ],\n }\n Final data set = [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': ''},\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': ''},\n ]\n :param data: list representing rows of data\n :param array_data: dictionary representing expanded array data\n :return: list where data and array_data are combined\n "
data_index = 0
original_data_index = 0
while (data_index < len(data)):
data[data_index].update(array_data[original_data_index][0])
array_data[original_data_index].pop(0)
data[(data_index + 1):(data_index + 1)] = array_data[original_data_index]
data_index = ((data_index + len(array_data[original_data_index])) + 1)
original_data_index = (original_data_index + 1)
|
Consolidate data given a list representing rows of data and a dictionary
representing expanded array data
Example:
Original data set = [
{'ColumnA': [1, 2], 'ColumnB': [3]},
{'ColumnA': [11, 22], 'ColumnB': [33]}
]
array_data = {
0: [
{'ColumnA': 1, 'ColumnB': 3},
{'ColumnA': 2, 'ColumnB': ''},
],
1: [
{'ColumnA': 11, 'ColumnB': 33},
{'ColumnA': 22, 'ColumnB': ''},
],
}
Final data set = [
{'ColumnA': 1, 'ColumnB': 3},
{'ColumnA': 2, 'ColumnB': ''},
{'ColumnA': 11, 'ColumnB': 33},
{'ColumnA': 22, 'ColumnB': ''},
]
:param data: list representing rows of data
:param array_data: dictionary representing expanded array data
:return: list where data and array_data are combined
|
superset/db_engine_specs.py
|
_consolidate_array_data_into_data
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _consolidate_array_data_into_data(cls, data: List[dict], array_data: dict) -> None:
"\n Consolidate data given a list representing rows of data and a dictionary\n representing expanded array data\n Example:\n Original data set = [\n {'ColumnA': [1, 2], 'ColumnB': [3]},\n {'ColumnA': [11, 22], 'ColumnB': [33]}\n ]\n array_data = {\n 0: [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': },\n ],\n 1: [\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': },\n ],\n }\n Final data set = [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': },\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': },\n ]\n :param data: list representing rows of data\n :param array_data: dictionary representing expanded array data\n :return: list where data and array_data are combined\n "
data_index = 0
original_data_index = 0
while (data_index < len(data)):
data[data_index].update(array_data[original_data_index][0])
array_data[original_data_index].pop(0)
data[(data_index + 1):(data_index + 1)] = array_data[original_data_index]
data_index = ((data_index + len(array_data[original_data_index])) + 1)
original_data_index = (original_data_index + 1)
|
@classmethod
def _consolidate_array_data_into_data(cls, data: List[dict], array_data: dict) -> None:
"\n Consolidate data given a list representing rows of data and a dictionary\n representing expanded array data\n Example:\n Original data set = [\n {'ColumnA': [1, 2], 'ColumnB': [3]},\n {'ColumnA': [11, 22], 'ColumnB': [33]}\n ]\n array_data = {\n 0: [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': },\n ],\n 1: [\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': },\n ],\n }\n Final data set = [\n {'ColumnA': 1, 'ColumnB': 3},\n {'ColumnA': 2, 'ColumnB': },\n {'ColumnA': 11, 'ColumnB': 33},\n {'ColumnA': 22, 'ColumnB': },\n ]\n :param data: list representing rows of data\n :param array_data: dictionary representing expanded array data\n :return: list where data and array_data are combined\n "
data_index = 0
original_data_index = 0
while (data_index < len(data)):
data[data_index].update(array_data[original_data_index][0])
array_data[original_data_index].pop(0)
data[(data_index + 1):(data_index + 1)] = array_data[original_data_index]
data_index = ((data_index + len(array_data[original_data_index])) + 1)
original_data_index = (original_data_index + 1)<|docstring|>Consolidate data given a list representing rows of data and a dictionary
representing expanded array data
Example:
Original data set = [
{'ColumnA': [1, 2], 'ColumnB': [3]},
{'ColumnA': [11, 22], 'ColumnB': [33]}
]
array_data = {
0: [
{'ColumnA': 1, 'ColumnB': 3},
{'ColumnA': 2, 'ColumnB': ''},
],
1: [
{'ColumnA': 11, 'ColumnB': 33},
{'ColumnA': 22, 'ColumnB': ''},
],
}
Final data set = [
{'ColumnA': 1, 'ColumnB': 3},
{'ColumnA': 2, 'ColumnB': ''},
{'ColumnA': 11, 'ColumnB': 33},
{'ColumnA': 22, 'ColumnB': ''},
]
:param data: list representing rows of data
:param array_data: dictionary representing expanded array data
:return: list where data and array_data are combined<|endoftext|>
|
8b228f2e53a85502b8b2c7bb33bc46e6f2c95352dab66fb1b0a2d718d0f56ebf
|
@classmethod
def _remove_processed_array_columns(cls, unprocessed_array_columns: Set[str], array_column_hierarchy: dict) -> None:
'\n Remove keys representing array columns that have already been processed\n :param unprocessed_array_columns: list of unprocessed array columns\n :param array_column_hierarchy: graph representing array columns\n '
array_columns = list(array_column_hierarchy.keys())
for array_column in array_columns:
if (array_column in unprocessed_array_columns):
continue
else:
del array_column_hierarchy[array_column]
|
Remove keys representing array columns that have already been processed
:param unprocessed_array_columns: list of unprocessed array columns
:param array_column_hierarchy: graph representing array columns
|
superset/db_engine_specs.py
|
_remove_processed_array_columns
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _remove_processed_array_columns(cls, unprocessed_array_columns: Set[str], array_column_hierarchy: dict) -> None:
'\n Remove keys representing array columns that have already been processed\n :param unprocessed_array_columns: list of unprocessed array columns\n :param array_column_hierarchy: graph representing array columns\n '
array_columns = list(array_column_hierarchy.keys())
for array_column in array_columns:
if (array_column in unprocessed_array_columns):
continue
else:
del array_column_hierarchy[array_column]
|
@classmethod
def _remove_processed_array_columns(cls, unprocessed_array_columns: Set[str], array_column_hierarchy: dict) -> None:
'\n Remove keys representing array columns that have already been processed\n :param unprocessed_array_columns: list of unprocessed array columns\n :param array_column_hierarchy: graph representing array columns\n '
array_columns = list(array_column_hierarchy.keys())
for array_column in array_columns:
if (array_column in unprocessed_array_columns):
continue
else:
del array_column_hierarchy[array_column]<|docstring|>Remove keys representing array columns that have already been processed
:param unprocessed_array_columns: list of unprocessed array columns
:param array_column_hierarchy: graph representing array columns<|endoftext|>
|
f4d91025017a4f9ff278b654ca302116058b1af7926b86e251549afb022eeaf7
|
@classmethod
def expand_data(cls, columns: List[dict], data: List[dict]) -> Tuple[(List[dict], List[dict], List[dict])]:
"\n We do not immediately display rows and arrays clearly in the data grid. This\n method separates out nested fields and data values to help clearly display\n structural columns.\n\n Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)\n Original data set = [\n {'ColumnA': ['a1'], 'ColumnB': [1, 2]},\n {'ColumnA': ['a2'], 'ColumnB': [3, 4]},\n ]\n Expanded data set = [\n {'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},\n {'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 2},\n {'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},\n {'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 4},\n ]\n :param columns: columns selected in the query\n :param data: original data set\n :return: list of all columns(selected columns and their nested fields),\n expanded data set, listed of nested fields\n "
all_columns: List[dict] = []
for column in columns:
if (column['type'].startswith('ARRAY') or column['type'].startswith('ROW')):
cls._parse_structural_column(column['name'], column['type'].lower(), all_columns)
else:
all_columns.append(column)
(row_column_hierarchy, array_column_hierarchy, expanded_columns) = cls._create_row_and_array_hierarchy(columns)
ordered_row_columns = row_column_hierarchy.keys()
for datum in data:
for row_column in ordered_row_columns:
cls._expand_row_data(datum, row_column, row_column_hierarchy)
while array_column_hierarchy:
array_columns = list(array_column_hierarchy.keys())
(array_columns_to_process, unprocessed_array_columns) = cls._split_array_columns_by_process_state(array_columns, array_column_hierarchy, data[0])
all_array_data = cls._process_array_data(data, all_columns, array_column_hierarchy)
cls._consolidate_array_data_into_data(data, all_array_data)
cls._remove_processed_array_columns(unprocessed_array_columns, array_column_hierarchy)
return (all_columns, data, expanded_columns)
|
We do not immediately display rows and arrays clearly in the data grid. This
method separates out nested fields and data values to help clearly display
structural columns.
Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)
Original data set = [
{'ColumnA': ['a1'], 'ColumnB': [1, 2]},
{'ColumnA': ['a2'], 'ColumnB': [3, 4]},
]
Expanded data set = [
{'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 2},
{'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 4},
]
:param columns: columns selected in the query
:param data: original data set
:return: list of all columns(selected columns and their nested fields),
expanded data set, listed of nested fields
|
superset/db_engine_specs.py
|
expand_data
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def expand_data(cls, columns: List[dict], data: List[dict]) -> Tuple[(List[dict], List[dict], List[dict])]:
"\n We do not immediately display rows and arrays clearly in the data grid. This\n method separates out nested fields and data values to help clearly display\n structural columns.\n\n Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)\n Original data set = [\n {'ColumnA': ['a1'], 'ColumnB': [1, 2]},\n {'ColumnA': ['a2'], 'ColumnB': [3, 4]},\n ]\n Expanded data set = [\n {'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},\n {'ColumnA': , 'ColumnA.nested_obj': , 'ColumnB': 2},\n {'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},\n {'ColumnA': , 'ColumnA.nested_obj': , 'ColumnB': 4},\n ]\n :param columns: columns selected in the query\n :param data: original data set\n :return: list of all columns(selected columns and their nested fields),\n expanded data set, listed of nested fields\n "
all_columns: List[dict] = []
for column in columns:
if (column['type'].startswith('ARRAY') or column['type'].startswith('ROW')):
cls._parse_structural_column(column['name'], column['type'].lower(), all_columns)
else:
all_columns.append(column)
(row_column_hierarchy, array_column_hierarchy, expanded_columns) = cls._create_row_and_array_hierarchy(columns)
ordered_row_columns = row_column_hierarchy.keys()
for datum in data:
for row_column in ordered_row_columns:
cls._expand_row_data(datum, row_column, row_column_hierarchy)
while array_column_hierarchy:
array_columns = list(array_column_hierarchy.keys())
(array_columns_to_process, unprocessed_array_columns) = cls._split_array_columns_by_process_state(array_columns, array_column_hierarchy, data[0])
all_array_data = cls._process_array_data(data, all_columns, array_column_hierarchy)
cls._consolidate_array_data_into_data(data, all_array_data)
cls._remove_processed_array_columns(unprocessed_array_columns, array_column_hierarchy)
return (all_columns, data, expanded_columns)
|
@classmethod
def expand_data(cls, columns: List[dict], data: List[dict]) -> Tuple[(List[dict], List[dict], List[dict])]:
"\n We do not immediately display rows and arrays clearly in the data grid. This\n method separates out nested fields and data values to help clearly display\n structural columns.\n\n Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)\n Original data set = [\n {'ColumnA': ['a1'], 'ColumnB': [1, 2]},\n {'ColumnA': ['a2'], 'ColumnB': [3, 4]},\n ]\n Expanded data set = [\n {'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},\n {'ColumnA': , 'ColumnA.nested_obj': , 'ColumnB': 2},\n {'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},\n {'ColumnA': , 'ColumnA.nested_obj': , 'ColumnB': 4},\n ]\n :param columns: columns selected in the query\n :param data: original data set\n :return: list of all columns(selected columns and their nested fields),\n expanded data set, listed of nested fields\n "
all_columns: List[dict] = []
for column in columns:
if (column['type'].startswith('ARRAY') or column['type'].startswith('ROW')):
cls._parse_structural_column(column['name'], column['type'].lower(), all_columns)
else:
all_columns.append(column)
(row_column_hierarchy, array_column_hierarchy, expanded_columns) = cls._create_row_and_array_hierarchy(columns)
ordered_row_columns = row_column_hierarchy.keys()
for datum in data:
for row_column in ordered_row_columns:
cls._expand_row_data(datum, row_column, row_column_hierarchy)
while array_column_hierarchy:
array_columns = list(array_column_hierarchy.keys())
(array_columns_to_process, unprocessed_array_columns) = cls._split_array_columns_by_process_state(array_columns, array_column_hierarchy, data[0])
all_array_data = cls._process_array_data(data, all_columns, array_column_hierarchy)
cls._consolidate_array_data_into_data(data, all_array_data)
cls._remove_processed_array_columns(unprocessed_array_columns, array_column_hierarchy)
return (all_columns, data, expanded_columns)<|docstring|>We do not immediately display rows and arrays clearly in the data grid. This
method separates out nested fields and data values to help clearly display
structural columns.
Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)
Original data set = [
{'ColumnA': ['a1'], 'ColumnB': [1, 2]},
{'ColumnA': ['a2'], 'ColumnB': [3, 4]},
]
Expanded data set = [
{'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 2},
{'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},
{'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 4},
]
:param columns: columns selected in the query
:param data: original data set
:return: list of all columns(selected columns and their nested fields),
expanded data set, listed of nested fields<|endoftext|>
|
1b9eee26ae2252f941694bab6d604d6a4775438098b5422e72f0360490185c9e
|
@classmethod
def handle_cursor(cls, cursor, query, session):
'Updates progress information'
logging.info('Polling the cursor for progress')
polled = cursor.poll()
while polled:
stats = polled.get('stats', {})
query = session.query(type(query)).filter_by(id=query.id).one()
if (query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]):
cursor.cancel()
break
if stats:
state = stats.get('state')
if (state == 'FINISHED'):
break
completed_splits = float(stats.get('completedSplits'))
total_splits = float(stats.get('totalSplits'))
if (total_splits and completed_splits):
progress = (100 * (completed_splits / total_splits))
logging.info('Query progress: {} / {} splits'.format(completed_splits, total_splits))
if (progress > query.progress):
query.progress = progress
session.commit()
time.sleep(1)
logging.info('Polling the cursor for progress')
polled = cursor.poll()
|
Updates progress information
|
superset/db_engine_specs.py
|
handle_cursor
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def handle_cursor(cls, cursor, query, session):
logging.info('Polling the cursor for progress')
polled = cursor.poll()
while polled:
stats = polled.get('stats', {})
query = session.query(type(query)).filter_by(id=query.id).one()
if (query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]):
cursor.cancel()
break
if stats:
state = stats.get('state')
if (state == 'FINISHED'):
break
completed_splits = float(stats.get('completedSplits'))
total_splits = float(stats.get('totalSplits'))
if (total_splits and completed_splits):
progress = (100 * (completed_splits / total_splits))
logging.info('Query progress: {} / {} splits'.format(completed_splits, total_splits))
if (progress > query.progress):
query.progress = progress
session.commit()
time.sleep(1)
logging.info('Polling the cursor for progress')
polled = cursor.poll()
|
@classmethod
def handle_cursor(cls, cursor, query, session):
logging.info('Polling the cursor for progress')
polled = cursor.poll()
while polled:
stats = polled.get('stats', {})
query = session.query(type(query)).filter_by(id=query.id).one()
if (query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]):
cursor.cancel()
break
if stats:
state = stats.get('state')
if (state == 'FINISHED'):
break
completed_splits = float(stats.get('completedSplits'))
total_splits = float(stats.get('totalSplits'))
if (total_splits and completed_splits):
progress = (100 * (completed_splits / total_splits))
logging.info('Query progress: {} / {} splits'.format(completed_splits, total_splits))
if (progress > query.progress):
query.progress = progress
session.commit()
time.sleep(1)
logging.info('Polling the cursor for progress')
polled = cursor.poll()<|docstring|>Updates progress information<|endoftext|>
|
b8ff8fc22afc193dc111cf09af2ce002dceb35fbd38c92b195ff8cf4fb12a0fc
|
@classmethod
def _partition_query(cls, table_name, limit=0, order_by=None, filters=None):
'Returns a partition query\n\n :param table_name: the name of the table to get partitions from\n :type table_name: str\n :param limit: the number of partitions to be returned\n :type limit: int\n :param order_by: a list of tuples of field name and a boolean\n that determines if that field should be sorted in descending\n order\n :type order_by: list of (str, bool) tuples\n :param filters: dict of field name and filter value combinations\n '
limit_clause = ('LIMIT {}'.format(limit) if limit else '')
order_by_clause = ''
if order_by:
l = []
for (field, desc) in order_by:
l.append(((field + ' DESC') if desc else ''))
order_by_clause = ('ORDER BY ' + ', '.join(l))
where_clause = ''
if filters:
l = []
for (field, value) in filters.items():
l.append(f"{field} = '{value}'")
where_clause = ('WHERE ' + ' AND '.join(l))
sql = textwrap.dedent(f''' SELECT * FROM "{table_name}$partitions"
{where_clause}
{order_by_clause}
{limit_clause}
''')
return sql
|
Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations
|
superset/db_engine_specs.py
|
_partition_query
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _partition_query(cls, table_name, limit=0, order_by=None, filters=None):
'Returns a partition query\n\n :param table_name: the name of the table to get partitions from\n :type table_name: str\n :param limit: the number of partitions to be returned\n :type limit: int\n :param order_by: a list of tuples of field name and a boolean\n that determines if that field should be sorted in descending\n order\n :type order_by: list of (str, bool) tuples\n :param filters: dict of field name and filter value combinations\n '
limit_clause = ('LIMIT {}'.format(limit) if limit else )
order_by_clause =
if order_by:
l = []
for (field, desc) in order_by:
l.append(((field + ' DESC') if desc else ))
order_by_clause = ('ORDER BY ' + ', '.join(l))
where_clause =
if filters:
l = []
for (field, value) in filters.items():
l.append(f"{field} = '{value}'")
where_clause = ('WHERE ' + ' AND '.join(l))
sql = textwrap.dedent(f' SELECT * FROM "{table_name}$partitions"
{where_clause}
{order_by_clause}
{limit_clause}
')
return sql
|
@classmethod
def _partition_query(cls, table_name, limit=0, order_by=None, filters=None):
'Returns a partition query\n\n :param table_name: the name of the table to get partitions from\n :type table_name: str\n :param limit: the number of partitions to be returned\n :type limit: int\n :param order_by: a list of tuples of field name and a boolean\n that determines if that field should be sorted in descending\n order\n :type order_by: list of (str, bool) tuples\n :param filters: dict of field name and filter value combinations\n '
limit_clause = ('LIMIT {}'.format(limit) if limit else )
order_by_clause =
if order_by:
l = []
for (field, desc) in order_by:
l.append(((field + ' DESC') if desc else ))
order_by_clause = ('ORDER BY ' + ', '.join(l))
where_clause =
if filters:
l = []
for (field, value) in filters.items():
l.append(f"{field} = '{value}'")
where_clause = ('WHERE ' + ' AND '.join(l))
sql = textwrap.dedent(f' SELECT * FROM "{table_name}$partitions"
{where_clause}
{order_by_clause}
{limit_clause}
')
return sql<|docstring|>Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations<|endoftext|>
|
5c34774a1e3d58fdfb1d76b28554f6939317aa80f867c6aaf0ff4bca5babacda
|
@classmethod
def latest_partition(cls, table_name, schema, database, show_first=False):
"Returns col name and the latest (max) partition value for a table\n\n :param table_name: the name of the table\n :type table_name: str\n :param schema: schema / database / namespace\n :type schema: str\n :param database: database query will be run against\n :type database: models.Database\n :param show_first: displays the value for the first partitioning key\n if there are many partitioning keys\n :type show_first: bool\n\n >>> latest_partition('foo_table')\n ('ds', '2018-01-01')\n "
indexes = database.get_indexes(table_name, schema)
if (len(indexes[0]['column_names']) < 1):
raise SupersetTemplateException('The table should have one partitioned field')
elif ((not show_first) and (len(indexes[0]['column_names']) > 1)):
raise SupersetTemplateException('The table should have a single partitioned field to use this function. You may want to use `presto.latest_sub_partition`')
part_field = indexes[0]['column_names'][0]
sql = cls._partition_query(table_name, 1, [(part_field, True)])
df = database.get_df(sql, schema)
return (part_field, cls._latest_partition_from_df(df))
|
Returns col name and the latest (max) partition value for a table
:param table_name: the name of the table
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param show_first: displays the value for the first partitioning key
if there are many partitioning keys
:type show_first: bool
>>> latest_partition('foo_table')
('ds', '2018-01-01')
|
superset/db_engine_specs.py
|
latest_partition
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def latest_partition(cls, table_name, schema, database, show_first=False):
"Returns col name and the latest (max) partition value for a table\n\n :param table_name: the name of the table\n :type table_name: str\n :param schema: schema / database / namespace\n :type schema: str\n :param database: database query will be run against\n :type database: models.Database\n :param show_first: displays the value for the first partitioning key\n if there are many partitioning keys\n :type show_first: bool\n\n >>> latest_partition('foo_table')\n ('ds', '2018-01-01')\n "
indexes = database.get_indexes(table_name, schema)
if (len(indexes[0]['column_names']) < 1):
raise SupersetTemplateException('The table should have one partitioned field')
elif ((not show_first) and (len(indexes[0]['column_names']) > 1)):
raise SupersetTemplateException('The table should have a single partitioned field to use this function. You may want to use `presto.latest_sub_partition`')
part_field = indexes[0]['column_names'][0]
sql = cls._partition_query(table_name, 1, [(part_field, True)])
df = database.get_df(sql, schema)
return (part_field, cls._latest_partition_from_df(df))
|
@classmethod
def latest_partition(cls, table_name, schema, database, show_first=False):
"Returns col name and the latest (max) partition value for a table\n\n :param table_name: the name of the table\n :type table_name: str\n :param schema: schema / database / namespace\n :type schema: str\n :param database: database query will be run against\n :type database: models.Database\n :param show_first: displays the value for the first partitioning key\n if there are many partitioning keys\n :type show_first: bool\n\n >>> latest_partition('foo_table')\n ('ds', '2018-01-01')\n "
indexes = database.get_indexes(table_name, schema)
if (len(indexes[0]['column_names']) < 1):
raise SupersetTemplateException('The table should have one partitioned field')
elif ((not show_first) and (len(indexes[0]['column_names']) > 1)):
raise SupersetTemplateException('The table should have a single partitioned field to use this function. You may want to use `presto.latest_sub_partition`')
part_field = indexes[0]['column_names'][0]
sql = cls._partition_query(table_name, 1, [(part_field, True)])
df = database.get_df(sql, schema)
return (part_field, cls._latest_partition_from_df(df))<|docstring|>Returns col name and the latest (max) partition value for a table
:param table_name: the name of the table
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param show_first: displays the value for the first partitioning key
if there are many partitioning keys
:type show_first: bool
>>> latest_partition('foo_table')
('ds', '2018-01-01')<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.