after_merge
stringlengths 64
17k
| before_merge
stringlengths 60
17k
| source code and errors
stringlengths 236
32.3k
| full_traceback
stringlengths 170
17.7k
| traceback_type
stringclasses 60
values |
|---|---|---|---|---|
def cmdb_get_mainline_object_topo(request, bk_biz_id, bk_supplier_account=''):
    """
    @summary: Get the mainline topology model of a business from CMDB.
    @param request: Django request; the current user's username is used to build the API client
    @param bk_biz_id: CMDB business id
    @param bk_supplier_account: supplier account, defaults to ''
    @return: JsonResponse carrying {'result', 'code', 'data'} on success
             or {'result', 'code', 'message'} on failure
    """
    kwargs = {
        'bk_biz_id': bk_biz_id,
        'bk_supplier_account': bk_supplier_account,
    }
    client = get_client_by_user(request.user.username)
    cc_result = client.cc.get_mainline_object_topo(kwargs)
    if not cc_result['result']:
        message = handle_api_error(_(u"配置平台(CMDB)"),
                                   'cc.get_mainline_object_topo',
                                   kwargs,
                                   cc_result['message'])
        # BUG FIX: this is a Django view — the error path must return an
        # HTTP response like the success path does, not a bare dict.
        return JsonResponse({'result': cc_result['result'], 'code': cc_result['code'], 'message': message})
    data = cc_result['data']
    # Relabel the 'host' object so the frontend shows it as "IP".
    for bk_obj in data:
        if bk_obj['bk_obj_id'] == 'host':
            bk_obj['bk_obj_name'] = 'IP'
    result = {'result': cc_result['result'], 'code': cc_result['code'], 'data': cc_result['data']}
    return JsonResponse(result)
|
def cmdb_get_mainline_object_topo(request, bk_biz_id, bk_supplier_account=''):
    """
    @summary: Get the mainline topology model of a business from CMDB.
    @param request: Django request; used to build the API client
    @param bk_biz_id: CMDB business id
    @param bk_supplier_account: supplier account, defaults to ''
    @return: JsonResponse carrying {'result', 'code', 'data'} on success
             or {'result', 'code', 'message'} on failure
    """
    kwargs = {
        'bk_biz_id': bk_biz_id,
        'bk_supplier_account': bk_supplier_account,
    }
    client = get_client_by_request(request)
    cc_result = client.cc.get_mainline_object_topo(kwargs)
    if not cc_result['result']:
        message = handle_api_error(_(u"配置平台(CMDB)"),
                                   'cc.get_mainline_object_topo',
                                   kwargs,
                                   cc_result['message'])
        # BUG FIX: this is a Django view — the error path must return an
        # HTTP response like the success path does, not a bare dict.
        return JsonResponse({'result': cc_result['result'], 'code': cc_result['code'], 'message': message})
    data = cc_result['data']
    # Relabel the 'host' object so the frontend shows it as "IP".
    for bk_obj in data:
        if bk_obj['bk_obj_id'] == 'host':
            bk_obj['bk_obj_name'] = 'IP'
    result = {'result': cc_result['result'], 'code': cc_result['code'], 'data': cc_result['data']}
    return JsonResponse(result)
|
[{'piece_type': 'error message', 'piece_content': '------STARTING: Migrate Database------\\nTraceback (most recent call last):\\nFile "manage.py", line 27, in <module>\\nexecute_from_command_line(sys.argv)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line\\nutility.execute()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute\\ndjango.setup()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup\\napps.populate(settings.INSTALLED_APPS)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate\\napp_config = AppConfig.create(entry)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create\\nmod = import_module(mod_path)\\nFile "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module\\n__import__(name)\\nFile "/data/app/code/pipeline/apps.py", line 18, in <module>\\nfrom rediscluster import StrictRedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>\\nfrom .client import StrictRedisCluster, RedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>\\nfrom .connection import (\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>\\nfrom .nodemanager import NodeManager\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>\\nfrom redis._compat import b, unicode, bytes, long, basestring\\nImportError: cannot import name b\\n------FAILURE: Migrate Database------'}]
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def cc_search_object_attribute(request, obj_id, biz_cc_id, supplier_account):
    """
    Fetch the editable custom attributes of a CMDB object and format
    them as {'value', 'text'} select options for the frontend.
    """
    client = get_client_by_user(request.user.username)
    kwargs = {
        'bk_obj_id': obj_id,
        'bk_supplier_account': supplier_account
    }
    cc_result = client.cc.search_object_attribute(kwargs)
    if not cc_result['result']:
        message = handle_api_error('cc', 'cc.search_object_attribute', kwargs, cc_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    # Keep only attributes the user may edit.
    obj_property = [
        {'value': item['bk_property_id'], 'text': item['bk_property_name']}
        for item in cc_result['data']
        if item['editable']
    ]
    return JsonResponse({'result': True, 'data': obj_property})
|
def cc_search_object_attribute(request, obj_id, biz_cc_id, supplier_account):
    """
    Fetch the editable custom attributes of a CMDB object and format
    them as {'value', 'text'} select options for the frontend.
    """
    client = get_client_by_request(request)
    kwargs = {
        'bk_obj_id': obj_id,
        'bk_supplier_account': supplier_account
    }
    cc_result = client.cc.search_object_attribute(kwargs)
    if not cc_result['result']:
        message = handle_api_error('cc', 'cc.search_object_attribute', kwargs, cc_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    # Keep only attributes the user may edit.
    obj_property = [
        {'value': item['bk_property_id'], 'text': item['bk_property_name']}
        for item in cc_result['data']
        if item['editable']
    ]
    return JsonResponse({'result': True, 'data': obj_property})
|
[{'piece_type': 'error message', 'piece_content': '------STARTING: Migrate Database------\\nTraceback (most recent call last):\\nFile "manage.py", line 27, in <module>\\nexecute_from_command_line(sys.argv)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line\\nutility.execute()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute\\ndjango.setup()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup\\napps.populate(settings.INSTALLED_APPS)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate\\napp_config = AppConfig.create(entry)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create\\nmod = import_module(mod_path)\\nFile "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module\\n__import__(name)\\nFile "/data/app/code/pipeline/apps.py", line 18, in <module>\\nfrom rediscluster import StrictRedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>\\nfrom .client import StrictRedisCluster, RedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>\\nfrom .connection import (\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>\\nfrom .nodemanager import NodeManager\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>\\nfrom redis._compat import b, unicode, bytes, long, basestring\\nImportError: cannot import name b\\n------FAILURE: Migrate Database------'}]
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def cc_search_create_object_attribute(request, obj_id, biz_cc_id, supplier_account):
    """
    Fetch the editable attributes of a CMDB object and convert them into
    form-field descriptors for the frontend; 'bk_set_name' is marked required.
    """
    client = get_client_by_user(request.user.username)
    kwargs = {
        'bk_obj_id': obj_id,
        'bk_supplier_account': supplier_account
    }
    cc_result = client.cc.search_object_attribute(kwargs)
    if not cc_result['result']:
        message = handle_api_error('cc', 'cc.search_object_attribute', kwargs, cc_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    obj_property = []
    for item in cc_result['data']:
        # Skip read-only attributes entirely.
        if not item['editable']:
            continue
        prop_dict = {
            'tag_code': item['bk_property_id'],
            'type': "input",
            'attrs': {
                'name': item['bk_property_name'],
                'editable': 'true',
            },
        }
        # The set name must be filled in when creating a set.
        if item['bk_property_id'] in ['bk_set_name']:
            prop_dict["attrs"]["validation"] = [{"type": "required"}]
        obj_property.append(prop_dict)
    return JsonResponse({'result': True, 'data': obj_property})
|
def cc_search_create_object_attribute(request, obj_id, biz_cc_id, supplier_account):
    """
    Fetch the editable attributes of a CMDB object and convert them into
    form-field descriptors for the frontend; 'bk_set_name' is marked required.
    """
    client = get_client_by_request(request)
    kwargs = {
        'bk_obj_id': obj_id,
        'bk_supplier_account': supplier_account
    }
    cc_result = client.cc.search_object_attribute(kwargs)
    if not cc_result['result']:
        message = handle_api_error('cc', 'cc.search_object_attribute', kwargs, cc_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    obj_property = []
    for item in cc_result['data']:
        # Skip read-only attributes entirely.
        if not item['editable']:
            continue
        prop_dict = {
            'tag_code': item['bk_property_id'],
            'type': "input",
            'attrs': {
                'name': item['bk_property_name'],
                'editable': 'true',
            },
        }
        # The set name must be filled in when creating a set.
        if item['bk_property_id'] in ['bk_set_name']:
            prop_dict["attrs"]["validation"] = [{"type": "required"}]
        obj_property.append(prop_dict)
    return JsonResponse({'result': True, 'data': obj_property})
|
[{'piece_type': 'error message', 'piece_content': '------STARTING: Migrate Database------\\nTraceback (most recent call last):\\nFile "manage.py", line 27, in <module>\\nexecute_from_command_line(sys.argv)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line\\nutility.execute()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute\\ndjango.setup()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup\\napps.populate(settings.INSTALLED_APPS)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate\\napp_config = AppConfig.create(entry)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create\\nmod = import_module(mod_path)\\nFile "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module\\n__import__(name)\\nFile "/data/app/code/pipeline/apps.py", line 18, in <module>\\nfrom rediscluster import StrictRedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>\\nfrom .client import StrictRedisCluster, RedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>\\nfrom .connection import (\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>\\nfrom .nodemanager import NodeManager\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>\\nfrom redis._compat import b, unicode, bytes, long, basestring\\nImportError: cannot import name b\\n------FAILURE: Migrate Database------'}]
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def cc_search_topo(request, obj_id, category, biz_cc_id, supplier_account):
    """
    Query the instance topology of a business from CMDB and format it
    for the given frontend category ('normal', 'prev' or 'picker').
    """
    client = get_client_by_user(request.user.username)
    kwargs = {
        'bk_biz_id': biz_cc_id,
        'bk_supplier_account': supplier_account
    }
    cc_result = client.cc.search_biz_inst_topo(kwargs)
    if not cc_result['result']:
        message = handle_api_error('cc', 'cc.search_biz_inst_topo', kwargs, cc_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    # An unrecognized category yields an empty topology, not an error.
    cc_topo = []
    if category in ("normal", "prev", "picker"):
        cc_topo = cc_format_topo_data(cc_result['data'], obj_id, category)
    return JsonResponse({'result': True, 'data': cc_topo})
|
def cc_search_topo(request, obj_id, category, biz_cc_id, supplier_account):
    """
    Query the instance topology of a business from CMDB and format it
    for the given frontend category ('normal', 'prev' or 'picker').
    """
    client = get_client_by_request(request)
    kwargs = {
        'bk_biz_id': biz_cc_id,
        'bk_supplier_account': supplier_account
    }
    cc_result = client.cc.search_biz_inst_topo(kwargs)
    if not cc_result['result']:
        message = handle_api_error('cc', 'cc.search_biz_inst_topo', kwargs, cc_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    # An unrecognized category yields an empty topology, not an error.
    cc_topo = []
    if category in ("normal", "prev", "picker"):
        cc_topo = cc_format_topo_data(cc_result['data'], obj_id, category)
    return JsonResponse({'result': True, 'data': cc_topo})
|
[{'piece_type': 'error message', 'piece_content': '------STARTING: Migrate Database------\\nTraceback (most recent call last):\\nFile "manage.py", line 27, in <module>\\nexecute_from_command_line(sys.argv)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line\\nutility.execute()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute\\ndjango.setup()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup\\napps.populate(settings.INSTALLED_APPS)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate\\napp_config = AppConfig.create(entry)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create\\nmod = import_module(mod_path)\\nFile "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module\\n__import__(name)\\nFile "/data/app/code/pipeline/apps.py", line 18, in <module>\\nfrom rediscluster import StrictRedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>\\nfrom .client import StrictRedisCluster, RedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>\\nfrom .connection import (\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>\\nfrom .nodemanager import NodeManager\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>\\nfrom redis._compat import b, unicode, bytes, long, basestring\\nImportError: cannot import name b\\n------FAILURE: Migrate Database------'}]
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def job_get_script_list(request, biz_cc_id):
    """
    List the scripts of a business from JOB, grouped by script name.
    Each returned option's value is the newest (max) script id for
    that name.
    :param request: Django request; '?type=public' selects public scripts
    :param biz_cc_id: business id
    :return: JsonResponse with [{'text': name, 'value': max_id}, ...]
    """
    client = get_client_by_user(request.user.username)
    script_type = request.GET.get('type')
    kwargs = {
        'bk_biz_id': biz_cc_id,
        'is_public': script_type == 'public'
    }
    script_result = client.job.get_script_list(kwargs)
    if not script_result['result']:
        message = handle_api_error('cc', 'job.get_script_list', kwargs, script_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'message': message
        })
    # Collect all ids that share a script name.
    name_to_ids = {}
    for script in script_result['data']['data']:
        name_to_ids.setdefault(script['name'], []).append(script['id'])
    # Expose only the highest id per name (the latest version).
    version_data = [{"text": name, "value": max(ids)}
                    for name, ids in name_to_ids.items()]
    return JsonResponse({'result': True, 'data': version_data})
|
def job_get_script_list(request, biz_cc_id):
    """
    List the scripts of a business from JOB, grouped by script name.
    Each returned option's value is the newest (max) script id for
    that name.
    :param request: Django request; '?type=public' selects public scripts
    :param biz_cc_id: business id
    :return: JsonResponse with [{'text': name, 'value': max_id}, ...]
    """
    client = get_client_by_request(request)
    script_type = request.GET.get('type')
    kwargs = {
        'bk_biz_id': biz_cc_id,
        'is_public': script_type == 'public'
    }
    script_result = client.job.get_script_list(kwargs)
    if not script_result['result']:
        message = handle_api_error('cc', 'job.get_script_list', kwargs, script_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'message': message
        })
    # Collect all ids that share a script name.
    name_to_ids = {}
    for script in script_result['data']['data']:
        name_to_ids.setdefault(script['name'], []).append(script['id'])
    # Expose only the highest id per name (the latest version).
    version_data = [{"text": name, "value": max(ids)}
                    for name, ids in name_to_ids.items()]
    return JsonResponse({'result': True, 'data': version_data})
|
[{'piece_type': 'error message', 'piece_content': '------STARTING: Migrate Database------\\nTraceback (most recent call last):\\nFile "manage.py", line 27, in <module>\\nexecute_from_command_line(sys.argv)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line\\nutility.execute()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute\\ndjango.setup()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup\\napps.populate(settings.INSTALLED_APPS)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate\\napp_config = AppConfig.create(entry)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create\\nmod = import_module(mod_path)\\nFile "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module\\n__import__(name)\\nFile "/data/app/code/pipeline/apps.py", line 18, in <module>\\nfrom rediscluster import StrictRedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>\\nfrom .client import StrictRedisCluster, RedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>\\nfrom .connection import (\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>\\nfrom .nodemanager import NodeManager\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>\\nfrom redis._compat import b, unicode, bytes, long, basestring\\nImportError: cannot import name b\\n------FAILURE: Migrate Database------'}]
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def job_get_job_tasks_by_biz(request, biz_cc_id):
    """
    List the JOB task templates of a business as {'value', 'text'}
    options for the frontend.
    """
    client = get_client_by_user(request.user.username)
    job_result = client.job.get_job_list({'bk_biz_id': biz_cc_id})
    if not job_result['result']:
        message = _(u"查询作业平台(JOB)的作业模板[app_id=%s]接口job.get_task返回失败: %s") % (
            biz_cc_id, job_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    task_list = [{'value': task['bk_job_id'], 'text': task['name']}
                 for task in job_result['data']]
    return JsonResponse({'result': True, 'data': task_list})
|
def job_get_job_tasks_by_biz(request, biz_cc_id):
    """
    List the JOB task templates of a business as {'value', 'text'}
    options for the frontend.
    """
    client = get_client_by_request(request)
    job_result = client.job.get_job_list({'bk_biz_id': biz_cc_id})
    if not job_result['result']:
        message = _(u"查询作业平台(JOB)的作业模板[app_id=%s]接口job.get_task返回失败: %s") % (
            biz_cc_id, job_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    task_list = [{'value': task['bk_job_id'], 'text': task['name']}
                 for task in job_result['data']]
    return JsonResponse({'result': True, 'data': task_list})
|
[{'piece_type': 'error message', 'piece_content': '------STARTING: Migrate Database------\\nTraceback (most recent call last):\\nFile "manage.py", line 27, in <module>\\nexecute_from_command_line(sys.argv)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line\\nutility.execute()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute\\ndjango.setup()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup\\napps.populate(settings.INSTALLED_APPS)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate\\napp_config = AppConfig.create(entry)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create\\nmod = import_module(mod_path)\\nFile "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module\\n__import__(name)\\nFile "/data/app/code/pipeline/apps.py", line 18, in <module>\\nfrom rediscluster import StrictRedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>\\nfrom .client import StrictRedisCluster, RedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>\\nfrom .connection import (\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>\\nfrom .nodemanager import NodeManager\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>\\nfrom redis._compat import b, unicode, bytes, long, basestring\\nImportError: cannot import name b\\n------FAILURE: Migrate Database------'}]
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def job_get_job_task_detail(request, biz_cc_id, task_id):
    """
    Fetch a JOB task template's detail and reshape its global variables
    and steps for the frontend.
    """
    client = get_client_by_user(request.user.username)
    job_result = client.job.get_job_detail({'bk_biz_id': biz_cc_id,
                                            'bk_job_id': task_id})
    if not job_result['result']:
        message = _(u"查询作业平台(JOB)的作业模板详情[app_id=%s]接口job.get_task_detail返回失败: %s") % (
            biz_cc_id, job_result['message'])
        logger.error(message)
        return JsonResponse({
            'result': False,
            'data': [],
            'message': message
        })
    # Step type display names: 1 - script, 2 - file, 4 - SQL.
    job_step_type_name = {
        1: _(u"脚本"),
        2: _(u"文件"),
        4: u"SQL"
    }
    task_detail = job_result['data']

    global_var = []
    for var in task_detail.get('global_vars', []):
        # Variable types: 1 - string, 2 - IP, 3 - indexed array, 4 - associative array.
        if var['type'] in [JOB_VAR_TYPE_STR, JOB_VAR_TYPE_IP, JOB_VAR_TYPE_ARRAY]:
            value = var.get('value', '')
        else:
            # IP-typed variables are flattened to "cloud_id:ip" strings.
            value = ['{plat_id}:{ip}'.format(plat_id=ip_item['bk_cloud_id'], ip=ip_item['ip'])
                     for ip_item in var.get('ip_list', [])]
        global_var.append({
            'id': var['id'],
            # Variable category: 1 - cloud param, 2 - context param, 3 - IP.
            'category': var.get('category', 1),
            'name': var['name'],
            'type': var['type'],
            'value': value,
            'description': var['description']
        })

    steps = [{
        'stepId': info['step_id'],
        'name': info['name'],
        'scriptParams': info.get('script_param', ''),
        'account': info.get('account', ''),
        'ipList': '',
        'type': info['type'],
        'type_name': job_step_type_name.get(info['type'], info['type'])
    } for info in task_detail.get('steps', [])]

    return JsonResponse({'result': True, 'data': {'global_var': global_var, 'steps': steps}})
|
def job_get_job_task_detail(request, biz_cc_id, task_id):
    """Query a JOB task's detail via the request-scoped client and normalise it.

    On API failure logs the upstream message and returns a failed
    JsonResponse; on success returns the task's global variables and steps
    in the frontend-facing layout.
    """
    client = get_client_by_request(request)
    job_result = client.job.get_job_detail({'bk_biz_id': biz_cc_id,
                                            'bk_job_id': task_id})
    if not job_result['result']:
        message = _(u"查询作业平台(JOB)的作业模板详情[app_id=%s]接口job.get_task_detail返回失败: %s") % (
            biz_cc_id, job_result['message'])
        logger.error(message)
        return JsonResponse({'result': False, 'data': [], 'message': message})

    type_names = {1: _(u"脚本"), 2: _(u"文件"), 4: u"SQL"}
    task_detail = job_result['data']

    def _format_var(var):
        # string / IP / array variables carry a plain value; other types are
        # rendered as a list of "cloud_id:ip" strings
        if var['type'] in (JOB_VAR_TYPE_STR, JOB_VAR_TYPE_IP, JOB_VAR_TYPE_ARRAY):
            value = var.get('value', '')
        else:
            value = ['{plat_id}:{ip}'.format(plat_id=ip_item['bk_cloud_id'], ip=ip_item['ip'])
                     for ip_item in var.get('ip_list', [])]
        return {
            'id': var['id'],
            # category defaults to 1 when the API omits it
            'category': var.get('category', 1),
            'name': var['name'],
            'type': var['type'],
            'value': value,
            'description': var['description']
        }

    global_var = [_format_var(var) for var in task_detail.get('global_vars', [])]

    steps = [{
        'stepId': info['step_id'],
        'name': info['name'],
        'scriptParams': info.get('script_param', ''),
        'account': info.get('account', ''),
        'ipList': '',
        'type': info['type'],
        'type_name': type_names.get(info['type'], info['type'])
    } for info in task_detail.get('steps', [])]

    return JsonResponse({'result': True, 'data': {'global_var': global_var, 'steps': steps}})
|
[{'piece_type': 'error message', 'piece_content': '------STARTING: Migrate Database------\\nTraceback (most recent call last):\\nFile "manage.py", line 27, in <module>\\nexecute_from_command_line(sys.argv)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line\\nutility.execute()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute\\ndjango.setup()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup\\napps.populate(settings.INSTALLED_APPS)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate\\napp_config = AppConfig.create(entry)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create\\nmod = import_module(mod_path)\\nFile "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module\\n__import__(name)\\nFile "/data/app/code/pipeline/apps.py", line 18, in <module>\\nfrom rediscluster import StrictRedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>\\nfrom .client import StrictRedisCluster, RedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>\\nfrom .connection import (\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>\\nfrom .nodemanager import NodeManager\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>\\nfrom redis._compat import b, unicode, bytes, long, basestring\\nImportError: cannot import name b\\n------FAILURE: Migrate Database------'}]
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def get_bk_user(request):
    """Resolve the platform user bound to the current WeChat user.

    Looks up the ``wx_userid`` UserProperty for ``request.weixin_user`` and
    re-fetches the matching user through ``get_user_model()``. Falls back to
    ``AnonymousUser()`` when there is no WeChat user or no matching property.
    """
    wx_user = request.weixin_user
    if not wx_user or isinstance(wx_user, AnonymousUser):
        return AnonymousUser()

    try:
        prop = UserProperty.objects.get(key='wx_userid', value=wx_user.userid)
    except UserProperty.DoesNotExist:
        logger.warning('user[wx_userid=%s] not in UserProperty' % wx_user.userid)
        return AnonymousUser()

    return get_user_model().objects.get(username=prop.user.username) or AnonymousUser()
|
def get_bk_user(request):
    """Return the user linked to the current WeChat user, or AnonymousUser.

    A ``UserProperty`` row with ``key='wx_userid'`` maps the WeChat user id
    to a platform user; when the mapping is absent the caller gets
    ``AnonymousUser()``.
    """
    wx_user = request.weixin_user
    if not wx_user or isinstance(wx_user, AnonymousUser):
        return AnonymousUser()

    try:
        matched = UserProperty.objects.get(key='wx_userid', value=wx_user.userid)
    except UserProperty.DoesNotExist:
        return AnonymousUser()

    return matched.user or AnonymousUser()
|
[{'piece_type': 'error message', 'piece_content': '------STARTING: Migrate Database------\\nTraceback (most recent call last):\\nFile "manage.py", line 27, in <module>\\nexecute_from_command_line(sys.argv)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line\\nutility.execute()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute\\ndjango.setup()\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup\\napps.populate(settings.INSTALLED_APPS)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate\\napp_config = AppConfig.create(entry)\\nFile "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create\\nmod = import_module(mod_path)\\nFile "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module\\n__import__(name)\\nFile "/data/app/code/pipeline/apps.py", line 18, in <module>\\nfrom rediscluster import StrictRedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>\\nfrom .client import StrictRedisCluster, RedisCluster\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>\\nfrom .connection import (\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>\\nfrom .nodemanager import NodeManager\\nFile "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>\\nfrom redis._compat import b, unicode, bytes, long, basestring\\nImportError: cannot import name b\\n------FAILURE: Migrate Database------'}]
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def fit(self, dataset: Dataset):
    """Calculates statistics for this workflow on the input dataset

    Parameters
    -----------
    dataset: Dataset
        The input dataset to calculate statistics for. If there is a train/test split this
        data should be the training dataset only.
    """
    self._clear_worker_cache()
    # Only pull the columns this workflow actually consumes into the dask dataframe.
    ddf = dataset.to_ddf(columns=self._input_columns())

    # Get a dictionary mapping all StatOperators we need to fit to a set of any dependant
    # StatOperators (having StatOperators that depend on the output of other StatOperators
    # means that will have multiple phases in the fit cycle here)
    stat_ops = {op: _get_stat_ops(op.parents) for op in _get_stat_ops([self.column_group])}

    while stat_ops:
        # get all the StatOperators that we can currently call fit on (no outstanding
        # dependencies)
        current_phase = [op for op, dependencies in stat_ops.items() if not dependencies]
        if not current_phase:
            # this shouldn't happen, but lets not infinite loop just in case
            raise RuntimeError("failed to find dependency-free StatOperator to fit")

        stats, ops = [], []
        for column_group in current_phase:
            # apply transforms necessary for the inputs to the current column group, ignoring
            # the transforms from the statop itself
            transformed_ddf = _transform_ddf(ddf, column_group.parents)

            op = column_group.op
            try:
                # op.fit returns a lazy (delayed/future) statistics computation
                stats.append(op.fit(column_group.input_column_names, transformed_ddf))
                ops.append(op)
            except Exception:
                LOG.exception("Failed to fit operator %s", column_group.op)
                raise

        # Materialise this phase's statistics: through the distributed client when
        # one is attached, otherwise synchronously in-process via dask.
        if self.client:
            results = [r.result() for r in self.client.compute(stats)]
        else:
            results = dask.compute(stats, scheduler="synchronous")[0]

        for computed_stats, op in zip(results, ops):
            op.fit_finalize(computed_stats)

        # Remove all the operators we processed in this phase, and remove
        # from the dependencies of other ops too
        for stat_op in current_phase:
            stat_ops.pop(stat_op)
        for dependencies in stat_ops.values():
            dependencies.difference_update(current_phase)

    # hack: store input/output dtypes here. We should have complete dtype
    # information for each operator (like we do for column names), but as
    # an interim solution this gets us what we need.
    # Input dtypes are restricted to the workflow's input columns so the stored
    # schema matches what the workflow actually consumes.
    input_dtypes = dataset.to_ddf()[self._input_columns()].dtypes
    self.input_dtypes = dict(zip(input_dtypes.index, input_dtypes))
    output_dtypes = self.transform(dataset).to_ddf().head(1).dtypes
    self.output_dtypes = dict(zip(output_dtypes.index, output_dtypes))
|
def fit(self, dataset: Dataset):
    """Calculates statistics for this workflow on the input dataset

    Parameters
    -----------
    dataset: Dataset
        The input dataset to calculate statistics for. If there is a train/test split this
        data should be the training dataset only.
    """
    self._clear_worker_cache()
    # Only pull the columns this workflow actually consumes into the dask dataframe.
    ddf = dataset.to_ddf(columns=self._input_columns())

    # Get a dictionary mapping all StatOperators we need to fit to a set of any dependant
    # StatOperators (having StatOperators that depend on the output of other StatOperators
    # means that will have multiple phases in the fit cycle here)
    stat_ops = {op: _get_stat_ops(op.parents) for op in _get_stat_ops([self.column_group])}

    while stat_ops:
        # get all the StatOperators that we can currently call fit on (no outstanding
        # dependencies)
        current_phase = [op for op, dependencies in stat_ops.items() if not dependencies]
        if not current_phase:
            # this shouldn't happen, but lets not infinite loop just in case
            raise RuntimeError("failed to find dependency-free StatOperator to fit")

        stats, ops = [], []
        for column_group in current_phase:
            # apply transforms necessary for the inputs to the current column group, ignoring
            # the transforms from the statop itself
            transformed_ddf = _transform_ddf(ddf, column_group.parents)

            op = column_group.op
            try:
                # op.fit returns a lazy (delayed/future) statistics computation
                stats.append(op.fit(column_group.input_column_names, transformed_ddf))
                ops.append(op)
            except Exception:
                LOG.exception("Failed to fit operator %s", column_group.op)
                raise

        # Materialise this phase's statistics: through the distributed client when
        # one is attached, otherwise synchronously in-process via dask.
        if self.client:
            results = [r.result() for r in self.client.compute(stats)]
        else:
            results = dask.compute(stats, scheduler="synchronous")[0]

        for computed_stats, op in zip(results, ops):
            op.fit_finalize(computed_stats)

        # Remove all the operators we processed in this phase, and remove
        # from the dependencies of other ops too
        for stat_op in current_phase:
            stat_ops.pop(stat_op)
        for dependencies in stat_ops.values():
            dependencies.difference_update(current_phase)

    # hack: store input/output dtypes here. We should have complete dtype
    # information for each operator (like we do for column names), but as
    # an interim solution this gets us what we need.
    # FIX: restrict the input-dtype capture to the workflow's input columns.
    # Previously the dtypes of ALL dataset columns were recorded, so datasets
    # carrying extra (unused) columns produced an input schema that disagreed
    # with the columns the workflow actually consumes downstream.
    input_dtypes = dataset.to_ddf()[self._input_columns()].dtypes
    self.input_dtypes = dict(zip(input_dtypes.index, input_dtypes))
    output_dtypes = self.transform(dataset).to_ddf().head(1).dtypes
    self.output_dtypes = dict(zip(output_dtypes.index, output_dtypes))
|
[{'piece_type': 'error message', 'piece_content': 'E0224 15:58:10.330248 178 model_repository_manager.cc:963] failed to load \\'amazonreview_tf\\' version 1: Internal: unable to create stream: the provided PTX was compiled with an unsupported toolchain.\\n/nvtabular/nvtabular/workflow.py:236: UserWarning: Loading workflow generated with cudf version 0+untagged.1.gbd321d1 - but we are running cudf 0.18.0a+253.g53ed28e91c. This might cause issues\\nwarnings.warn(\\nE0224 15:58:20.534884 178 model_repository_manager.cc:963] failed to load \\'amazonreview_nvt\\' version 1: Internal: Traceback (most recent call last):\\nFile "/opt/tritonserver/backends/python/startup.py", line 197, in Init\\nself.backend.initialize(args)\\nFile "/models/models/amazonreview_nvt/1/model.py", line 57, in initialize\\nself.output_dtypes[name] = triton_string_to_numpy(conf["data_type"])\\nTypeError: \\'NoneType\\' object is not subscriptable\\nI0224 15:58:20.535093 178 server.cc:490]'}]
|
E0224 15:58:10.330248 178 model_repository_manager.cc:963] failed to load 'amazonreview_tf' version 1: Internal: unable to create stream: the provided PTX was compiled with an unsupported toolchain.
/nvtabular/nvtabular/workflow.py:236: UserWarning: Loading workflow generated with cudf version 0+untagged.1.gbd321d1 - but we are running cudf 0.18.0a+253.g53ed28e91c. This might cause issues
warnings.warn(
E0224 15:58:20.534884 178 model_repository_manager.cc:963] failed to load 'amazonreview_nvt' version 1: Internal: Traceback (most recent call last):
File "/opt/tritonserver/backends/python/startup.py", line 197, in Init
self.backend.initialize(args)
File "/models/models/amazonreview_nvt/1/model.py", line 57, in initialize
self.output_dtypes[name] = triton_string_to_numpy(conf["data_type"])
TypeError: 'NoneType' object is not subscriptable
I0224 15:58:20.535093 178 server.cc:490]
|
TypeError
|
def main(args):
    """Multi-GPU Criteo/DLRM Preprocessing Benchmark

    This benchmark is designed to measure the time required to preprocess
    the Criteo (1TB) dataset for Facebook’s DLRM model. The user must specify
    the path of the raw dataset (using the `--data-path` flag), as well as the
    output directory for all temporary/final data (using the `--out-path` flag)

    Example Usage
    -------------

    python dask-nvtabular-criteo-benchmark.py
        --data-path /path/to/criteo_parquet --out-path /out/dir/`

    Dataset Requirements (Parquet)
    ------------------------------

    This benchmark is designed with a parquet-formatted dataset in mind.
    While a CSV-formatted dataset can be processed by NVTabular, converting
    to parquet will yield significantly better performance. To convert your
    dataset, try using the `optimize_criteo.ipynb` notebook (also located
    in `NVTabular/examples/`)

    For a detailed parameter overview see `NVTabular/examples/MultiGPUBench.md`
    """

    # Input
    # Strip a single trailing "/" so path joins below produce clean paths.
    data_path = args.data_path[:-1] if args.data_path[-1] == "/" else args.data_path
    freq_limit = args.freq_limit
    out_files_per_proc = args.out_files_per_proc
    high_card_columns = args.high_cards.split(",")
    dashboard_port = args.dashboard_port
    if args.protocol == "ucx":
        # honor a user-exported UCX_TLS, otherwise use a sensible CUDA-aware default
        UCX_TLS = os.environ.get("UCX_TLS", "tcp,cuda_copy,cuda_ipc,sockcm")
        os.environ["UCX_TLS"] = UCX_TLS

    # Cleanup output directory
    base_dir = args.out_path[:-1] if args.out_path[-1] == "/" else args.out_path
    dask_workdir = os.path.join(base_dir, "workdir")
    output_path = os.path.join(base_dir, "output")
    stats_path = os.path.join(base_dir, "stats")
    setup_dirs(base_dir, dask_workdir, output_path, stats_path)

    # Use Criteo dataset by default (for now)
    # Criteo defaults: 13 continuous (I1..I13) and 26 categorical (C1..C26) columns.
    cont_names = (
        args.cont_names.split(",") if args.cont_names else ["I" + str(x) for x in range(1, 14)]
    )
    cat_names = (
        args.cat_names.split(",") if args.cat_names else ["C" + str(x) for x in range(1, 27)]
    )
    label_name = ["label"]

    # Specify Categorify/GroupbyStatistics options
    # High-cardinality columns get a user-chosen tree width and cache policy;
    # everything else uses width 1 and the "low" cache setting.
    tree_width = {}
    cat_cache = {}
    for col in cat_names:
        if col in high_card_columns:
            tree_width[col] = args.tree_width
            cat_cache[col] = args.cat_cache_high
        else:
            tree_width[col] = 1
            cat_cache[col] = args.cat_cache_low

    # Use total device size to calculate args.device_limit_frac
    device_size = device_mem_size(kind="total")
    device_limit = int(args.device_limit_frac * device_size)
    device_pool_size = int(args.device_pool_frac * device_size)
    part_size = int(args.part_mem_frac * device_size)

    # Parse shuffle option
    shuffle = None
    if args.shuffle == "PER_WORKER":
        shuffle = nvt_io.Shuffle.PER_WORKER
    elif args.shuffle == "PER_PARTITION":
        shuffle = nvt_io.Shuffle.PER_PARTITION

    # Check if any device memory is already occupied
    # Warn (don't abort) when more than 1 GB is already in use on a target device.
    for dev in args.devices.split(","):
        fmem = _pynvml_mem_size(kind="free", index=int(dev))
        used = (device_size - fmem) / 1e9
        if used > 1.0:
            warnings.warn(f"BEWARE - {used} GB is already occupied on device {int(dev)}!")

    # Setup LocalCUDACluster
    if args.protocol == "tcp":
        cluster = LocalCUDACluster(
            protocol=args.protocol,
            n_workers=args.n_workers,
            CUDA_VISIBLE_DEVICES=args.devices,
            device_memory_limit=device_limit,
            local_directory=dask_workdir,
            dashboard_address=":" + dashboard_port,
        )
    else:
        # non-tcp (ucx) path additionally enables NVLink transfers
        cluster = LocalCUDACluster(
            protocol=args.protocol,
            n_workers=args.n_workers,
            CUDA_VISIBLE_DEVICES=args.devices,
            enable_nvlink=True,
            device_memory_limit=device_limit,
            local_directory=dask_workdir,
            dashboard_address=":" + dashboard_port,
        )
    client = Client(cluster)

    # Setup RMM pool
    if args.device_pool_frac > 0.01:
        setup_rmm_pool(client, device_pool_size)

    # Define Dask NVTabular "Workflow"
    if args.normalize:
        cont_features = cont_names >> ops.FillMissing() >> ops.Normalize()
    else:
        cont_features = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp()

    cat_features = cat_names >> ops.Categorify(
        out_path=stats_path,
        tree_width=tree_width,
        cat_cache=cat_cache,
        freq_threshold=freq_limit,
        search_sorted=not freq_limit,
        on_host=not args.cats_on_device,
    )
    processor = Workflow(cat_features + cont_features + label_name, client=client)

    dataset = Dataset(data_path, "parquet", part_size=part_size)

    # Execute the dask graph
    runtime = time.time()

    processor.fit(dataset)

    if args.profile is not None:
        with performance_report(filename=args.profile):
            processor.transform(dataset).to_parquet(
                output_path=output_path,
                num_threads=args.num_io_threads,
                shuffle=shuffle,
                out_files_per_proc=out_files_per_proc,
            )
    else:
        processor.transform(dataset).to_parquet(
            output_path=output_path,
            num_threads=args.num_io_threads,
            shuffle=shuffle,
            out_files_per_proc=out_files_per_proc,
        )
    runtime = time.time() - runtime

    print("\\nDask-NVTabular DLRM/Criteo benchmark")
    print("--------------------------------------")
    print(f"partition size | {part_size}")
    print(f"protocol | {args.protocol}")
    print(f"device(s) | {args.devices}")
    print(f"rmm-pool-frac | {(args.device_pool_frac)}")
    print(f"out-files-per-proc | {args.out_files_per_proc}")
    print(f"num_io_threads | {args.num_io_threads}")
    print(f"shuffle | {args.shuffle}")
    print(f"cats-on-device | {args.cats_on_device}")
    print("======================================")
    print(f"Runtime[s] | {runtime}")
    print("======================================\\n")

    client.close()
|
def main(args):
    """Multi-GPU Criteo/DLRM Preprocessing Benchmark

    This benchmark is designed to measure the time required to preprocess
    the Criteo (1TB) dataset for Facebook’s DLRM model. The user must specify
    the path of the raw dataset (using the `--data-path` flag), as well as the
    output directory for all temporary/final data (using the `--out-path` flag)

    Example Usage
    -------------

    python dask-nvtabular-criteo-benchmark.py
        --data-path /path/to/criteo_parquet --out-path /out/dir/`

    Dataset Requirements (Parquet)
    ------------------------------

    This benchmark is designed with a parquet-formatted dataset in mind.
    While a CSV-formatted dataset can be processed by NVTabular, converting
    to parquet will yield significantly better performance. To convert your
    dataset, try using the `optimize_criteo.ipynb` notebook (also located
    in `NVTabular/examples/`)

    For a detailed parameter overview see `NVTabular/examples/MultiGPUBench.md`
    """

    # Input
    # NOTE(review): paths are used verbatim here (no trailing-slash handling),
    # and the directory setup below uses local-only os.mkdir/shutil.rmtree —
    # presumably this breaks for remote output paths (e.g. gs://); confirm.
    data_path = args.data_path
    freq_limit = args.freq_limit
    out_files_per_proc = args.out_files_per_proc
    high_card_columns = args.high_cards.split(",")
    dashboard_port = args.dashboard_port
    if args.protocol == "ucx":
        # honor a user-exported UCX_TLS, otherwise use a sensible CUDA-aware default
        UCX_TLS = os.environ.get("UCX_TLS", "tcp,cuda_copy,cuda_ipc,sockcm")
        os.environ["UCX_TLS"] = UCX_TLS

    # Cleanup output directory
    BASE_DIR = args.out_path
    dask_workdir = os.path.join(BASE_DIR, "workdir")
    output_path = os.path.join(BASE_DIR, "output")
    stats_path = os.path.join(BASE_DIR, "stats")
    if not os.path.isdir(BASE_DIR):
        os.mkdir(BASE_DIR)
    # Wipe and recreate the three working sub-directories on every run.
    for dir_path in (dask_workdir, output_path, stats_path):
        if os.path.isdir(dir_path):
            shutil.rmtree(dir_path)
        os.mkdir(dir_path)

    # Use Criteo dataset by default (for now)
    # Criteo defaults: 13 continuous (I1..I13) and 26 categorical (C1..C26) columns.
    cont_names = (
        args.cont_names.split(",") if args.cont_names else ["I" + str(x) for x in range(1, 14)]
    )
    cat_names = (
        args.cat_names.split(",") if args.cat_names else ["C" + str(x) for x in range(1, 27)]
    )
    label_name = ["label"]

    # Specify Categorify/GroupbyStatistics options
    # High-cardinality columns get a user-chosen tree width and cache policy;
    # everything else uses width 1 and the "low" cache setting.
    tree_width = {}
    cat_cache = {}
    for col in cat_names:
        if col in high_card_columns:
            tree_width[col] = args.tree_width
            cat_cache[col] = args.cat_cache_high
        else:
            tree_width[col] = 1
            cat_cache[col] = args.cat_cache_low

    # Use total device size to calculate args.device_limit_frac
    device_size = device_mem_size(kind="total")
    device_limit = int(args.device_limit_frac * device_size)
    device_pool_size = int(args.device_pool_frac * device_size)
    part_size = int(args.part_mem_frac * device_size)

    # Parse shuffle option
    shuffle = None
    if args.shuffle == "PER_WORKER":
        shuffle = nvt_io.Shuffle.PER_WORKER
    elif args.shuffle == "PER_PARTITION":
        shuffle = nvt_io.Shuffle.PER_PARTITION

    # Check if any device memory is already occupied
    # Warn (don't abort) when more than 1 GB is already in use on a target device.
    for dev in args.devices.split(","):
        fmem = _pynvml_mem_size(kind="free", index=int(dev))
        used = (device_size - fmem) / 1e9
        if used > 1.0:
            warnings.warn(f"BEWARE - {used} GB is already occupied on device {int(dev)}!")

    # Setup LocalCUDACluster
    if args.protocol == "tcp":
        cluster = LocalCUDACluster(
            protocol=args.protocol,
            n_workers=args.n_workers,
            CUDA_VISIBLE_DEVICES=args.devices,
            device_memory_limit=device_limit,
            local_directory=dask_workdir,
            dashboard_address=":" + dashboard_port,
        )
    else:
        # non-tcp (ucx) path additionally enables NVLink transfers
        cluster = LocalCUDACluster(
            protocol=args.protocol,
            n_workers=args.n_workers,
            CUDA_VISIBLE_DEVICES=args.devices,
            enable_nvlink=True,
            device_memory_limit=device_limit,
            local_directory=dask_workdir,
            dashboard_address=":" + dashboard_port,
        )
    client = Client(cluster)

    # Setup RMM pool
    if args.device_pool_frac > 0.01:
        setup_rmm_pool(client, device_pool_size)

    # Define Dask NVTabular "Workflow"
    if args.normalize:
        cont_features = cont_names >> ops.FillMissing() >> ops.Normalize()
    else:
        cont_features = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp()

    cat_features = cat_names >> ops.Categorify(
        out_path=stats_path,
        tree_width=tree_width,
        cat_cache=cat_cache,
        freq_threshold=freq_limit,
        search_sorted=not freq_limit,
        on_host=not args.cats_on_device,
    )
    processor = Workflow(cat_features + cont_features + label_name, client=client)

    dataset = Dataset(data_path, "parquet", part_size=part_size)

    # Execute the dask graph
    runtime = time.time()

    processor.fit(dataset)

    if args.profile is not None:
        with performance_report(filename=args.profile):
            processor.transform(dataset).to_parquet(
                output_path=output_path,
                num_threads=args.num_io_threads,
                shuffle=shuffle,
                out_files_per_proc=out_files_per_proc,
            )
    else:
        processor.transform(dataset).to_parquet(
            output_path=output_path,
            num_threads=args.num_io_threads,
            shuffle=shuffle,
            out_files_per_proc=out_files_per_proc,
        )
    runtime = time.time() - runtime

    print("\\nDask-NVTabular DLRM/Criteo benchmark")
    print("--------------------------------------")
    print(f"partition size | {part_size}")
    print(f"protocol | {args.protocol}")
    print(f"device(s) | {args.devices}")
    print(f"rmm-pool-frac | {(args.device_pool_frac)}")
    print(f"out-files-per-proc | {args.out_files_per_proc}")
    print(f"num_io_threads | {args.num_io_threads}")
    print(f"shuffle | {args.shuffle}")
    print(f"cats-on-device | {args.cats_on_device}")
    print("======================================")
    print(f"Runtime[s] | {runtime}")
    print("======================================\\n")

    client.close()
|
[{'piece_type': 'error message', 'piece_content': '(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f\\nrac 0.7 --device-pool-frac 0.8\\ndistributed.worker - WARNING - Compute Failed\\nFunction: subgraph_callable\\nargs: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26\\n0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550\\n1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394\\n2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227\\n3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000\\nkwargs: {}\\nException: FileNotFoundError(2, \\'No such file or directory\\')\\n\\nTraceback (most recent call last):\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>\\nmain(parse_args())\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main\\noutput_path=output_path,\\nFile "/nvtabular/nvtabular/workflow.py", line 876, in apply\\ndtypes=dtypes,\\nFile "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph\\nnum_threads=num_io_threads,\\nFile "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset\\nnum_threads,\\nFile "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset\\nout = client.compute(out).result()\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result\\nraise exc.with_traceback(tb)\\nFile 
"/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__\\nreturn core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get\\nresult = _execute_task(task, cache)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task\\nreturn func(*(_execute_task(a, cache) for a in args))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply\\nreturn func(*args, **kwargs)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce\\ndf = func(*args, **kwargs)\\nFile "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op\\ngdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)\\nFile "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner\\nreturn func(*args, **kwds)\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op\\ncat_names=cat_names,\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode\\ncache, path, columns=selection_r, cache=cat_cache, cats_only=True\\nFile "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data\\nwith open(path, "rb") as f:\\nFileNotFoundError: [Errno 2] No such file or directory: \\'gs://merlin-datasets/output/stats/categories/unique.C1.parquet\\''}]
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def __init__(self, out_dir, **kwargs):
    """Set up per-writer bookkeeping on top of the base-class initialization."""
    super().__init__(out_dir, **kwargs)
    # Parallel per-partition records: output paths, open file handles,
    # writer objects, and (for in-memory mode) byte buffers.
    self.data_paths, self.data_files = [], []
    self.data_writers, self.data_bios = [], []
    # Reentrant lock — presumably serializes writer registration; confirm with callers.
    self._lock = threading.RLock()
    # Bind the class-level writer factory; per-writer kwargs start empty.
    self.pwriter = self._pwriter
    self.pwriter_kwargs = {}
|
def __init__(self, out_dir, **kwargs):
    """Initialize writer bookkeeping after the base-class setup."""
    super().__init__(out_dir, **kwargs)
    # Per-partition records kept in parallel: paths, writer objects,
    # and (for in-memory mode) byte buffers.
    self.data_paths, self.data_writers = [], []
    self.data_bios = []
    # Reentrant lock — presumably serializes writer registration; confirm with callers.
    self._lock = threading.RLock()
    # Bind the class-level writer factory; per-writer kwargs start empty.
    self.pwriter = self._pwriter
    self.pwriter_kwargs = {}
|
[{'piece_type': 'error message', 'piece_content': '(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f\\nrac 0.7 --device-pool-frac 0.8\\ndistributed.worker - WARNING - Compute Failed\\nFunction: subgraph_callable\\nargs: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26\\n0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550\\n1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394\\n2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227\\n3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000\\nkwargs: {}\\nException: FileNotFoundError(2, \\'No such file or directory\\')\\n\\nTraceback (most recent call last):\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>\\nmain(parse_args())\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main\\noutput_path=output_path,\\nFile "/nvtabular/nvtabular/workflow.py", line 876, in apply\\ndtypes=dtypes,\\nFile "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph\\nnum_threads=num_io_threads,\\nFile "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset\\nnum_threads,\\nFile "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset\\nout = client.compute(out).result()\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result\\nraise exc.with_traceback(tb)\\nFile 
"/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__\\nreturn core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get\\nresult = _execute_task(task, cache)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task\\nreturn func(*(_execute_task(a, cache) for a in args))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply\\nreturn func(*args, **kwargs)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce\\ndf = func(*args, **kwargs)\\nFile "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op\\ngdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)\\nFile "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner\\nreturn func(*args, **kwds)\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op\\ncat_names=cat_names,\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode\\ncache, path, columns=selection_r, cache=cat_cache, cats_only=True\\nFile "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data\\nwith open(path, "rb") as f:\\nFileNotFoundError: [Errno 2] No such file or directory: \\'gs://merlin-datasets/output/stats/categories/unique.C1.parquet\\''}]
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def _append_writer(self, path, schema=None, add_args=None, add_kwargs=None):
    """Create one more output writer (in-memory or at `path`) and register it."""
    writer_args = add_args or []
    # Call-specific kwargs take precedence over the instance-level defaults.
    writer_kwargs = {**self.pwriter_kwargs, **(add_kwargs or {})}
    if self.bytes_io:
        # In-memory mode: direct the writer at a fresh BytesIO buffer.
        target = BytesIO()
        self.data_bios.append(target)
    else:
        # File mode: open through fsspec so remote paths (gs://, s3://) work too;
        # keep the raw handle so it can be closed later.
        target = fsspec.open(path, mode="wb").open()
        self.data_files.append(target)
    self.data_writers.append(self.pwriter(target, *writer_args, **writer_kwargs))
|
def _append_writer(self, path, schema=None, add_args=None, add_kwargs=None):
    """Construct one more writer for `path` and register it."""
    writer_args = add_args or []
    # Call-specific kwargs take precedence over the instance-level defaults.
    writer_kwargs = {**self.pwriter_kwargs, **(add_kwargs or {})}
    if self.bytes_io:
        # In-memory mode: direct the writer at a fresh BytesIO buffer.
        buf = BytesIO()
        self.data_bios.append(buf)
        self.data_writers.append(self.pwriter(buf, *writer_args, **writer_kwargs))
    else:
        self.data_writers.append(self.pwriter(path, *writer_args, **writer_kwargs))
|
[{'piece_type': 'error message', 'piece_content': '(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f\\nrac 0.7 --device-pool-frac 0.8\\ndistributed.worker - WARNING - Compute Failed\\nFunction: subgraph_callable\\nargs: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26\\n0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550\\n1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394\\n2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227\\n3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000\\nkwargs: {}\\nException: FileNotFoundError(2, \\'No such file or directory\\')\\n\\nTraceback (most recent call last):\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>\\nmain(parse_args())\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main\\noutput_path=output_path,\\nFile "/nvtabular/nvtabular/workflow.py", line 876, in apply\\ndtypes=dtypes,\\nFile "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph\\nnum_threads=num_io_threads,\\nFile "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset\\nnum_threads,\\nFile "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset\\nout = client.compute(out).result()\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result\\nraise exc.with_traceback(tb)\\nFile 
"/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__\\nreturn core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get\\nresult = _execute_task(task, cache)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task\\nreturn func(*(_execute_task(a, cache) for a in args))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply\\nreturn func(*args, **kwargs)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce\\ndf = func(*args, **kwargs)\\nFile "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op\\ngdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)\\nFile "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner\\nreturn func(*args, **kwds)\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op\\ncat_names=cat_names,\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode\\ncache, path, columns=selection_r, cache=cat_cache, cats_only=True\\nFile "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data\\nwith open(path, "rb") as f:\\nFileNotFoundError: [Errno 2] No such file or directory: \\'gs://merlin-datasets/output/stats/categories/unique.C1.parquet\\''}]
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def _close_writers(self):
    """Finalize every writer and return {basename: metadata} per output file."""
    metadata = {}
    for writer, full_path in zip(self.data_writers, self.data_paths):
        # Metadata is keyed by the file's basename on this filesystem.
        basename = full_path.split(self.fs.sep)[-1]
        metadata[basename] = writer.close(metadata_file_path=basename)
    # Also release the underlying (possibly remote) file handles.
    for handle in self.data_files:
        handle.close()
    return metadata
|
def _close_writers(self):
    """Finalize every writer and return {basename: metadata} per output file."""
    metadata = {}
    for writer, full_path in zip(self.data_writers, self.data_paths):
        # Metadata entries are keyed by the file's basename on this filesystem.
        basename = full_path.split(self.fs.sep)[-1]
        metadata[basename] = writer.close(metadata_file_path=basename)
    return metadata
|
[{'piece_type': 'error message', 'piece_content': '(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f\\nrac 0.7 --device-pool-frac 0.8\\ndistributed.worker - WARNING - Compute Failed\\nFunction: subgraph_callable\\nargs: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26\\n0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550\\n1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394\\n2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227\\n3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000\\nkwargs: {}\\nException: FileNotFoundError(2, \\'No such file or directory\\')\\n\\nTraceback (most recent call last):\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>\\nmain(parse_args())\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main\\noutput_path=output_path,\\nFile "/nvtabular/nvtabular/workflow.py", line 876, in apply\\ndtypes=dtypes,\\nFile "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph\\nnum_threads=num_io_threads,\\nFile "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset\\nnum_threads,\\nFile "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset\\nout = client.compute(out).result()\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result\\nraise exc.with_traceback(tb)\\nFile 
"/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__\\nreturn core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get\\nresult = _execute_task(task, cache)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task\\nreturn func(*(_execute_task(a, cache) for a in args))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply\\nreturn func(*args, **kwargs)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce\\ndf = func(*args, **kwargs)\\nFile "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op\\ngdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)\\nFile "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner\\nreturn func(*args, **kwds)\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op\\ncat_names=cat_names,\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode\\ncache, path, columns=selection_r, cache=cat_cache, cats_only=True\\nFile "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data\\nwith open(path, "rb") as f:\\nFileNotFoundError: [Errno 2] No such file or directory: \\'gs://merlin-datasets/output/stats/categories/unique.C1.parquet\\''}]
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def fetch_table_data(
    table_cache, path, cache="disk", cats_only=False, reader=None, columns=None, **kwargs
):
    """Look up (or load and cache) the table stored at `path`.

    The cache may hold either a DataFrame or serialized parquet bytes in host
    memory; `cats_only=True` additionally materializes the row index as a
    "labels" column (the layout used by the `Categorify` transformation).
    """
    entry = table_cache.get(path, None)

    # Cache hit holding serialized (non-DataFrame) bytes: deserialize on demand.
    if entry and not isinstance(entry, cudf.DataFrame):
        if not cats_only:
            return cudf.io.read_parquet(entry, index=False)
        frame = cudf.io.read_parquet(entry, index=False, columns=columns)
        frame.index.name = "labels"
        frame.reset_index(drop=False, inplace=True)
        return frame

    if reader is None:
        reader = cudf.io.read_parquet

    if entry is None:
        # Cache miss: load from storage according to the caching policy.
        if cache in ("device", "disk"):
            entry = reader(path, index=False, columns=columns, **kwargs)
        elif cache == "host":
            if reader == cudf.io.read_parquet:
                # Already parquet: stage the raw bytes in host memory as-is.
                # fsspec handles remote URLs (gs://, s3://) as well as local files.
                with fsspec.open(path, "rb") as f:
                    table_cache[path] = BytesIO(f.read())
                entry = reader(table_cache[path], index=False, columns=columns, **kwargs)
            else:
                # Other formats: read, then re-serialize to parquet in host memory.
                entry = reader(path, index=False, columns=columns, **kwargs)
                table_cache[path] = BytesIO()
                entry.to_parquet(table_cache[path])
        if cats_only:
            entry.index.name = "labels"
            entry.reset_index(drop=False, inplace=True)
        if cache == "device":
            # Keep a shallow copy resident in the cache.
            table_cache[path] = entry.copy(deep=False)
    return entry
|
def fetch_table_data(
    table_cache, path, cache="disk", cats_only=False, reader=None, columns=None, **kwargs
):
    """Utility to retrieve a cudf DataFrame from a cache (and add the
    DataFrame to a cache if the element is missing). Note that `cats_only=True`
    results in optimized logic for the `Categorify` transformation.

    Parameters
    ----------
    table_cache : MutableMapping
        Maps `path` to either a DataFrame or in-host-memory parquet bytes.
    path : str
        Location of the table; may be a remote URL (e.g. gs://, s3://).
    cache : {"disk", "device", "host"}
        Caching policy applied on a cache miss.
    cats_only : bool
        If True, materialize the row index as a "labels" column.
    reader : callable, optional
        Table reader; defaults to `cudf.io.read_parquet`.
    columns : list, optional
        Column subset to read.
    """
    table = table_cache.get(path, None)
    if table and not isinstance(table, cudf.DataFrame):
        # Cached entry holds serialized parquet bytes; deserialize on demand.
        if not cats_only:
            return cudf.io.read_parquet(table, index=False)
        df = cudf.io.read_parquet(table, index=False, columns=columns)
        df.index.name = "labels"
        df.reset_index(drop=False, inplace=True)
        return df
    reader = reader or cudf.io.read_parquet
    if table is None:
        if cache in ("device", "disk"):
            table = reader(path, index=False, columns=columns, **kwargs)
        elif cache == "host":
            if reader == cudf.io.read_parquet:
                # If the file is already in parquet format,
                # we can just move the same bytes to host memory.
                # BUGFIX: use fsspec instead of the builtin `open`, which
                # cannot resolve remote paths such as gs:// or s3:// and
                # raised FileNotFoundError for cloud-stored statistics files.
                import fsspec  # local import keeps this fix self-contained

                with fsspec.open(path, "rb") as f:
                    table_cache[path] = BytesIO(f.read())
                table = reader(table_cache[path], index=False, columns=columns, **kwargs)
            else:
                # Otherwise, we should convert the format to parquet
                table = reader(path, index=False, columns=columns, **kwargs)
                table_cache[path] = BytesIO()
                table.to_parquet(table_cache[path])
        if cats_only:
            table.index.name = "labels"
            table.reset_index(drop=False, inplace=True)
        if cache == "device":
            # Keep a shallow copy resident in the cache.
            table_cache[path] = table.copy(deep=False)
    return table
|
[{'piece_type': 'error message', 'piece_content': '(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f\\nrac 0.7 --device-pool-frac 0.8\\ndistributed.worker - WARNING - Compute Failed\\nFunction: subgraph_callable\\nargs: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26\\n0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550\\n1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394\\n2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227\\n3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000\\nkwargs: {}\\nException: FileNotFoundError(2, \\'No such file or directory\\')\\n\\nTraceback (most recent call last):\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>\\nmain(parse_args())\\nFile "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main\\noutput_path=output_path,\\nFile "/nvtabular/nvtabular/workflow.py", line 876, in apply\\ndtypes=dtypes,\\nFile "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph\\nnum_threads=num_io_threads,\\nFile "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset\\nnum_threads,\\nFile "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset\\nout = client.compute(out).result()\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result\\nraise exc.with_traceback(tb)\\nFile 
"/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__\\nreturn core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get\\nresult = _execute_task(task, cache)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task\\nreturn func(*(_execute_task(a, cache) for a in args))\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply\\nreturn func(*args, **kwargs)\\nFile "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce\\ndf = func(*args, **kwargs)\\nFile "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op\\ngdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)\\nFile "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner\\nreturn func(*args, **kwds)\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op\\ncat_names=cat_names,\\nFile "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode\\ncache, path, columns=selection_r, cache=cat_cache, cats_only=True\\nFile "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data\\nwith open(path, "rb") as f:\\nFileNotFoundError: [Errno 2] No such file or directory: \\'gs://merlin-datasets/output/stats/categories/unique.C1.parquet\\''}]
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def _chunkwise_moments(df):
    """Return single-row frames of per-chunk count, sum, and sum of squares."""
    squares = cudf.DataFrame()
    for name in df.columns:
        # Square in float64 so the sum of squares stays in floating point.
        squares[name] = df[name].astype("float64").pow(2)
    stats = {
        "df-count": df.count().to_frame().transpose(),
        "df-sum": df.sum().astype("float64").to_frame().transpose(),
        "df2-sum": squares.sum().to_frame().transpose(),
    }
    # NOTE: Perhaps we should convert to pandas here
    # (since we know the results should be small)?
    del squares
    return stats
|
def _chunkwise_moments(df):
df2 = cudf.DataFrame()
for col in df.columns:
df2[col] = df[col].astype("float64").pow(2)
vals = {
"df-count": df.count().to_frame().transpose(),
"df-sum": df.sum().to_frame().transpose(),
"df2-sum": df2.sum().to_frame().transpose(),
}
# NOTE: Perhaps we should convert to pandas here
# (since we know the results should be small)?
del df2
return vals
|
[{'piece_type': 'error message', 'piece_content': '/opt/conda/envs/rapids/lib/python3.7/site-packages/pandas/core/series.py:726: RuntimeWarning: invalid value encountered in sqrt\\nresult = getattr(ufunc, method)(*inputs, **kwargs)\\n---------------------------------------------------------------------------\\nValueError Traceback (most recent call last)\\n<timed eval> in <module>\\n\\n/nvtabular0.3/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)\\n869 out_files_per_proc=out_files_per_proc,\\n870 num_io_threads=num_io_threads,\\n--> 871 dtypes=dtypes,\\n872 )\\n873 else:\\n\\n/nvtabular0.3/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)\\n968 self._base_phase = 0 # Set _base_phase\\n969 for idx, _ in enumerate(self.phases[:end]):\\n--> 970 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))\\n971 self._base_phase = 0 # Re-Set _base_phase\\n972\\n\\n/nvtabular0.3/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)\\n755 _ddf = self.get_ddf()\\n756 if transforms:\\n--> 757 _ddf = self._aggregated_dask_transform(_ddf, transforms)\\n758\\n759 stats = []\\n\\n/nvtabular0.3/NVTabular/nvtabular/workflow.py in _aggregated_dask_transform(self, ddf, transforms)\\n724 for transform in transforms:\\n725 columns_ctx, cols_grp, target_cols, logic, stats_context = transform\\n--> 726 meta = logic(meta, columns_ctx, cols_grp, target_cols, stats_context)\\n727 return ddf.map_partitions(self.__class__._aggregated_op, transforms, meta=meta)\\n728\\n\\n/nvtabular0.3/NVTabular/nvtabular/ops/transform_operator.py in apply_op(self, gdf, columns_ctx, input_cols, target_cols, stats_context)\\n89 new_gdf = self.op_logic(gdf, target_columns, 
stats_context=stats_context)\\n90 self.update_columns_ctx(columns_ctx, input_cols, new_gdf.columns, target_columns)\\n---> 91 return self.assemble_new_df(gdf, new_gdf, target_columns)\\n92\\n93 def assemble_new_df(self, origin_gdf, new_gdf, target_columns):\\n\\n/nvtabular0.3/NVTabular/nvtabular/ops/transform_operator.py in assemble_new_df(self, origin_gdf, new_gdf, target_columns)\\n96 return new_gdf\\n97 else:\\n---> 98 origin_gdf[target_columns] = new_gdf\\n99 return origin_gdf\\n100 return cudf.concat([origin_gdf, new_gdf], axis=1)\\n\\n/opt/conda/envs/rapids/lib/python3.7/contextlib.py in inner(*args, **kwds)\\n72 def inner(*args, **kwds):\\n73 with self._recreate_cm():\\n---> 74 return func(*args, **kwds)\\n75 return inner\\n76\\n\\n/opt/conda/envs/rapids/lib/python3.7/site-packages/cudf/core/dataframe.py in __setitem__(self, arg, value)\\n777 replace_df=value,\\n778 input_cols=arg,\\n--> 779 mask=None,\\n780 )\\n781 else:\\n\\n/opt/conda/envs/rapids/lib/python3.7/site-packages/cudf/core/dataframe.py in _setitem_with_dataframe(input_df, replace_df, input_cols, mask)\\n7266 if len(input_cols) != len(replace_df.columns):\\n7267 raise ValueError(\\n-> 7268 "Number of Input Columns must be same replacement Dataframe"\\n7269 )\\n7270\\n\\nValueError: Number of Input Columns must be same replacement Dataframe'}, {'piece_type': 'other', 'piece_content': 'ds = nvt.Dataset(INPUT_PATH, engine="parquet", part_size="1000MB")\\n\\ncat_names = []\\ncont_names = [\\'NetworkTestLatency\\']\\nlabel_name = []\\n\\n# Initalize our Workflow\\nworkflow = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\\n\\n\\n# Add Normalize to the workflow for continuous columns\\nworkflow.add_cont_feature(nvt.ops.Normalize())\\n\\n# Finalize the Workflow\\nworkflow.finalize()\\n\\n%%time\\n\\nworkflow.apply(\\nds,\\noutput_format="parquet",\\noutput_path=OUTPUT_PATH,\\nshuffle=None,\\nout_files_per_proc=1,\\n)'}]
|
/opt/conda/envs/rapids/lib/python3.7/site-packages/pandas/core/series.py:726: RuntimeWarning: invalid value encountered in sqrt
result = getattr(ufunc, method)(*inputs, **kwargs)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<timed eval> in <module>
/nvtabular0.3/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)
869 out_files_per_proc=out_files_per_proc,
870 num_io_threads=num_io_threads,
--> 871 dtypes=dtypes,
872 )
873 else:
/nvtabular0.3/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)
968 self._base_phase = 0 # Set _base_phase
969 for idx, _ in enumerate(self.phases[:end]):
--> 970 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))
971 self._base_phase = 0 # Re-Set _base_phase
972
/nvtabular0.3/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)
755 _ddf = self.get_ddf()
756 if transforms:
--> 757 _ddf = self._aggregated_dask_transform(_ddf, transforms)
758
759 stats = []
/nvtabular0.3/NVTabular/nvtabular/workflow.py in _aggregated_dask_transform(self, ddf, transforms)
724 for transform in transforms:
725 columns_ctx, cols_grp, target_cols, logic, stats_context = transform
--> 726 meta = logic(meta, columns_ctx, cols_grp, target_cols, stats_context)
727 return ddf.map_partitions(self.__class__._aggregated_op, transforms, meta=meta)
728
/nvtabular0.3/NVTabular/nvtabular/ops/transform_operator.py in apply_op(self, gdf, columns_ctx, input_cols, target_cols, stats_context)
89 new_gdf = self.op_logic(gdf, target_columns, stats_context=stats_context)
90 self.update_columns_ctx(columns_ctx, input_cols, new_gdf.columns, target_columns)
---> 91 return self.assemble_new_df(gdf, new_gdf, target_columns)
92
93 def assemble_new_df(self, origin_gdf, new_gdf, target_columns):
/nvtabular0.3/NVTabular/nvtabular/ops/transform_operator.py in assemble_new_df(self, origin_gdf, new_gdf, target_columns)
96 return new_gdf
97 else:
---> 98 origin_gdf[target_columns] = new_gdf
99 return origin_gdf
100 return cudf.concat([origin_gdf, new_gdf], axis=1)
/opt/conda/envs/rapids/lib/python3.7/contextlib.py in inner(*args, **kwds)
72 def inner(*args, **kwds):
73 with self._recreate_cm():
---> 74 return func(*args, **kwds)
75 return inner
76
/opt/conda/envs/rapids/lib/python3.7/site-packages/cudf/core/dataframe.py in __setitem__(self, arg, value)
777 replace_df=value,
778 input_cols=arg,
--> 779 mask=None,
780 )
781 else:
/opt/conda/envs/rapids/lib/python3.7/site-packages/cudf/core/dataframe.py in _setitem_with_dataframe(input_df, replace_df, input_cols, mask)
7266 if len(input_cols) != len(replace_df.columns):
7267 raise ValueError(
-> 7268 "Number of Input Columns must be same replacement Dataframe"
7269 )
7270
ValueError: Number of Input Columns must be same replacement Dataframe
|
ValueError
|
def to_ddf(self, columns=None):
return dask_cudf.read_parquet(
self.paths,
columns=columns,
# can't omit reading the index in if we aren't being passed columns
index=None if columns is None else False,
gather_statistics=False,
split_row_groups=self.row_groups_per_part,
storage_options=self.storage_options,
)
|
def to_ddf(self, columns=None):
return dask_cudf.read_parquet(
self.paths,
columns=columns,
index=False,
gather_statistics=False,
split_row_groups=self.row_groups_per_part,
storage_options=self.storage_options,
)
|
[{'piece_type': 'error message', 'piece_content': '---------------------------------------------------------------------------\\nValueError Traceback (most recent call last)\\n<ipython-input-13-b133e2b51cbf> in <module>\\n2 valid_dataset = nvt.Dataset(OUTPUT_BUCKET_FOLDER+\\'valid_gdf.parquet\\', part_mem_fraction=0.12)\\n3\\n----> 4 workflow.apply(train_dataset, record_stats=True, output_path=output_train_dir, shuffle=True, out_files_per_proc=5)\\n5 workflow.apply(valid_dataset, record_stats=False, output_path=output_valid_dir, shuffle=False, out_files_per_proc=5)\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)\\n782 out_files_per_proc=out_files_per_proc,\\n783 num_io_threads=num_io_threads,\\n--> 784 dtypes=dtypes,\\n785 )\\n786 else:\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)\\n885 self._base_phase = 0 # Set _base_phase\\n886 for idx, _ in enumerate(self.phases[:end]):\\n--> 887 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))\\n888 self._base_phase = 0 # Re-Set _base_phase\\n889\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)\\n631\\n632 # Perform transforms as single dask task (per ddf partition)\\n--> 633 _ddf = self.get_ddf()\\n634 if transforms:\\n635 _ddf = self._aggregated_dask_transform(_ddf, transforms)\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in get_ddf(self)\\n587 elif isinstance(self.ddf, Dataset):\\n588 columns = self.columns_ctx["all"]["base"]\\n--> 589 return self.ddf.to_ddf(columns=columns, shuffle=self._shuffle_parts)\\n590 return self.ddf\\n591\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/io/dataset.py in 
to_ddf(self, columns, shuffle, seed)\\n263 """\\n264 # Use DatasetEngine to create ddf\\n--> 265 ddf = self.engine.to_ddf(columns=columns)\\n266\\n267 # Shuffle the partitions of ddf (optional)\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/io/parquet.py in to_ddf(self, columns)\\n102 gather_statistics=False,\\n103 split_row_groups=self.row_groups_per_part,\\n--> 104 storage_options=self.storage_options,\\n105 )\\n106\\n\\n/opt/conda/envs/rapids/lib/python3.7/site-packages/dask_cudf/io/parquet.py in read_parquet(path, columns, split_row_groups, row_groups_per_part, **kwargs)\\n192 split_row_groups=split_row_groups,\\n193 engine=CudfEngine,\\n--> 194 **kwargs,\\n195 )\\n196\\n\\n/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in read_parquet(path, columns, filters, categories, index, storage_options, engine, gather_statistics, split_row_groups, chunksize, **kwargs)\\n248 # Modify `meta` dataframe accordingly\\n249 meta, index, columns = set_index_columns(\\n--> 250 meta, index, columns, index_in_columns, auto_index_allowed\\n251 )\\n252 if meta.index.name == NONE_LABEL:\\n\\n/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed)\\n771 "The following columns were not found in the dataset %s\\\\n"\\n772 "The following columns were found %s"\\n--> 773 % (set(columns) - set(meta.columns), meta.columns)\\n774 )\\n775\\n\\nValueError: The following columns were not found in the dataset {\\'document_id_promo_count\\', \\'publish_time_days_since_published\\', \\'campaign_id_clicked_sum_ctr\\', \\'ad_id_count\\', \\'ad_id_clicked_sum_ctr\\', \\'source_id_clicked_sum_ctr\\', \\'publish_time_promo_days_since_published\\', \\'advertiser_id_clicked_sum_ctr\\', \\'document_id_promo_clicked_sum_ctr\\', \\'publisher_id_clicked_sum_ctr\\', \\'geo_location_country\\', \\'geo_location_state\\'}\\nThe following columns were found 
Index([\\'display_id\\', \\'ad_id\\', \\'clicked\\', \\'uuid\\', \\'document_id\\', \\'timestamp\\',\\n\\'platform\\', \\'geo_location\\', \\'document_id_promo\\', \\'campaign_id\\',\\n\\'advertiser_id\\', \\'source_id\\', \\'publisher_id\\', \\'publish_time\\',\\n\\'source_id_promo\\', \\'publisher_id_promo\\', \\'publish_time_promo\\',\\n\\'day_event\\'],\\ndtype=\\'object\\')'}, {'piece_type': 'reproducing source code', 'piece_content': 'import cudf\\nimport nvtabular as nvt\\nfrom nvtabular.ops import LambdaOp, Categorify\\n\\n# Stripped down dataset with geo_locaiton codes like in outbrains\\ndf = cudf.DataFrame({"geo_location": ["US>CA", "CA>BC", "US>TN>659"]})\\n\\n# defining a simple workflow that strips out the country code from the first two digits of the\\n# geo_location code and sticks in a new \\'geo_location_country\\' field\\nCATEGORICAL_COLUMNS = ["geo_location", "geo_location_country"]\\nworkflow = nvt.Workflow(cat_names=CATEGORICAL_COLUMNS, cont_names=[], label_name=[])\\nworkflow.add_feature(\\n[\\nLambdaOp(\\nop_name="country",\\nf=lambda col, gdf: col.str.slice(0, 2),\\ncolumns=["geo_location"],\\nreplace=False,\\n),\\nCategorify(),\\n]\\n)\\nworkflow.finalize()\\n\\n# This fails because \\'geo_location_country\\' isn\\'t in the parquet file, but we\\'re listing\\n# as a column\\ndf.to_parquet("geo.parquet")\\nworkflow.apply(nvt.Dataset("geo.parquet"), output_path=None)'}]
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-13-b133e2b51cbf> in <module>
2 valid_dataset = nvt.Dataset(OUTPUT_BUCKET_FOLDER+'valid_gdf.parquet', part_mem_fraction=0.12)
3
----> 4 workflow.apply(train_dataset, record_stats=True, output_path=output_train_dir, shuffle=True, out_files_per_proc=5)
5 workflow.apply(valid_dataset, record_stats=False, output_path=output_valid_dir, shuffle=False, out_files_per_proc=5)
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)
782 out_files_per_proc=out_files_per_proc,
783 num_io_threads=num_io_threads,
--> 784 dtypes=dtypes,
785 )
786 else:
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)
885 self._base_phase = 0 # Set _base_phase
886 for idx, _ in enumerate(self.phases[:end]):
--> 887 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))
888 self._base_phase = 0 # Re-Set _base_phase
889
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)
631
632 # Perform transforms as single dask task (per ddf partition)
--> 633 _ddf = self.get_ddf()
634 if transforms:
635 _ddf = self._aggregated_dask_transform(_ddf, transforms)
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in get_ddf(self)
587 elif isinstance(self.ddf, Dataset):
588 columns = self.columns_ctx["all"]["base"]
--> 589 return self.ddf.to_ddf(columns=columns, shuffle=self._shuffle_parts)
590 return self.ddf
591
/rapids/notebooks/benf/NVTabular/nvtabular/io/dataset.py in to_ddf(self, columns, shuffle, seed)
263 """
264 # Use DatasetEngine to create ddf
--> 265 ddf = self.engine.to_ddf(columns=columns)
266
267 # Shuffle the partitions of ddf (optional)
/rapids/notebooks/benf/NVTabular/nvtabular/io/parquet.py in to_ddf(self, columns)
102 gather_statistics=False,
103 split_row_groups=self.row_groups_per_part,
--> 104 storage_options=self.storage_options,
105 )
106
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask_cudf/io/parquet.py in read_parquet(path, columns, split_row_groups, row_groups_per_part, **kwargs)
192 split_row_groups=split_row_groups,
193 engine=CudfEngine,
--> 194 **kwargs,
195 )
196
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in read_parquet(path, columns, filters, categories, index, storage_options, engine, gather_statistics, split_row_groups, chunksize, **kwargs)
248 # Modify `meta` dataframe accordingly
249 meta, index, columns = set_index_columns(
--> 250 meta, index, columns, index_in_columns, auto_index_allowed
251 )
252 if meta.index.name == NONE_LABEL:
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed)
771 "The following columns were not found in the dataset %s\\n"
772 "The following columns were found %s"
--> 773 % (set(columns) - set(meta.columns), meta.columns)
774 )
775
ValueError: The following columns were not found in the dataset {'document_id_promo_count', 'publish_time_days_since_published', 'campaign_id_clicked_sum_ctr', 'ad_id_count', 'ad_id_clicked_sum_ctr', 'source_id_clicked_sum_ctr', 'publish_time_promo_days_since_published', 'advertiser_id_clicked_sum_ctr', 'document_id_promo_clicked_sum_ctr', 'publisher_id_clicked_sum_ctr', 'geo_location_country', 'geo_location_state'}
The following columns were found Index(['display_id', 'ad_id', 'clicked', 'uuid', 'document_id', 'timestamp',
'platform', 'geo_location', 'document_id_promo', 'campaign_id',
'advertiser_id', 'source_id', 'publisher_id', 'publish_time',
'source_id_promo', 'publisher_id_promo', 'publish_time_promo',
'day_event'],
dtype='object')
|
ValueError
|
def get_ddf(self):
if self.ddf is None:
raise ValueError("No dask_cudf frame available.")
elif isinstance(self.ddf, Dataset):
# Right now we can't distinguish between input columns and generated columns
# in the dataset, we don't limit the columm set right now in the to_ddf call
# (https://github.com/NVIDIA/NVTabular/issues/409 )
return self.ddf.to_ddf(shuffle=self._shuffle_parts)
return self.ddf
|
def get_ddf(self):
if self.ddf is None:
raise ValueError("No dask_cudf frame available.")
elif isinstance(self.ddf, Dataset):
columns = self.columns_ctx["all"]["base"]
return self.ddf.to_ddf(columns=columns, shuffle=self._shuffle_parts)
return self.ddf
|
[{'piece_type': 'error message', 'piece_content': '---------------------------------------------------------------------------\\nValueError Traceback (most recent call last)\\n<ipython-input-13-b133e2b51cbf> in <module>\\n2 valid_dataset = nvt.Dataset(OUTPUT_BUCKET_FOLDER+\\'valid_gdf.parquet\\', part_mem_fraction=0.12)\\n3\\n----> 4 workflow.apply(train_dataset, record_stats=True, output_path=output_train_dir, shuffle=True, out_files_per_proc=5)\\n5 workflow.apply(valid_dataset, record_stats=False, output_path=output_valid_dir, shuffle=False, out_files_per_proc=5)\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)\\n782 out_files_per_proc=out_files_per_proc,\\n783 num_io_threads=num_io_threads,\\n--> 784 dtypes=dtypes,\\n785 )\\n786 else:\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)\\n885 self._base_phase = 0 # Set _base_phase\\n886 for idx, _ in enumerate(self.phases[:end]):\\n--> 887 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))\\n888 self._base_phase = 0 # Re-Set _base_phase\\n889\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)\\n631\\n632 # Perform transforms as single dask task (per ddf partition)\\n--> 633 _ddf = self.get_ddf()\\n634 if transforms:\\n635 _ddf = self._aggregated_dask_transform(_ddf, transforms)\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in get_ddf(self)\\n587 elif isinstance(self.ddf, Dataset):\\n588 columns = self.columns_ctx["all"]["base"]\\n--> 589 return self.ddf.to_ddf(columns=columns, shuffle=self._shuffle_parts)\\n590 return self.ddf\\n591\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/io/dataset.py in 
to_ddf(self, columns, shuffle, seed)\\n263 """\\n264 # Use DatasetEngine to create ddf\\n--> 265 ddf = self.engine.to_ddf(columns=columns)\\n266\\n267 # Shuffle the partitions of ddf (optional)\\n\\n/rapids/notebooks/benf/NVTabular/nvtabular/io/parquet.py in to_ddf(self, columns)\\n102 gather_statistics=False,\\n103 split_row_groups=self.row_groups_per_part,\\n--> 104 storage_options=self.storage_options,\\n105 )\\n106\\n\\n/opt/conda/envs/rapids/lib/python3.7/site-packages/dask_cudf/io/parquet.py in read_parquet(path, columns, split_row_groups, row_groups_per_part, **kwargs)\\n192 split_row_groups=split_row_groups,\\n193 engine=CudfEngine,\\n--> 194 **kwargs,\\n195 )\\n196\\n\\n/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in read_parquet(path, columns, filters, categories, index, storage_options, engine, gather_statistics, split_row_groups, chunksize, **kwargs)\\n248 # Modify `meta` dataframe accordingly\\n249 meta, index, columns = set_index_columns(\\n--> 250 meta, index, columns, index_in_columns, auto_index_allowed\\n251 )\\n252 if meta.index.name == NONE_LABEL:\\n\\n/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed)\\n771 "The following columns were not found in the dataset %s\\\\n"\\n772 "The following columns were found %s"\\n--> 773 % (set(columns) - set(meta.columns), meta.columns)\\n774 )\\n775\\n\\nValueError: The following columns were not found in the dataset {\\'document_id_promo_count\\', \\'publish_time_days_since_published\\', \\'campaign_id_clicked_sum_ctr\\', \\'ad_id_count\\', \\'ad_id_clicked_sum_ctr\\', \\'source_id_clicked_sum_ctr\\', \\'publish_time_promo_days_since_published\\', \\'advertiser_id_clicked_sum_ctr\\', \\'document_id_promo_clicked_sum_ctr\\', \\'publisher_id_clicked_sum_ctr\\', \\'geo_location_country\\', \\'geo_location_state\\'}\\nThe following columns were found 
Index([\\'display_id\\', \\'ad_id\\', \\'clicked\\', \\'uuid\\', \\'document_id\\', \\'timestamp\\',\\n\\'platform\\', \\'geo_location\\', \\'document_id_promo\\', \\'campaign_id\\',\\n\\'advertiser_id\\', \\'source_id\\', \\'publisher_id\\', \\'publish_time\\',\\n\\'source_id_promo\\', \\'publisher_id_promo\\', \\'publish_time_promo\\',\\n\\'day_event\\'],\\ndtype=\\'object\\')'}, {'piece_type': 'reproducing source code', 'piece_content': 'import cudf\\nimport nvtabular as nvt\\nfrom nvtabular.ops import LambdaOp, Categorify\\n\\n# Stripped down dataset with geo_locaiton codes like in outbrains\\ndf = cudf.DataFrame({"geo_location": ["US>CA", "CA>BC", "US>TN>659"]})\\n\\n# defining a simple workflow that strips out the country code from the first two digits of the\\n# geo_location code and sticks in a new \\'geo_location_country\\' field\\nCATEGORICAL_COLUMNS = ["geo_location", "geo_location_country"]\\nworkflow = nvt.Workflow(cat_names=CATEGORICAL_COLUMNS, cont_names=[], label_name=[])\\nworkflow.add_feature(\\n[\\nLambdaOp(\\nop_name="country",\\nf=lambda col, gdf: col.str.slice(0, 2),\\ncolumns=["geo_location"],\\nreplace=False,\\n),\\nCategorify(),\\n]\\n)\\nworkflow.finalize()\\n\\n# This fails because \\'geo_location_country\\' isn\\'t in the parquet file, but we\\'re listing\\n# as a column\\ndf.to_parquet("geo.parquet")\\nworkflow.apply(nvt.Dataset("geo.parquet"), output_path=None)'}]
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-13-b133e2b51cbf> in <module>
2 valid_dataset = nvt.Dataset(OUTPUT_BUCKET_FOLDER+'valid_gdf.parquet', part_mem_fraction=0.12)
3
----> 4 workflow.apply(train_dataset, record_stats=True, output_path=output_train_dir, shuffle=True, out_files_per_proc=5)
5 workflow.apply(valid_dataset, record_stats=False, output_path=output_valid_dir, shuffle=False, out_files_per_proc=5)
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)
782 out_files_per_proc=out_files_per_proc,
783 num_io_threads=num_io_threads,
--> 784 dtypes=dtypes,
785 )
786 else:
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)
885 self._base_phase = 0 # Set _base_phase
886 for idx, _ in enumerate(self.phases[:end]):
--> 887 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))
888 self._base_phase = 0 # Re-Set _base_phase
889
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)
631
632 # Perform transforms as single dask task (per ddf partition)
--> 633 _ddf = self.get_ddf()
634 if transforms:
635 _ddf = self._aggregated_dask_transform(_ddf, transforms)
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in get_ddf(self)
587 elif isinstance(self.ddf, Dataset):
588 columns = self.columns_ctx["all"]["base"]
--> 589 return self.ddf.to_ddf(columns=columns, shuffle=self._shuffle_parts)
590 return self.ddf
591
/rapids/notebooks/benf/NVTabular/nvtabular/io/dataset.py in to_ddf(self, columns, shuffle, seed)
263 """
264 # Use DatasetEngine to create ddf
--> 265 ddf = self.engine.to_ddf(columns=columns)
266
267 # Shuffle the partitions of ddf (optional)
/rapids/notebooks/benf/NVTabular/nvtabular/io/parquet.py in to_ddf(self, columns)
102 gather_statistics=False,
103 split_row_groups=self.row_groups_per_part,
--> 104 storage_options=self.storage_options,
105 )
106
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask_cudf/io/parquet.py in read_parquet(path, columns, split_row_groups, row_groups_per_part, **kwargs)
192 split_row_groups=split_row_groups,
193 engine=CudfEngine,
--> 194 **kwargs,
195 )
196
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in read_parquet(path, columns, filters, categories, index, storage_options, engine, gather_statistics, split_row_groups, chunksize, **kwargs)
248 # Modify `meta` dataframe accordingly
249 meta, index, columns = set_index_columns(
--> 250 meta, index, columns, index_in_columns, auto_index_allowed
251 )
252 if meta.index.name == NONE_LABEL:
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed)
771 "The following columns were not found in the dataset %s\\n"
772 "The following columns were found %s"
--> 773 % (set(columns) - set(meta.columns), meta.columns)
774 )
775
ValueError: The following columns were not found in the dataset {'document_id_promo_count', 'publish_time_days_since_published', 'campaign_id_clicked_sum_ctr', 'ad_id_count', 'ad_id_clicked_sum_ctr', 'source_id_clicked_sum_ctr', 'publish_time_promo_days_since_published', 'advertiser_id_clicked_sum_ctr', 'document_id_promo_clicked_sum_ctr', 'publisher_id_clicked_sum_ctr', 'geo_location_country', 'geo_location_state'}
The following columns were found Index(['display_id', 'ad_id', 'clicked', 'uuid', 'document_id', 'timestamp',
'platform', 'geo_location', 'document_id_promo', 'campaign_id',
'advertiser_id', 'source_id', 'publisher_id', 'publish_time',
'source_id_promo', 'publisher_id_promo', 'publish_time_promo',
'day_event'],
dtype='object')
|
ValueError
|
def add_data(self, gdf):
# Populate columns idxs
if not self.col_idx:
for i, x in enumerate(gdf.columns.values):
self.col_idx[str(x)] = i
# list columns in cudf don't currently support chunked writing in parquet.
# hack around this by just writing a single file with this partition
# this restriction can be removed once cudf supports chunked writing
# in parquet
if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
self._write_table(0, gdf, True)
return
# Generate `ind` array to map each row to an output file.
# This approach is certainly more optimized for shuffling
# than it is for non-shuffling, but using a single code
# path is probably worth the (possible) minor overhead.
nrows = gdf.shape[0]
typ = np.min_scalar_type(nrows * 2)
if self.shuffle:
ind = cp.random.choice(cp.arange(self.num_out_files, dtype=typ), nrows)
else:
ind = cp.arange(nrows, dtype=typ)
cp.floor_divide(ind, math.ceil(nrows / self.num_out_files), out=ind)
for x, group in enumerate(
gdf.scatter_by_map(ind, map_size=self.num_out_files, keep_index=False)
):
self.num_samples[x] += len(group)
if self.num_threads > 1:
self.queue.put((x, group))
else:
self._write_table(x, group)
# wait for all writes to finish before exiting
# (so that we aren't using memory)
if self.num_threads > 1:
self.queue.join()
|
def add_data(self, gdf):
# Populate columns idxs
if not self.col_idx:
for i, x in enumerate(gdf.columns.values):
self.col_idx[str(x)] = i
# list columns in cudf don't currently support chunked writing in parquet.
# hack around this by just writing a single file with this partition
# this restriction can be removed once cudf supports chunked writing
# in parquet
if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
self._write_table(gdf, 0, True)
return
# Generate `ind` array to map each row to an output file.
# This approach is certainly more optimized for shuffling
# than it is for non-shuffling, but using a single code
# path is probably worth the (possible) minor overhead.
nrows = gdf.shape[0]
typ = np.min_scalar_type(nrows * 2)
if self.shuffle:
ind = cp.random.choice(cp.arange(self.num_out_files, dtype=typ), nrows)
else:
ind = cp.arange(nrows, dtype=typ)
cp.floor_divide(ind, math.ceil(nrows / self.num_out_files), out=ind)
for x, group in enumerate(
gdf.scatter_by_map(ind, map_size=self.num_out_files, keep_index=False)
):
self.num_samples[x] += len(group)
if self.num_threads > 1:
self.queue.put((x, group))
else:
self._write_table(x, group)
# wait for all writes to finish before exiting
# (so that we aren't using memory)
if self.num_threads > 1:
self.queue.join()
|
[{'piece_type': 'other', 'piece_content': "df = cudf.DataFrame({'doc_id': [1, 1, 2, 2, 3, 3, 4, 4], 'category_id': [1, 2, 3, 3, 5, 6, 6, 1], 'confidence_level': [0.92, 0.251, 0.352, 0.359, 0.978, 0.988, 0.978, 0.988]})\\n\\ndf_grouped = df.groupby('doc_id', as_index=False).agg({'category_id': ['collect'], 'confidence_level': ['collect']})\\n\\ndf_grouped.columns= df_grouped.columns.get_level_values(0)\\n\\ndf2 = cudf.DataFrame({'doc_id': [1, 2, 2, 3, 4, 3, 7, 8], 'category_id': [1, 2, 4, 3, 6, 6, 5, 2], 'ad_id': [1, 2, 3, 4, 4, 5, 10, 12],\\n'source_id': [1200, 1210, 1450, np.nan, 1330, 1200, 1500, 1350]})\\n\\ncolumns_ext = ['doc_id', 'category_id', 'confidence_level']\\nkind_ext='cudf'\\nproc = nvt.Workflow(\\ncat_names= ['doc_id', 'category_id', 'ad_id', 'source_id'],\\ncont_names=[],\\nlabel_name=[])\\n\\nproc.add_preprocess(JoinExternal(df_grouped, on= ['doc_id'], on_ext= ['doc_id'], kind_ext=kind_ext, columns_ext=columns_ext, cache='device', how='left'))\\ntrain_dataset = nvt.Dataset(df2)\\nproc.apply(train_dataset, apply_offline=True, record_stats=True, output_path='./output/', shuffle=True, out_files_per_proc=1)"}, {'piece_type': 'error message', 'piece_content': '---------------------------------------------------------------------------\\nAttributeError Traceback (most recent call last)\\n<ipython-input-19-f93c44c3b381> in <module>\\n11 proc.add_preprocess(JoinExternal(df_grouped, on= [\\'doc_id\\'], on_ext= [\\'doc_id\\'], kind_ext=kind_ext, columns_ext=columns_ext, cache=\\'device\\', how=\\'left\\'))\\n12 train_dataset = nvt.Dataset(df2)\\n---> 13 proc.apply(train_dataset, apply_offline=True, record_stats=True, output_path=\\'./output/\\', shuffle=True, out_files_per_proc=1)\\n\\n~/ronaya/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads)\\n738 output_format=output_format,\\n739 out_files_per_proc=out_files_per_proc,\\n--> 740 
num_io_threads=num_io_threads,\\n741 )\\n742 else:\\n\\n~/ronaya/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads)\\n845 shuffle=shuffle,\\n846 out_files_per_proc=out_files_per_proc,\\n--> 847 num_threads=num_io_threads,\\n848 )\\n849\\n\\n~/ronaya/NVTabular/nvtabular/workflow.py in ddf_to_dataset(self, output_path, shuffle, out_files_per_proc, output_format, num_threads)\\n931 output_format,\\n932 self.client,\\n--> 933 num_threads,\\n934 )\\n935 return\\n\\n~/ronaya/NVTabular/nvtabular/io/dask.py in _ddf_to_dataset(ddf, fs, output_path, shuffle, out_files_per_proc, cat_names, cont_names, label_names, output_format, client, num_threads)\\n110 out = client.compute(out).result()\\n111 else:\\n--> 112 out = dask.compute(out, scheduler="synchronous")[0]\\n113\\n114 # Follow-up Shuffling and _metadata creation\\n\\n~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/base.py in compute(*args, **kwargs)\\n450 postcomputes.append(x.__dask_postcompute__())\\n451\\n--> 452 results = schedule(dsk, keys, **kwargs)\\n453 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])\\n454\\n\\n~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in get_sync(dsk, keys, **kwargs)\\n525 """\\n526 kwargs.pop("num_workers", None) # if num_workers present, remove it\\n--> 527 return get_async(apply_sync, 1, dsk, keys, **kwargs)\\n528\\n529\\n\\n~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)\\n492\\n493 while state["ready"] and len(state["running"]) < num_workers:\\n--> 494 fire_task()\\n495\\n496 succeeded = True\\n\\n~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in fire_task()\\n464 pack_exception,\\n465 ),\\n--> 466 
callback=queue.put,\\n467 )\\n468\\n\\n~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in apply_sync(func, args, kwds, callback)\\n514 def apply_sync(func, args=(), kwds={}, callback=None):\\n515 """ A naive synchronous version of apply_async """\\n--> 516 res = func(*args, **kwds)\\n517 if callback is not None:\\n518 callback(res)\\n\\n~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)\\n225 failed = False\\n226 except BaseException as e:\\n--> 227 result = pack_exception(e, dumps)\\n228 failed = True\\n229 return key, result, failed\\n\\n~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)\\n220 try:\\n221 task, data = loads(task_info)\\n--> 222 result = _execute_task(task, data)\\n223 id = get_id()\\n224 result = dumps((result, id))\\n\\n~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/core.py in _execute_task(arg, cache, dsk)\\n119 # temporaries by their reference count and can execute certain\\n120 # operations in-place.\\n--> 121 return func(*(_execute_task(a, cache) for a in args))\\n122 elif not ishashable(arg):\\n123 return arg\\n\\n~/miniconda3/envs/1019/lib/python3.7/contextlib.py in inner(*args, **kwds)\\n72 def inner(*args, **kwds):\\n73 with self._recreate_cm():\\n---> 74 return func(*args, **kwds)\\n75 return inner\\n76\\n\\n~/ronaya/NVTabular/nvtabular/io/dask.py in _write_output_partition(gdf, processed_path, shuffle, out_files_per_proc, fs, cat_names, cont_names, label_names, output_format, num_threads)\\n61\\n62 # Add data\\n---> 63 writer.add_data(gdf)\\n64\\n65 return gdf_size\\n\\n~/miniconda3/envs/1019/lib/python3.7/contextlib.py in inner(*args, **kwds)\\n72 def inner(*args, **kwds):\\n73 with self._recreate_cm():\\n---> 74 return func(*args, **kwds)\\n75 return inner\\n76\\n\\n~/ronaya/NVTabular/nvtabular/io/writer.py in add_data(self, gdf)\\n125 # in 
parquet\\n126 if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):\\n--> 127 self._write_table(gdf, 0, True)\\n128 return\\n129\\n\\n~/ronaya/NVTabular/nvtabular/io/parquet.py in _write_table(self, idx, data, has_list_column)\\n210 # write out a new file, rather than stream multiple chunks to a single file\\n211 filename = self._get_filename(len(self.data_paths))\\n--> 212 data.to_parquet(filename)\\n213 self.data_paths.append(filename)\\n214 else:\\n\\nAttributeError: \\'int\\' object has no attribute \\'to_parquet\\''}]
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-f93c44c3b381> in <module>
11 proc.add_preprocess(JoinExternal(df_grouped, on= ['doc_id'], on_ext= ['doc_id'], kind_ext=kind_ext, columns_ext=columns_ext, cache='device', how='left'))
12 train_dataset = nvt.Dataset(df2)
---> 13 proc.apply(train_dataset, apply_offline=True, record_stats=True, output_path='./output/', shuffle=True, out_files_per_proc=1)
~/ronaya/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads)
738 output_format=output_format,
739 out_files_per_proc=out_files_per_proc,
--> 740 num_io_threads=num_io_threads,
741 )
742 else:
~/ronaya/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads)
845 shuffle=shuffle,
846 out_files_per_proc=out_files_per_proc,
--> 847 num_threads=num_io_threads,
848 )
849
~/ronaya/NVTabular/nvtabular/workflow.py in ddf_to_dataset(self, output_path, shuffle, out_files_per_proc, output_format, num_threads)
931 output_format,
932 self.client,
--> 933 num_threads,
934 )
935 return
~/ronaya/NVTabular/nvtabular/io/dask.py in _ddf_to_dataset(ddf, fs, output_path, shuffle, out_files_per_proc, cat_names, cont_names, label_names, output_format, client, num_threads)
110 out = client.compute(out).result()
111 else:
--> 112 out = dask.compute(out, scheduler="synchronous")[0]
113
114 # Follow-up Shuffling and _metadata creation
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/base.py in compute(*args, **kwargs)
450 postcomputes.append(x.__dask_postcompute__())
451
--> 452 results = schedule(dsk, keys, **kwargs)
453 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
454
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in get_sync(dsk, keys, **kwargs)
525 """
526 kwargs.pop("num_workers", None) # if num_workers present, remove it
--> 527 return get_async(apply_sync, 1, dsk, keys, **kwargs)
528
529
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
492
493 while state["ready"] and len(state["running"]) < num_workers:
--> 494 fire_task()
495
496 succeeded = True
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in fire_task()
464 pack_exception,
465 ),
--> 466 callback=queue.put,
467 )
468
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in apply_sync(func, args, kwds, callback)
514 def apply_sync(func, args=(), kwds={}, callback=None):
515 """ A naive synchronous version of apply_async """
--> 516 res = func(*args, **kwds)
517 if callback is not None:
518 callback(res)
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
225 failed = False
226 except BaseException as e:
--> 227 result = pack_exception(e, dumps)
228 failed = True
229 return key, result, failed
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
220 try:
221 task, data = loads(task_info)
--> 222 result = _execute_task(task, data)
223 id = get_id()
224 result = dumps((result, id))
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
~/miniconda3/envs/1019/lib/python3.7/contextlib.py in inner(*args, **kwds)
72 def inner(*args, **kwds):
73 with self._recreate_cm():
---> 74 return func(*args, **kwds)
75 return inner
76
~/ronaya/NVTabular/nvtabular/io/dask.py in _write_output_partition(gdf, processed_path, shuffle, out_files_per_proc, fs, cat_names, cont_names, label_names, output_format, num_threads)
61
62 # Add data
---> 63 writer.add_data(gdf)
64
65 return gdf_size
~/miniconda3/envs/1019/lib/python3.7/contextlib.py in inner(*args, **kwds)
72 def inner(*args, **kwds):
73 with self._recreate_cm():
---> 74 return func(*args, **kwds)
75 return inner
76
~/ronaya/NVTabular/nvtabular/io/writer.py in add_data(self, gdf)
125 # in parquet
126 if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
--> 127 self._write_table(gdf, 0, True)
128 return
129
~/ronaya/NVTabular/nvtabular/io/parquet.py in _write_table(self, idx, data, has_list_column)
210 # write out a new file, rather than stream multiple chunks to a single file
211 filename = self._get_filename(len(self.data_paths))
--> 212 data.to_parquet(filename)
213 self.data_paths.append(filename)
214 else:
AttributeError: 'int' object has no attribute 'to_parquet'
|
AttributeError
|
def __init__(
self,
paths,
part_size,
storage_options,
row_groups_per_part=None,
legacy=False,
batch_size=None,
):
# TODO: Improve dask_cudf.read_parquet performance so that
# this class can be slimmed down.
super().__init__(paths, part_size, storage_options)
self.batch_size = batch_size
self._metadata, self._base = self.metadata
self._pieces = None
if row_groups_per_part is None:
file_path = self._metadata.row_group(0).column(0).file_path
path0 = (
self.fs.sep.join([self._base, file_path])
if file_path != ""
else self._base # This is a single file
)
if row_groups_per_part is None:
rg_byte_size_0 = _memory_usage(cudf.io.read_parquet(path0, row_groups=0, row_group=0))
row_groups_per_part = self.part_size / rg_byte_size_0
if row_groups_per_part < 1.0:
warnings.warn(
f"Row group size {rg_byte_size_0} is bigger than requested part_size "
f"{self.part_size}"
)
row_groups_per_part = 1.0
self.row_groups_per_part = int(row_groups_per_part)
assert self.row_groups_per_part > 0
|
def __init__(
self,
paths,
part_size,
storage_options,
row_groups_per_part=None,
legacy=False,
batch_size=None,
):
# TODO: Improve dask_cudf.read_parquet performance so that
# this class can be slimmed down.
super().__init__(paths, part_size, storage_options)
self.batch_size = batch_size
self._metadata, self._base = self.metadata
self._pieces = None
if row_groups_per_part is None:
file_path = self._metadata.row_group(0).column(0).file_path
path0 = (
self.fs.sep.join([self._base, file_path])
if file_path != ""
else self._base # This is a single file
)
if row_groups_per_part is None:
rg_byte_size_0 = (
cudf.io.read_parquet(path0, row_groups=0, row_group=0)
.memory_usage(deep=True, index=True)
.sum()
)
row_groups_per_part = self.part_size / rg_byte_size_0
if row_groups_per_part < 1.0:
warnings.warn(
f"Row group size {rg_byte_size_0} is bigger than requested part_size "
f"{self.part_size}"
)
row_groups_per_part = 1.0
self.row_groups_per_part = int(row_groups_per_part)
assert self.row_groups_per_part > 0
|
[{'piece_type': 'other', 'piece_content': 'python dataloader_bench.py torch <PATH TO Folder with Parquet Files on local> parquet 0.2'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "main.py", line 106, in <module>\\nmain(args)\\nFile "main.py", line 61, in main\\ntrain_paths, engine="parquet", part_mem_fraction=float(args.gpu_mem_frac)\\nFile "/root/miniconda/lib/python3.7/site-packages/nvtabular/io/dataset.py", line 224, in __init__\\npaths, part_size, storage_options=storage_options, **kwargs\\nFile "/root/miniconda/lib/python3.7/site-packages/nvtabular/io/parquet.py", line 69, in __init__\\n.memory_usage(deep=True, index=True)\\nFile "/root/miniconda/lib/python3.7/site-packages/cudf/core/dataframe.py", line 842, in memory_usage\\nsizes = [col._memory_usage(deep=deep) for col in self._data.columns]\\nFile "/root/miniconda/lib/python3.7/site-packages/cudf/core/dataframe.py", line 842, in <listcomp>\\nsizes = [col._memory_usage(deep=deep) for col in self._data.columns]\\nFile "/root/miniconda/lib/python3.7/site-packages/cudf/core/column/column.py", line 299, in _memory_usage\\nreturn self.__sizeof__()\\nFile "/root/miniconda/lib/python3.7/site-packages/cudf/core/column/column.py", line 183, in __sizeof__\\nn = self.data.size\\nFile "cudf/_lib/column.pyx", line 99, in cudf._lib.column.Column.data.__get__\\nAttributeError: \\'ListDtype\\' object has no attribute \\'itemsize\\''}, {'piece_type': 'other', 'piece_content': '# packages in environment at /root/miniconda:\\n#\\n# Name Version Build Channel\\n_libgcc_mutex 0.1 main\\n_pytorch_select 0.1 cpu_0\\nabseil-cpp 20200225.2 he1b5a44_2 conda-forge\\narrow-cpp 0.17.1 py37h1234567_11_cuda conda-forge\\narrow-cpp-proc 1.0.1 cuda conda-forge\\naws-sdk-cpp 1.7.164 hba45d7a_2 conda-forge\\nblas 1.0 mkl\\nbokeh 2.2.2 py37_0\\nboost-cpp 1.72.0 h9359b55_3 conda-forge\\nbrotli 1.0.9 he6710b0_2\\nbrotlipy 0.7.0 py37h7b6447c_1000\\nbzip2 1.0.8 h7b6447c_0\\nc-ares 1.16.1 
h7b6447c_0\\nca-certificates 2020.10.14 0\\ncertifi 2020.6.20 py37_0\\ncffi 1.14.3 py37he30daa8_0\\nchardet 3.0.4 py37_1003\\nclick 7.1.2 py_0\\ncloudpickle 1.6.0 py_0\\nconda 4.9.0 py37he5f6b98_0 conda-forge\\nconda-package-handling 1.7.2 py37h03888b9_0\\ncryptography 3.1.1 py37h1ba5d50_0\\ncudatoolkit 10.2.89 h6bb024c_0 nvidia\\ncudf 0.15.0 cuda_10.2_py37_g71cb8c0e0_0 rapidsai\\ncudnn 7.6.5 cuda10.2_0\\ncupy 7.8.0 py37h940342b_1 conda-forge\\ncurl 7.71.1 hbc83047_1\\ncython 0.29.21 pypi_0 pypi\\ncytoolz 0.11.0 py37h7b6447c_0\\ndask 2.30.0 py_0\\ndask-core 2.30.0 py_0\\ndask-cudf 0.15.0 py37_g71cb8c0e0_0 rapidsai\\ndistributed 2.30.0 py37_0\\ndlpack 0.3 he6710b0_1\\ndouble-conversion 3.1.5 he6710b0_1\\nfastavro 1.0.0.post1 py37h7b6447c_0\\nfastrlock 0.5 py37he6710b0_0\\nfreetype 2.10.3 h5ab3b9f_0\\nfsspec 0.8.3 py_0\\ngflags 2.2.2 he6710b0_0\\nglog 0.4.0 he6710b0_0\\ngrpc-cpp 1.30.2 heedbac9_0 conda-forge\\nheapdict 1.0.1 py_0\\nicu 67.1 he1b5a44_0 conda-forge\\nidna 2.10 py_0\\nintel-openmp 2019.4 243\\njinja2 2.11.2 py_0\\njpeg 9b h024ee3a_2\\nkrb5 1.18.2 h173b8e3_0\\nlcms2 2.11 h396b838_0\\nld_impl_linux-64 2.33.1 h53a641e_7\\nlibcudf 0.15.0 cuda10.2_g71cb8c0e0_0 rapidsai\\nlibcurl 7.71.1 h20c2e04_1\\nlibedit 3.1.20191231 h14c3975_1\\nlibevent 2.1.10 hcdb4288_3 conda-forge\\nlibffi 3.3 he6710b0_2\\nlibgcc-ng 9.1.0 hdf63c60_0\\nlibllvm10 10.0.1 hbcb73fb_5\\nlibpng 1.6.37 hbc83047_0\\nlibprotobuf 3.12.4 hd408876_0\\nlibrmm 0.15.0 cuda10.2_g8005ca5_0 rapidsai\\nlibssh2 1.9.0 h1ba5d50_1\\nlibstdcxx-ng 9.1.0 hdf63c60_0\\nlibthrift 0.13.0 hbe8ec66_6 conda-forge\\nlibtiff 4.1.0 h2733197_1\\nllvmlite 0.34.0 py37h269e1b5_4\\nlocket 0.2.0 py37_1\\nlz4-c 1.9.2 heb0550a_3\\nmarkupsafe 1.1.1 py37h14c3975_1\\nmkl 2019.4 243\\nmkl-service 2.3.0 py37he904b0f_0\\nmkl_fft 1.2.0 py37h23d657b_0\\nmkl_random 1.1.0 py37hd6b4f25_0\\nmsgpack-python 1.0.0 py37hfd86e86_1\\nnccl 2.7.8.1 hc6a2c23_1 conda-forge\\nncurses 6.2 he6710b0_1\\nninja 1.10.1 py37hfd86e86_0\\nnumba 0.51.2 
py37h04863e7_1\\nnumpy 1.19.1 py37hbc911f0_0\\nnumpy-base 1.19.1 py37hfa32c7d_0\\nnvtabular 0.2.0 cudaunknown_py37_0 nvidia/label/nvidia\\nolefile 0.46 py37_0\\nopenssl 1.1.1h h7b6447c_0\\npackaging 20.4 py_0\\npandas 1.1.3 py37he6710b0_0\\nparquet-cpp 1.5.1 2 conda-forge\\npartd 1.1.0 py_0\\npillow 8.0.0 py37h9a89aac_0\\npip 20.2.3 py37_0\\npsutil 5.7.2 py37h7b6447c_0\\npyarrow 0.17.1 py37h1234567_11_cuda conda-forge\\npycosat 0.6.3 py37h7b6447c_0\\npycparser 2.19 pypi_0 pypi\\npynvml 8.0.4 py_1 conda-forge\\npyopenssl 19.1.0 py_1\\npyparsing 2.4.7 py_0\\npysocks 1.7.1 py37_1\\npython 3.7.9 h7579374_0\\npython-dateutil 2.8.1 py_0\\npython_abi 3.7 1_cp37m conda-forge\\npytorch 1.5.0 cpu_py37hd91cbb3_0\\npytz 2020.1 py_0\\npyyaml 5.3.1 py37h7b6447c_1\\nre2 2020.07.06 he1b5a44_1 conda-forge\\nreadline 8.0 h7b6447c_0\\nrequests 2.24.0 py_0\\nrmm 0.15.0 cuda_10.2_py37_g8005ca5_0 rapidsai\\nruamel_yaml 0.15.87 py37h7b6447c_1\\nsetuptools 50.3.0 py37hb0f4dca_1\\nsix 1.15.0 py_0\\nsnappy 1.1.8 he6710b0_0\\nsortedcontainers 2.2.2 py_0\\nspdlog 1.8.0 hfd86e86_1\\nsqlite 3.33.0 h62c20be_0\\ntbb 2020.3 hfd86e86_0\\ntblib 1.7.0 py_0\\nthrift-compiler 0.13.0 hbe8ec66_6 conda-forge\\nthrift-cpp 0.13.0 6 conda-forge\\ntk 8.6.10 hbc83047_0\\ntoolz 0.11.1 py_0\\ntornado 6.0.4 py37h7b6447c_1\\ntqdm 4.50.2 py_0\\ntyping_extensions 3.7.4.3 py_0\\nurllib3 1.25.10 py_0\\nwheel 0.35.1 py_0\\nxz 5.2.5 h7b6447c_0\\nyaml 0.2.5 h7b6447c_0\\nzict 2.0.0 py_0\\nzlib 1.2.11 h7b6447c_3\\nzstd 1.4.5 h9ceee32_0'}]
|
Traceback (most recent call last):
File "main.py", line 106, in <module>
main(args)
File "main.py", line 61, in main
train_paths, engine="parquet", part_mem_fraction=float(args.gpu_mem_frac)
File "/root/miniconda/lib/python3.7/site-packages/nvtabular/io/dataset.py", line 224, in __init__
paths, part_size, storage_options=storage_options, **kwargs
File "/root/miniconda/lib/python3.7/site-packages/nvtabular/io/parquet.py", line 69, in __init__
.memory_usage(deep=True, index=True)
File "/root/miniconda/lib/python3.7/site-packages/cudf/core/dataframe.py", line 842, in memory_usage
sizes = [col._memory_usage(deep=deep) for col in self._data.columns]
File "/root/miniconda/lib/python3.7/site-packages/cudf/core/dataframe.py", line 842, in <listcomp>
sizes = [col._memory_usage(deep=deep) for col in self._data.columns]
File "/root/miniconda/lib/python3.7/site-packages/cudf/core/column/column.py", line 299, in _memory_usage
return self.__sizeof__()
File "/root/miniconda/lib/python3.7/site-packages/cudf/core/column/column.py", line 183, in __sizeof__
n = self.data.size
File "cudf/_lib/column.pyx", line 99, in cudf._lib.column.Column.data.__get__
AttributeError: 'ListDtype' object has no attribute 'itemsize'
|
AttributeError
|
def __init__(self, *args, **kwargs):
super().__init__(*args)
self._meta = {}
self.csv_kwargs = kwargs
self.names = self.csv_kwargs.get("names", None)
# CSV reader needs a list of files
# (Assume flat directory structure if this is a dir)
if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
|
def __init__(self, *args, **kwargs):
super().__init__(*args)
self._meta = {}
self.names = kwargs.pop("names", None)
self.csv_kwargs = kwargs
# CSV reader needs a list of files
# (Assume flat directory structure if this is a dir)
if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
|
[{'piece_type': 'error message', 'piece_content': "AttributeErrorTraceback (most recent call last)\\n<ipython-input-1-84910288ec3f> in <module>\\n44 del gdf\\n45 path_out = '/raid/criteo/tests/jp_csv_orig/'\\n---> 46 file_to_pq(train_set, 'csv', output_folder=path_out, cols=cols, dtypes=dtypes)\\n\\n<ipython-input-1-84910288ec3f> in file_to_pq(target_files, file_type, output_folder, cols, dtypes)\\n34 old_file_path = None\\n35 writer = None\\n---> 36 for gdf in tar:\\n37 # gdf.to_parquet(output_folder)\\n38 file_path = os.path.join(output_folder, os.path.split(tar.itr.file_path)[1].split('.')[0])\\n\\n/nvtabular/nvtabular/io.py in __iter__(self)\\n329 def __iter__(self):\\n330 for path in self.paths:\\n--> 331 yield from GPUFileIterator(path, **self.kwargs)\\n332\\n333\\n\\n/nvtabular/nvtabular/io.py in __iter__(self)\\n271 for chunk in self.engine:\\n272 if self.dtypes:\\n--> 273 self._set_dtypes(chunk)\\n274 yield chunk\\n275 chunk = None\\n\\nAttributeError: 'GPUFileIterator' object has no attribute '_set_dtypes'"}]
|
AttributeErrorTraceback (most recent call last)
<ipython-input-1-84910288ec3f> in <module>
44 del gdf
45 path_out = '/raid/criteo/tests/jp_csv_orig/'
---> 46 file_to_pq(train_set, 'csv', output_folder=path_out, cols=cols, dtypes=dtypes)
<ipython-input-1-84910288ec3f> in file_to_pq(target_files, file_type, output_folder, cols, dtypes)
34 old_file_path = None
35 writer = None
---> 36 for gdf in tar:
37 # gdf.to_parquet(output_folder)
38 file_path = os.path.join(output_folder, os.path.split(tar.itr.file_path)[1].split('.')[0])
/nvtabular/nvtabular/io.py in __iter__(self)
329 def __iter__(self):
330 for path in self.paths:
--> 331 yield from GPUFileIterator(path, **self.kwargs)
332
333
/nvtabular/nvtabular/io.py in __iter__(self)
271 for chunk in self.engine:
272 if self.dtypes:
--> 273 self._set_dtypes(chunk)
274 yield chunk
275 chunk = None
AttributeError: 'GPUFileIterator' object has no attribute '_set_dtypes'
|
AttributeError
|
def to_ddf(self, columns=None):
return dask_cudf.read_csv(self.paths, chunksize=self.part_size, **self.csv_kwargs)[columns]
|
def to_ddf(self, columns=None):
return dask_cudf.read_csv(
self.paths, names=self.names, chunksize=self.part_size, **self.csv_kwargs
)[columns]
|
[{'piece_type': 'error message', 'piece_content': "AttributeErrorTraceback (most recent call last)\\n<ipython-input-1-84910288ec3f> in <module>\\n44 del gdf\\n45 path_out = '/raid/criteo/tests/jp_csv_orig/'\\n---> 46 file_to_pq(train_set, 'csv', output_folder=path_out, cols=cols, dtypes=dtypes)\\n\\n<ipython-input-1-84910288ec3f> in file_to_pq(target_files, file_type, output_folder, cols, dtypes)\\n34 old_file_path = None\\n35 writer = None\\n---> 36 for gdf in tar:\\n37 # gdf.to_parquet(output_folder)\\n38 file_path = os.path.join(output_folder, os.path.split(tar.itr.file_path)[1].split('.')[0])\\n\\n/nvtabular/nvtabular/io.py in __iter__(self)\\n329 def __iter__(self):\\n330 for path in self.paths:\\n--> 331 yield from GPUFileIterator(path, **self.kwargs)\\n332\\n333\\n\\n/nvtabular/nvtabular/io.py in __iter__(self)\\n271 for chunk in self.engine:\\n272 if self.dtypes:\\n--> 273 self._set_dtypes(chunk)\\n274 yield chunk\\n275 chunk = None\\n\\nAttributeError: 'GPUFileIterator' object has no attribute '_set_dtypes'"}]
|
AttributeErrorTraceback (most recent call last)
<ipython-input-1-84910288ec3f> in <module>
44 del gdf
45 path_out = '/raid/criteo/tests/jp_csv_orig/'
---> 46 file_to_pq(train_set, 'csv', output_folder=path_out, cols=cols, dtypes=dtypes)
<ipython-input-1-84910288ec3f> in file_to_pq(target_files, file_type, output_folder, cols, dtypes)
34 old_file_path = None
35 writer = None
---> 36 for gdf in tar:
37 # gdf.to_parquet(output_folder)
38 file_path = os.path.join(output_folder, os.path.split(tar.itr.file_path)[1].split('.')[0])
/nvtabular/nvtabular/io.py in __iter__(self)
329 def __iter__(self):
330 for path in self.paths:
--> 331 yield from GPUFileIterator(path, **self.kwargs)
332
333
/nvtabular/nvtabular/io.py in __iter__(self)
271 for chunk in self.engine:
272 if self.dtypes:
--> 273 self._set_dtypes(chunk)
274 yield chunk
275 chunk = None
AttributeError: 'GPUFileIterator' object has no attribute '_set_dtypes'
|
AttributeError
|
def _predict(self, X):
"""Collect results from clf.predict calls."""
if self.refit:
return np.asarray([clf.predict(X) for clf in self.clfs_]).T
else:
return np.asarray([self.le_.transform(clf.predict(X))
for clf in self.clfs_]).T
|
def _predict(self, X):
"""Collect results from clf.predict calls."""
return np.asarray([clf.predict(X) for clf in self.clfs_]).T
|
[{'piece_type': 'reproducing source code', 'piece_content': "import numpy as np\\nfrom sklearn.ensemble import RandomForestClassifier\\nfrom mlxtend.classifier import EnsembleVoteClassifier\\n\\ndata = np.array([0, 1, 2, 3, 0, 1, 2, 3])[:, np.newaxis]\\nlabels = ['a', 'b', 'c', 'd', 'a', 'b', 'c', 'd']\\n\\ntest = np.array([0, 1])[:, np.newaxis]\\n\\nrf = RandomForestClassifier()\\nrf.fit(data, labels)\\nprint(rf.predict(test)) # output: ['a', 'b']\\n\\nclf = EnsembleVoteClassifier(clfs=[rf, rf], refit=False)\\nclf.fit(data, labels)\\nprint(clf.predict(test)) # <-- error"}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/_mlxtend_bug/reproduce.py", line 16, in <module>\\nprint(clf.predict(test))\\nFile "/venv/py3/lib/python3.4/site-packages/mlxtend/classifier/ensemble_vote.py", line 197, in predict\\narr=predictions)\\nFile "/venv/py3/lib/python3.4/site-packages/numpy/lib/shape_base.py", line 132, in apply_along_axis\\nres = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))\\nFile "/venv/py3/lib/python3.4/site-packages/mlxtend/classifier/ensemble_vote.py", line 195, in <lambda>\\nweights=self.weights)),\\nTypeError: Cannot cast array data from dtype(\\'<U1\\') to dtype(\\'int64\\') according to the rule \\'safe\\''}, {'piece_type': 'source code', 'piece_content': "else: # 'hard' voting\\npredictions = self._predict(X)\\n\\nmaj = np.apply_along_axis(lambda x:\\nnp.argmax(np.bincount(x,\\nweights=self.weights)),\\naxis=1,\\narr=predictions)"}, {'piece_type': 'source code', 'piece_content': "else: # 'hard' voting\\npredictions = self._predict(X)\\n\\nmaj = np.apply_along_axis(lambda x:\\nnp.argmax(np.bincount(self.le_.transform(x),\\nweights=self.weights)),\\naxis=1,\\narr=predictions)"}]
|
Traceback (most recent call last):
File "/_mlxtend_bug/reproduce.py", line 16, in <module>
print(clf.predict(test))
File "/venv/py3/lib/python3.4/site-packages/mlxtend/classifier/ensemble_vote.py", line 197, in predict
arr=predictions)
File "/venv/py3/lib/python3.4/site-packages/numpy/lib/shape_base.py", line 132, in apply_along_axis
res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
File "/venv/py3/lib/python3.4/site-packages/mlxtend/classifier/ensemble_vote.py", line 195, in <lambda>
weights=self.weights)),
TypeError: Cannot cast array data from dtype('<U1') to dtype('int64') according to the rule 'safe'
|
TypeError
|
def transform(
self,
xx: Any,
yy: Any,
zz: Any = None,
tt: Any = None,
radians: bool = False,
errcheck: bool = False,
direction: Union[TransformDirection, str] = TransformDirection.FORWARD,
) -> Any:
"""
Transform points between two coordinate systems.
.. versionadded:: 2.1.1 errcheck
.. versionadded:: 2.2.0 direction
Parameters
----------
xx: scalar or array (numpy or python)
Input x coordinate(s).
yy: scalar or array (numpy or python)
Input y coordinate(s).
zz: scalar or array (numpy or python), optional
Input z coordinate(s).
tt: scalar or array (numpy or python), optional
Input time coordinate(s).
radians: boolean, optional
If True, will expect input data to be in radians and will return radians
if the projection is geographic. Default is False (degrees). Ignored for
pipeline transformations.
errcheck: boolean, optional (default False)
If True an exception is raised if the transformation is invalid.
By default errcheck=False and an invalid transformation
returns ``inf`` and no exception is raised.
direction: pyproj.enums.TransformDirection, optional
The direction of the transform.
Default is :attr:`pyproj.enums.TransformDirection.FORWARD`.
Example:
>>> from pyproj import Transformer
>>> transformer = Transformer.from_crs("epsg:4326", "epsg:3857")
>>> x3, y3 = transformer.transform(33, 98)
>>> "%.3f %.3f" % (x3, y3)
'10909310.098 3895303.963'
>>> pipeline_str = (
... "+proj=pipeline +step +proj=longlat +ellps=WGS84 "
... "+step +proj=unitconvert +xy_in=rad +xy_out=deg"
... )
>>> pipe_trans = Transformer.from_pipeline(pipeline_str)
>>> xt, yt = pipe_trans.transform(2.1, 0.001)
>>> "%.3f %.3f" % (xt, yt)
'2.100 0.001'
>>> transproj = Transformer.from_crs(
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... "EPSG:4326",
... always_xy=True,
... )
>>> xpj, ypj, zpj = transproj.transform(
... -2704026.010,
... -4253051.810,
... 3895878.820,
... radians=True,
... )
>>> "%.3f %.3f %.3f" % (xpj, ypj, zpj)
'-2.137 0.661 -20.531'
>>> transprojr = Transformer.from_crs(
... "EPSG:4326",
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... always_xy=True,
... )
>>> xpjr, ypjr, zpjr = transprojr.transform(xpj, ypj, zpj, radians=True)
>>> "%.3f %.3f %.3f" % (xpjr, ypjr, zpjr)
'-2704026.010 -4253051.810 3895878.820'
>>> transformer = Transformer.from_proj("epsg:4326", 4326, skip_equivalent=True)
>>> xeq, yeq = transformer.transform(33, 98)
>>> "%.0f %.0f" % (xeq, yeq)
'33 98'
"""
# process inputs, making copies that support buffer API.
inx, xisfloat, xislist, xistuple = _copytobuffer(xx)
iny, yisfloat, yislist, yistuple = _copytobuffer(yy)
if zz is not None:
inz, zisfloat, zislist, zistuple = _copytobuffer(zz)
else:
inz = None
if tt is not None:
intime, tisfloat, tislist, tistuple = _copytobuffer(tt)
else:
intime = None
# call pj_transform. inx,iny,inz buffers modified in place.
self._transformer._transform(
inx,
iny,
inz=inz,
intime=intime,
direction=direction,
radians=radians,
errcheck=errcheck,
)
# if inputs were lists, tuples or floats, convert back.
outx = _convertback(xisfloat, xislist, xistuple, inx)
outy = _convertback(yisfloat, yislist, xistuple, iny)
return_data = (outx, outy)
if inz is not None:
return_data += ( # type: ignore
_convertback(zisfloat, zislist, zistuple, inz),
)
if intime is not None:
return_data += ( # type: ignore
_convertback(tisfloat, tislist, tistuple, intime),
)
return return_data
|
def transform(
    self,
    xx: Any,
    yy: Any,
    zz: Any = None,
    tt: Any = None,
    radians: bool = False,
    errcheck: bool = False,
    direction: Union[TransformDirection, str] = TransformDirection.FORWARD,
) -> Any:
    """
    Transform points between two coordinate systems.

    .. versionadded:: 2.1.1 errcheck
    .. versionadded:: 2.2.0 direction

    Parameters
    ----------
    xx: scalar or array (numpy or python)
        Input x coordinate(s).
    yy: scalar or array (numpy or python)
        Input y coordinate(s).
    zz: scalar or array (numpy or python), optional
        Input z coordinate(s).
    tt: scalar or array (numpy or python), optional
        Input time coordinate(s).
    radians: boolean, optional
        If True, will expect input data to be in radians and will return radians
        if the projection is geographic. Default is False (degrees). Ignored for
        pipeline transformations.
    errcheck: boolean, optional (default False)
        If True an exception is raised if the transformation is invalid.
        By default errcheck=False and an invalid transformation
        returns ``inf`` and no exception is raised.
    direction: pyproj.enums.TransformDirection, optional
        The direction of the transform.
        Default is :attr:`pyproj.enums.TransformDirection.FORWARD`.

    Returns
    -------
    tuple
        ``(x, y)``, plus ``z`` and/or ``t`` when supplied, each converted
        back to the container type (scalar, list, or tuple) of the
        corresponding input.

    Example:

    >>> from pyproj import Transformer
    >>> transformer = Transformer.from_crs("epsg:4326", "epsg:3857")
    >>> x3, y3 = transformer.transform(33, 98)
    >>> "%.3f %.3f" % (x3, y3)
    '10909310.098 3895303.963'
    >>> pipeline_str = (
    ...     "+proj=pipeline +step +proj=longlat +ellps=WGS84 "
    ...     "+step +proj=unitconvert +xy_in=rad +xy_out=deg"
    ... )
    >>> pipe_trans = Transformer.from_pipeline(pipeline_str)
    >>> xt, yt = pipe_trans.transform(2.1, 0.001)
    >>> "%.3f %.3f" % (xt, yt)
    '120.321 0.057'
    >>> transproj = Transformer.from_crs(
    ...     {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
    ...     "EPSG:4326",
    ...     always_xy=True,
    ... )
    >>> xpj, ypj, zpj = transproj.transform(
    ...     -2704026.010,
    ...     -4253051.810,
    ...     3895878.820,
    ...     radians=True,
    ... )
    >>> "%.3f %.3f %.3f" % (xpj, ypj, zpj)
    '-2.137 0.661 -20.531'
    >>> transprojr = Transformer.from_crs(
    ...     "EPSG:4326",
    ...     {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
    ...     always_xy=True,
    ... )
    >>> xpjr, ypjr, zpjr = transprojr.transform(xpj, ypj, zpj, radians=True)
    >>> "%.3f %.3f %.3f" % (xpjr, ypjr, zpjr)
    '-2704026.010 -4253051.810 3895878.820'
    >>> transformer = Transformer.from_proj("epsg:4326", 4326, skip_equivalent=True)
    >>> xeq, yeq = transformer.transform(33, 98)
    >>> "%.0f %.0f" % (xeq, yeq)
    '33 98'
    """
    # Process inputs, making copies that support the buffer API so the
    # C-level transform can modify them in place. Each call also records
    # the original container kind so the output can be converted back.
    inx, xisfloat, xislist, xistuple = _copytobuffer(xx)
    iny, yisfloat, yislist, yistuple = _copytobuffer(yy)
    if zz is not None:
        inz, zisfloat, zislist, zistuple = _copytobuffer(zz)
    else:
        inz = None
    if tt is not None:
        intime, tisfloat, tislist, tistuple = _copytobuffer(tt)
    else:
        intime = None
    # Call the underlying transformer; inx, iny, inz buffers are modified
    # in place.
    self._transformer._transform(
        inx,
        iny,
        inz=inz,
        intime=intime,
        direction=direction,
        radians=radians,
        errcheck=errcheck,
    )
    # If inputs were lists, tuples or floats, convert back to the same type.
    outx = _convertback(xisfloat, xislist, xistuple, inx)
    # BUGFIX: previously passed ``xistuple`` here, so the y output took
    # its tuple-ness from the x input instead of the y input.
    outy = _convertback(yisfloat, yislist, yistuple, iny)
    return_data = (outx, outy)
    if inz is not None:
        return_data += (  # type: ignore
            _convertback(zisfloat, zislist, zistuple, inz),
        )
    if intime is not None:
        return_data += (  # type: ignore
            _convertback(tisfloat, tislist, tistuple, intime),
        )
    return return_data
|
[{'piece_type': 'other', 'piece_content': 'echo 50 25 0 | cct +proj=pipeline +ellps=GRS80 +step +proj=cart'}, {'piece_type': 'other', 'piece_content': '3717892.6072 4430811.8715 2679074.4629 inf'}, {'piece_type': 'source code', 'piece_content': 'from pyproj import Transformer\\n\\nstring = "+proj=pipeline +ellps=GRS80 +step +proj=cart"\\npipe = Transformer.from_pipeline(string)\\npipe.transform(50, 25, 0, errcheck=True)'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "<stdin>", line 1, in <module>\\nFile "/usr/local/lib/python3.7/site-packages/pyproj/transformer.py", line 446, in transform\\nerrcheck=errcheck,\\nFile "pyproj/_transformer.pyx", line 463, in pyproj._transformer._Transformer._transform\\npyproj.exceptions.ProjError: transform error: latitude or longitude exceeded limits'}]
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.7/site-packages/pyproj/transformer.py", line 446, in transform
errcheck=errcheck,
File "pyproj/_transformer.pyx", line 463, in pyproj._transformer._Transformer._transform
pyproj.exceptions.ProjError: transform error: latitude or longitude exceeded limits
|
pyproj.exceptions.ProjError
|
End of preview. Expand
in Data Studio
No dataset card yet
- Downloads last month
- 1