function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def run(self):
    """Register a new machine entry in the DVC config; optionally make it the default."""
    from dvc.machine import validate_name

    name = self.args.name
    # Fail fast on an invalid machine name before touching the config.
    validate_name(name)

    if self.args.default:
        ui.write(f"Setting '{name}' as a default machine.")

    with self.config.edit(self.args.level) as conf:
        if name in conf["machine"] and not self.args.force:
            raise ConfigError(
                "machine '{}' already exists. Use `-f|--force` to "
                "overwrite it.".format(name)
            )
        conf["machine"][name] = {"cloud": self.args.cloud}
        if self.args.default:
            conf["core"]["machine"] = name
    return 0
11197,
1036,
11197,
597,
1488615393
] |
def run(self):
    """Remove a machine from the config and clear stale core.machine references."""
    name = self.args.name
    with self.config.edit(self.args.level) as conf:
        self._check_exists(conf)
        del conf["machine"][name]

    stop_level = self.args.level or "repo"
    # Walk config levels from lowest priority upward, dropping any
    # core.machine entry that still points at the removed machine.
    for level in reversed(self.config.LEVELS):
        with self.config.edit(level) as conf:
            if conf["core"].get("machine") == name:
                del conf["core"]["machine"]
        if level == stop_level:
            break
    return 0
11197,
1036,
11197,
597,
1488615393
] |
def _hide_private(self, conf):
    """Mask sensitive option values in *conf* with '***' (modifies in place)."""
    for machine_conf in conf.values():
        for column in self.PRIVATE_COLUMNS:
            if column in machine_conf:
                machine_conf[column] = "***"
11197,
1036,
11197,
597,
1488615393
] |
def _show_table(self):
    """Render the configured machines as a table, masking private values."""
    td = TabularData(self.TABLE_COLUMNS, fill_value="-")
    machines = self.config.read()["machine"]
    if self.args.name:
        # Restrict the view to the requested machine (empty dict if absent).
        machines = {self.args.name: machines.get(self.args.name, {})}
    self._hide_private(machines)
    for name, machine_conf in machines.items():
        machine_conf["name"] = name
        td.row_from_dict(machine_conf)
    td.dropna("cols", "all")
    td.render()
11197,
1036,
11197,
597,
1488615393
] |
def run(self):
    """Set or unset a single option of an existing machine."""
    from dvc.config import merge

    with self.config.edit(self.args.level) as conf:
        # Validate existence against the fully merged config, not just
        # the single level being edited.
        merged = self.config.load_config_to_level(self.args.level)
        merge(merged, conf)
        self._check_exists(merged)

        section = conf["machine"].setdefault(self.args.name, {})
        if self.args.unset:
            section.pop(self.args.option, None)
        else:
            section[self.args.option] = self.args.value
    return 0
11197,
1036,
11197,
597,
1488615393
] |
def _check_exists(self, conf):
    """Raise ConfigError unless the requested machine is configured."""
    name = self.args.name
    if name not in conf["machine"]:
        raise ConfigError(f"machine '{name}' doesn't exist.")
11197,
1036,
11197,
597,
1488615393
] |
def _check_before_rename(self):
    """Validate the new machine name and ensure it is not already taken."""
    from dvc.machine import validate_name

    new = self.args.new
    validate_name(new)

    # Check against every config level so an existing machine is never shadowed.
    all_config = self.config.load_config_to_level(None)
    if new in all_config.get("machine", {}):
        raise ConfigError(
            "Rename failed. Machine '{}' already exists.".format(new)
        )
    ui.write(f"Rename machine '{self.args.name}' to '{new}'.")
11197,
1036,
11197,
597,
1488615393
] |
def run(self):
    """Show, set, or unset the default machine (core.machine).

    Returns 0 on success, 1 when asked to show a default that is unset.
    """
    if self.args.name is None and not self.args.unset:
        # No name given: report the current default for this level.
        conf = self.config.read(self.args.level)
        try:
            # FIX: was a bare print(); every sibling command writes
            # through ui.write, so use the same output channel here.
            ui.write(conf["core"]["machine"])
        except KeyError:
            ui.write("No default machine set")
            return 1
    else:
        with self.config.edit(self.args.level) as conf:
            if self.args.unset:
                conf["core"].pop("machine", None)
            else:
                merged_conf = self.config.load_config_to_level(
                    self.args.level
                )
                # Accept the name if it exists at this level or any merged one.
                if (
                    self.args.name in conf["machine"]
                    or self.args.name in merged_conf["machine"]
                ):
                    conf["core"]["machine"] = self.args.name
                else:
                    raise ConfigError(
                        "default machine must be present in machine "
                        "list."
                    )
    return 0
11197,
1036,
11197,
597,
1488615393
] |
def run(self):
    """Create the named machine via the repo's machine backend.

    Raises MachineDisabledError when the machine feature is not enabled.
    """
    if self.repo.machine is None:
        raise MachineDisabledError
    self.repo.machine.create(self.args.name)
    return 0
11197,
1036,
11197,
597,
1488615393
] |
def _add_row(
self,
name: str,
all_status: List[Dict],
td: TabularData, | dmpetrov/dataversioncontrol | [
11197,
1036,
11197,
597,
1488615393
] |
def run(self):
    """Print a status table for one machine, or for all configured ones."""
    if self.repo.machine is None:
        raise MachineDisabledError

    td = TabularData(self.INSTANCE_FIELD + self.SHOWN_FIELD, fill_value="-")
    if self.args.name:
        statuses = list(self.repo.machine.status(self.args.name))
        self._add_row(self.args.name, statuses, td)
    else:
        # Collect machine names across every config level, deduplicated.
        names = set()
        for level in self.repo.config.LEVELS:
            names.update(self.repo.config.read(level)["machine"].keys())
        for name in sorted(names):
            statuses = list(self.repo.machine.status(name))
            self._add_row(name, statuses, td)

    td.dropna("cols", "all")
    td.render()
    return 0
11197,
1036,
11197,
597,
1488615393
] |
def run(self):
    """Destroy the named machine's resources via the machine backend.

    Raises MachineDisabledError when the machine feature is not enabled.
    """
    if self.repo.machine is None:
        raise MachineDisabledError
    self.repo.machine.destroy(self.args.name)
    return 0
11197,
1036,
11197,
597,
1488615393
] |
def run(self):
    """Open an interactive shell on the named machine.

    Raises MachineDisabledError when the machine feature is not enabled.
    """
    if self.repo.machine is None:
        raise MachineDisabledError
    self.repo.machine.run_shell(self.args.name)
    return 0
11197,
1036,
11197,
597,
1488615393
] |
def __init__(self):
    # NOTE(review): super(self.__class__, ...) recurses infinitely if this
    # class is ever subclassed; prefer an explicit class name here.
    super(self.__class__, self).__init__()
    # set target_names: 'background' plus the APC2016 object names (40 total).
    self.target_names = ['background'] + \
        [datum['name']
         for datum in jsk_apc2016_common.get_object_data()]
    n_class = len(self.target_names)
    assert n_class == 40
    # load model; ~gpu selects the device (-1 appears to mean CPU — confirm).
    self.gpu = rospy.get_param('~gpu', 0)
    chainermodel = rospy.get_param('~chainermodel')
    self.model = FCN32s(n_class=n_class)
    S.load_hdf5(chainermodel, self.model)
    if self.gpu != -1:
        self.model.to_gpu(self.gpu)
    jsk_logwarn('>> Model is loaded <<')
    # Block until the ~tote_contents param appears, polling every 0.1 s
    # and warning at most once every 10 s.
    while True:
        self.tote_contents = rospy.get_param('~tote_contents', None)
        if self.tote_contents is not None:
            break
        logwarn_throttle(10, 'param ~tote_contents is not set. Waiting..')
        rospy.sleep(0.1)
    self.label_names = rospy.get_param('~label_names')
    jsk_logwarn('>> Param is set <<')
    # Output topics: segmentation result and a debug image.
    self.pub = self.advertise('~output', Image, queue_size=1)
    self.pub_debug = self.advertise('~debug', Image, queue_size=1)
35,
37,
35,
24,
1414042111
] |
def unsubscribe(self):
    """Stop listening to the image and mask topics."""
    for subscriber in (self.sub_img, self.sub_mask):
        subscriber.unregister()
35,
37,
35,
24,
1414042111
] |
def __init__(self, searchpath):
    """File-system loader that records every template path it serves."""
    jinja2.FileSystemLoader.__init__(self, searchpath)
    # Paths of all templates loaded so far, for dependency tracking.
    self.loaded_templates = set()
72,
3,
72,
5,
1464475027
] |
def __init__(self, loader_base_dir, variables=None):
    """Jinja template processor rooted at *loader_base_dir*.

    Args:
      loader_base_dir: Base directory used to resolve template paths.
      variables: Optional mapping made available to templates.
    """
    self.loader_base_dir = loader_base_dir
    self.variables = variables or {}
    self.loader = _RecordingFileSystemLoader(loader_base_dir)
    env = jinja2.Environment(loader=self.loader)
    env.undefined = jinja2.StrictUndefined
    env.line_comment_prefix = '##'
    env.trim_blocks = True
    env.lstrip_blocks = True
    self.env = env
    self._template_cache = {}  # Map of path -> Template
72,
3,
72,
5,
1464475027
] |
def GetLoadedTemplates(self):
    """Return the template paths loaded so far, as a list."""
    return [path for path in self.loader.loaded_templates]
72,
3,
72,
5,
1464475027
] |
def _ProcessFiles(processor, input_filenames, inputs_base_dir, outputs_zip):
    """Render each input under a temp dir mirroring its relative path, then zip.

    Raises:
      Exception: if an input lies outside *inputs_base_dir*.
    """
    with build_utils.TempDir() as temp_dir:
        base = os.path.abspath(inputs_base_dir)
        for input_filename in input_filenames:
            relpath = os.path.relpath(os.path.abspath(input_filename), base)
            if relpath.startswith(os.pardir):
                raise Exception('input file %s is not contained in inputs base dir %s'
                                % (input_filename, inputs_base_dir))
            output_filename = os.path.join(temp_dir, relpath)
            build_utils.MakeDirectory(os.path.dirname(output_filename))
            _ProcessFile(processor, input_filename, output_filename)
        build_utils.ZipDir(outputs_zip, temp_dir)
72,
3,
72,
5,
1464475027
] |
def main():
    """Command-line entry: expand Jinja templates into a file or a zip."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--inputs', required=True,
                        help='The template files to process.')
    parser.add_argument('--output', help='The output file to generate. Valid '
                        'only if there is a single input.')
    parser.add_argument('--outputs-zip', help='A zip file for the processed '
                        'templates. Required if there are multiple inputs.')
    parser.add_argument('--inputs-base-dir', help='A common ancestor directory '
                        'of the inputs. Each output\'s path in the output zip '
                        'will match the relative path from INPUTS_BASE_DIR to '
                        'the input. Required if --outputs-zip is given.')
    parser.add_argument('--loader-base-dir', help='Base path used by the '
                        'template loader. Must be a common ancestor directory of '
                        'the inputs. Defaults to DIR_SOURCE_ROOT.',
                        default=host_paths.DIR_SOURCE_ROOT)
    parser.add_argument('--variables', help='Variables to be made available in '
                        'the template processing environment, as a GYP list '
                        '(e.g. --variables "channel=beta mstone=39")', default='')
    build_utils.AddDepfileOption(parser)
    options = parser.parse_args()

    inputs = build_utils.ParseGnList(options.inputs)

    # FIX: the messages below previously referred to a nonexistent
    # --output-zip flag; the actual flag is --outputs-zip.
    if (options.output is None) == (options.outputs_zip is None):
        parser.error('Exactly one of --output and --outputs-zip must be given')
    if options.output and len(inputs) != 1:
        parser.error('--output cannot be used with multiple inputs')
    if options.outputs_zip and not options.inputs_base_dir:
        parser.error('--inputs-base-dir must be given when --outputs-zip is used')

    variables = _ParseVariables(options.variables, parser.error)
    processor = JinjaProcessor(options.loader_base_dir, variables=variables)

    if options.output:
        _ProcessFile(processor, inputs[0], options.output)
    else:
        _ProcessFiles(processor, inputs, options.inputs_base_dir,
                      options.outputs_zip)

    if options.depfile:
        output = options.output or options.outputs_zip
        # The loaded templates are the true inputs for incremental builds.
        deps = processor.GetLoadedTemplates()
        build_utils.WriteDepfile(options.depfile, output, deps)
72,
3,
72,
5,
1464475027
] |
def test_frame_var_after_stop_at_implementation(self):
    """Test that we can find the implementation for an objective C type"""
    # The old (fragile) i386 ObjC runtime is not supported by this test.
    if self.getArchitecture() == 'i386':
        self.skipTest("requires modern objc runtime")
    self.build()
    self.shlib_names = ["libTestExt.dylib", "libTest.dylib"]
    self.common_setup()
    # Break on the '// break here' marker inside the category source file.
    line = line_number('TestExt/TestExt.m', '// break here')
    lldbutil.run_break_set_by_file_and_line(
        self, 'TestExt.m', line, num_expected_locations=1, loc_exact=True)
    self.runCmd("run", RUN_SUCCEEDED)
    # The stop reason of the thread should be breakpoint.
    self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                substrs=['stopped',
                         'stop reason = breakpoint'])
    self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
                substrs=[' resolved, hit count = 1'])
    # This should display correctly.
    self.expect(
        "expr 42",
        "A simple expression should execute correctly",
        substrs=[
            "42"])
21,
16,
21,
3,
1435959644
] |
def __init__(self, chrome_version='HEAD'):
    """Initializes a desktop test environment.

    Args:
      chrome_version: Optionally a chrome version to run the tests against.
    """
    self._chrome_version = chrome_version
131,
27,
131,
3,
1490828945
] |
def GlobalSetUp(self):
    """Sets up the global test environment state (no-op by default)."""
    pass
131,
27,
131,
3,
1490828945
] |
def GetDisabledJavaTestMatchers(self):
    """Get the list of disabled java test matchers.

    Returns:
      List of disabled test matchers, which may contain '*' wildcards.
    """
    get_disabled = _EXPECTATIONS['GetDisabledTestMatchers']
    return get_disabled(self.GetOS())
131,
27,
131,
3,
1490828945
] |
def GetPassedJavaTests(self):
    """Get the list of passed java tests.

    Returns:
      List of passed test names.
    """
    tests_path = os.path.join(_THIS_DIR, 'java_tests.txt')
    with open(tests_path, 'r') as f:
        test_names = [line.strip('\n') for line in f.readlines()]
    return _EXPECTATIONS['ApplyJavaTestFilter'](self.GetOS(), test_names)
131,
27,
131,
3,
1490828945
] |
def GetOS(self):
    """Return the platform name reported by util.GetPlatformName()."""
    return util.GetPlatformName()
131,
27,
131,
3,
1490828945
] |
def __init__(self, package, chrome_version='HEAD'):
    """Android test environment for the app identified by *package*."""
    super(AndroidTestEnvironment, self).__init__(chrome_version)
    self._package = package
    # Populated during GlobalSetUp once a healthy device is found.
    self._device = None
    self._forwarder = None
131,
27,
131,
3,
1490828945
] |
def GlobalSetUp(self):
    """Pick an attached device and forward the test HTTP/HTTPS ports to it.

    Raises:
      device_errors.NoDevicesError: if no healthy device is attached.
    """
    devil_chromium.Initialize()
    os.putenv('TEST_HTTP_PORT', str(ANDROID_TEST_HTTP_PORT))
    os.putenv('TEST_HTTPS_PORT', str(ANDROID_TEST_HTTPS_PORT))
    devices = device_utils.DeviceUtils.HealthyDevices()
    if not devices:
        raise device_errors.NoDevicesError()
    elif len(devices) > 1:
        # FIX: pass args lazily instead of eager '%' interpolation so the
        # message is only built when the warning is actually emitted.
        logging.warning('Multiple devices attached. Using %s.', devices[0])
    self._device = devices[0]
    forwarder.Forwarder.Map(
        [(ANDROID_TEST_HTTP_PORT, ANDROID_TEST_HTTP_PORT),
         (ANDROID_TEST_HTTPS_PORT, ANDROID_TEST_HTTPS_PORT)],
        self._device)
131,
27,
131,
3,
1490828945
] |
def GlobalTearDown(self):
    """Remove any port forwards established for the device in GlobalSetUp."""
    if self._device:
        forwarder.Forwarder.UnmapAllDevicePorts(self._device)
131,
27,
131,
3,
1490828945
] |
def fix_key(check_field, new_key):
    """Relabel sensor-config timeseries entries as *new_key*, moving each one
    from the timeseries db into the usercache db.

    Only entries where *check_field* exists are affected.
    """
    # NOTE(review): the sort key uses '/' rather than '.'; verify
    # 'metadata/write_ts' is the intended field path.
    print("First entry for "+new_key+" is %s" % list(edb.get_timeseries_db().find(
        {"metadata.key": "config/sensor_config",
         check_field: {"$exists": True}}).sort(
        "metadata/write_ts").limit(1)))
    udb = edb.get_usercache_db()
    tdb = edb.get_timeseries_db()
    for i, entry in enumerate(edb.get_timeseries_db().find(
            {"metadata.key": "config/sensor_config",
             check_field: {"$exists": True}})):
        entry["metadata"]["key"] = new_key
        inserted = udb.insert(entry)
        removed = tdb.remove(entry["_id"])
        # Log progress every 10000 entries.
        if i % 10000 == 0:
            print(inserted)
            print(removed)
20,
103,
20,
11,
1415342342
] |
def compile_progs():
    "Compile the patterns for matching to file name and line number."
    global file_line_progs
    progs = []
    for pat in file_line_pats:
        progs.append(re.compile(pat, re.IGNORECASE))
    file_line_progs = progs
310,
62,
310,
74,
1280013973
] |
def __init__(self, *args):
    """Editor window variant that can jump to a file:line from its text."""
    EditorWindow.__init__(self, *args)
    # Virtual event fired to open the file/line named under the cursor.
    self.text.bind("<<goto-file-line>>", self.goto_file_line)
310,
62,
310,
74,
1280013973
] |
def ispythonsource(self, filename):
    "Python source is only part of output: do not colorize."
    return False
310,
62,
310,
74,
1280013973
] |
def maybesave(self):
    "Customize EditorWindow to not display save file messagebox."
    if self.get_saved():
        return 'yes'
    return 'no'
310,
62,
310,
74,
1280013973
] |
def write(self, s, tags=(), mark="insert"):
    """Write text to text widget.

    The text is inserted at *mark* with *tags*, scrolled into view, and
    the widget is updated, giving the effect of seeing each line as it
    is added.

    Args:
        s: Text to insert into text widget.
        tags: Tuple of tag strings to apply on the insert.
        mark: Index for the insert.

    Return:
        Length of text inserted.
    """
    assert isinstance(s, str)
    widget = self.text
    widget.insert(mark, s, tags)
    widget.see(mark)
    widget.update()
    return len(s)
310,
62,
310,
74,
1280013973
] |
def flush(self):
    "No flushing needed as write() directly writes to widget."
    pass
310,
62,
310,
74,
1280013973
] |
def goto_file_line(self, event=None):
    """Handle request to open file/line.

    If the selected or previous line in the output window contains a
    file name and line number, open that file and position on the line;
    otherwise display an error messagebox.
    """
    current = self.text.get("insert linestart", "insert lineend")
    result = file_line_helper(current)
    if not result:
        # Try the previous line. This is handy e.g. in tracebacks,
        # where you tend to right-click on the displayed source line
        previous = self.text.get("insert -1line linestart",
                                 "insert -1line lineend")
        result = file_line_helper(previous)
        if not result:
            self.showerror(
                "No special line",
                "The line you point at doesn't look like "
                "a valid file name followed by a line number.",
                parent=self.text)
            return
    filename, lineno = result
    self.flist.gotofileline(filename, lineno)
310,
62,
310,
74,
1280013973
] |
def __init__(self, flist):
    self.flist = flist
    # Output window; created lazily by setup().
    self.owin = None
310,
62,
310,
74,
1280013973
] |
def setup(self):
    """Create the output window and configure its text tags."""
    self.owin = owin = OutputWindow(self.flist)
    text = owin.text
    # Apply only non-empty tag configurations from tagdefs.
    for tag, cnf in self.tagdefs.items():
        if cnf:
            text.tag_configure(tag, **cnf)
    # Keep the selection tag visible above other tags.
    text.tag_raise('sel')
    # Delegate writes straight to the output window.
    self.write = self.owin.write
310,
62,
310,
74,
1280013973
] |
def __init__(self, url='', credentials=None,
             get_credentials=True, http=None, model=None,
             log_request=False, log_response=False,
             credentials_args=None, default_global_params=None,
             additional_http_headers=None, response_encoding=None):
    """Create a new fusiontables handle."""
    super(FusiontablesV1, self).__init__(
        url or self.BASE_URL, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers,
        response_encoding=response_encoding)
    # One service wrapper per API collection.
    self.column = self.ColumnService(self)
    self.query = self.QueryService(self)
    self.style = self.StyleService(self)
    self.table = self.TableService(self)
    self.task = self.TaskService(self)
    self.template = self.TemplateService(self)
21,
16,
21,
3,
1435959644
] |
def __init__(self, client):
    super(FusiontablesV1.ColumnService, self).__init__(client)
    # No media-upload methods on this service.
    self._upload_configs = {
        }
21,
16,
21,
3,
1435959644
] |
def Get(self, request, global_params=None):
    r"""Retrieves a specific column by its id.

    Args:
      request: (FusiontablesColumnGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Column) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Insert(self, request, global_params=None):
    r"""Adds a new column to the table.

    Args:
      request: (FusiontablesColumnInsertRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Column) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Insert'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def List(self, request, global_params=None):
    r"""Retrieves a list of columns.

    Args:
      request: (FusiontablesColumnListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ColumnList) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Patch(self, request, global_params=None):
    r"""Updates the name or type of an existing column. This method supports patch semantics.

    Args:
      request: (FusiontablesColumnPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Column) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Patch'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Update(self, request, global_params=None):
    r"""Updates the name or type of an existing column.

    Args:
      request: (FusiontablesColumnUpdateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Column) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Update'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def __init__(self, client):
    super(FusiontablesV1.QueryService, self).__init__(client)
    # No media-upload methods on this service.
    self._upload_configs = {
        }
21,
16,
21,
3,
1435959644
] |
def SqlGet(self, request, global_params=None, download=None):
    r"""Executes an SQL SELECT/SHOW/DESCRIBE statement.

    Args:
      request: (FusiontablesQuerySqlGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
      download: (Download, default: None) If present, download
          data from the request via this stream.

    Returns:
      (Sqlresponse) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('SqlGet'), request,
        global_params=global_params, download=download)
21,
16,
21,
3,
1435959644
] |
def __init__(self, client):
    super(FusiontablesV1.StyleService, self).__init__(client)
    # No media-upload methods on this service.
    self._upload_configs = {
        }
21,
16,
21,
3,
1435959644
] |
def Get(self, request, global_params=None):
    r"""Gets a specific style.

    Args:
      request: (FusiontablesStyleGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StyleSetting) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Insert(self, request, global_params=None):
    r"""Adds a new style for the table.

    Args:
      request: (StyleSetting) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StyleSetting) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Insert'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def List(self, request, global_params=None):
    r"""Retrieves a list of styles.

    Args:
      request: (FusiontablesStyleListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StyleSettingList) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Patch(self, request, global_params=None):
    r"""Updates an existing style. This method supports patch semantics.

    Args:
      request: (StyleSetting) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StyleSetting) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Patch'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Update(self, request, global_params=None):
    r"""Updates an existing style.

    Args:
      request: (StyleSetting) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StyleSetting) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Update'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def __init__(self, client):
    super(FusiontablesV1.TableService, self).__init__(client)
    # Media-upload configuration for the two import endpoints.
    self._upload_configs = {
        'ImportRows': base_api.ApiUploadInfo(
            accept=['application/octet-stream'],
            max_size=262144000,
            resumable_multipart=True,
            resumable_path=u'/resumable/upload/fusiontables/v1/tables/{tableId}/import',
            simple_multipart=True,
            simple_path=u'/upload/fusiontables/v1/tables/{tableId}/import',
        ),
        'ImportTable': base_api.ApiUploadInfo(
            accept=['application/octet-stream'],
            max_size=262144000,
            resumable_multipart=True,
            resumable_path=u'/resumable/upload/fusiontables/v1/tables/import',
            simple_multipart=True,
            simple_path=u'/upload/fusiontables/v1/tables/import',
        ),
        }
21,
16,
21,
3,
1435959644
] |
def Delete(self, request, global_params=None):
    r"""Deletes a table.

    Args:
      request: (FusiontablesTableDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (FusiontablesTableDeleteResponse) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Delete'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Get(self, request, global_params=None):
    r"""Retrieves a specific table by its id.

    Args:
      request: (FusiontablesTableGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Table) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def ImportRows(self, request, global_params=None, upload=None):
    r"""Import more rows into a table.

    Args:
      request: (FusiontablesTableImportRowsRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
      upload: (Upload, default: None) If present, upload
          this stream with the request.

    Returns:
      (Import) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('ImportRows'), request,
        global_params=global_params, upload=upload,
        upload_config=self.GetUploadConfig('ImportRows'))
21,
16,
21,
3,
1435959644
] |
def ImportTable(self, request, global_params=None, upload=None):
    r"""Import a new table.

    Args:
      request: (FusiontablesTableImportTableRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
      upload: (Upload, default: None) If present, upload
          this stream with the request.

    Returns:
      (Table) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('ImportTable'), request,
        global_params=global_params, upload=upload,
        upload_config=self.GetUploadConfig('ImportTable'))
21,
16,
21,
3,
1435959644
] |
def Insert(self, request, global_params=None):
    r"""Creates a new table.

    Args:
      request: (Table) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Table) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Insert'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def List(self, request, global_params=None):
    r"""Retrieves a list of tables a user owns.

    Args:
      request: (FusiontablesTableListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (TableList) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Patch(self, request, global_params=None):
    r"""Updates an existing table. Unless explicitly requested, only the name, description, and attribution will be updated. This method supports patch semantics.

    Args:
      request: (FusiontablesTablePatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Table) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Patch'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Update(self, request, global_params=None):
    r"""Updates an existing table. Unless explicitly requested, only the name, description, and attribution will be updated.

    Args:
      request: (FusiontablesTableUpdateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Table) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Update'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def __init__(self, client):
    super(FusiontablesV1.TaskService, self).__init__(client)
    # No media-upload methods on this service.
    self._upload_configs = {
        }
21,
16,
21,
3,
1435959644
] |
def Get(self, request, global_params=None):
    r"""Retrieves a specific task by its id.

    Args:
      request: (FusiontablesTaskGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Task) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def List(self, request, global_params=None):
    r"""Retrieves a list of tasks.

    Args:
      request: (FusiontablesTaskListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (TaskList) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def __init__(self, client):
    super(FusiontablesV1.TemplateService, self).__init__(client)
    # No media-upload methods on this service.
    self._upload_configs = {
        }
21,
16,
21,
3,
1435959644
] |
def Get(self, request, global_params=None):
    r"""Retrieves a specific template by its id.

    Args:
      request: (FusiontablesTemplateGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Template) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Insert(self, request, global_params=None):
    r"""Creates a new template for the table.

    Args:
      request: (Template) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Template) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Insert'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def List(self, request, global_params=None):
    r"""Retrieves a list of templates.

    Args:
      request: (FusiontablesTemplateListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (TemplateList) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Patch(self, request, global_params=None):
    r"""Updates an existing template. This method supports patch semantics.

    Args:
      request: (Template) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Template) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Patch'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def Update(self, request, global_params=None):
    r"""Updates an existing template.

    Args:
      request: (Template) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Template) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Update'), request,
        global_params=global_params)
21,
16,
21,
3,
1435959644
] |
def setUp(self):
    """Create a fresh user fixture for each test."""
    super().setUp()
    self.user = UserFactory()
5,
3,
5,
6,
1390926698
] |
def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
                        downloaded_custom_connector, driver_curl_source,
                        driver_curl_target, java_home,
                        repo_name, plugin_repo_dict,
                        ranger_env_properties, plugin_properties,
                        policy_user, policymgr_mgr_url,
                        plugin_enabled, component_user, component_group, api_version=None, skip_if_rangeradmin_down = True, **kwargs):
  # Download the JDBC driver when a real source URL is configured;
  # a "/None" suffix means no driver was configured for this stack.
  if driver_curl_source and not driver_curl_source.endswith("/None"):
    if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
      # Drop the stale driver jar before fetching the new one.
      File(previous_jdbc_jar, action='delete')
    File(downloaded_custom_connector,
         content = DownloadSource(driver_curl_source),
         mode = 0644
         )
    # Copy into place with sudo, replacing any existing target.
    Execute(('cp', '--remove-destination', downloaded_custom_connector, driver_curl_target),
            path=["/bin", "/usr/bin/"],
            sudo=True
            )
    File(driver_curl_target, mode=0644)
  # Normalize the Ranger policy manager URL (strip trailing slashes).
  if policymgr_mgr_url.endswith('/'):
    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
  stack_root = Script.get_stack_root()
  stack_version = get_stack_version(component_select_name)
  # Location of the plugin's install.properties for this stack version.
  file_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install.properties')
3,
7,
3,
3,
1478181309
] |
def chunk(l, n):
  """Yield successive slices of `l` of length `n` (last may be shorter)."""
  start = 0
  while start < len(l):
    yield l[start:start + n]
    start += n
171949,
87931,
171949,
2300,
1446859160
] |
def setUp(self):
    """Create the fixture: 10 TFRecord files of 10 records each."""
    super(AutoShardDatasetTest, self).setUp()
    self._num_files = 10
    self._num_records = 10
    # _createFiles comes from the reader-test base class; presumably writes
    # the fixture files and returns their paths -- confirm in the base class.
    # (Removed dataset-dump residue that was fused onto this line.)
    self._filenames = self._createFiles()
171949,
87931,
171949,
2300,
1446859160
] |
def assertDatasetProducesWithShuffle(self, dataset, expected, batch,
                                     num_examples, shuffle):
    """Check dataset output, order-insensitively when `shuffle` is set.

    When not shuffling, delegates to assertDatasetProduces with `expected`
    re-chunked into batches of size `batch`. When shuffling, drains
    `num_examples` elements, flattens them, compares as multisets, and
    verifies the dataset is then exhausted.
    """
    if not shuffle:
      self.assertDatasetProduces(dataset, list(chunk(expected, batch)))
      return
    get_next = self.getNext(dataset)
    produced = []
    for _ in range(num_examples):
      element = self.evaluate(get_next())
      # Zipped datasets yield tuples; plain ones yield ndarray batches.
      if isinstance(element, tuple):
        produced.extend(element)
      else:
        produced.extend(element.tolist())
    self.assertCountEqual(produced, expected)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
171949,
87931,
171949,
2300,
1446859160
] |
def testFlatMapReaderPipeline(self, shuffle):
    # Shard a list_files -> flat_map -> batch pipeline 5 ways at index 3:
    # with 10 files, that worker owns files 3 and 8.
    ds = dataset_ops.Dataset.list_files(self._filenames, shuffle=shuffle)
    ds = ds.flat_map(core_readers.TFRecordDataset)
    ds = ds.batch(5)
    ds = distribute._AutoShardDataset(ds, 5, 3)
    expected = []
    for file_idx in (3, 8):
      for record_idx in range(10):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProducesWithShuffle(ds, expected, 5, 4, shuffle)
171949,
87931,
171949,
2300,
1446859160
] |
def testDatasetOfReaderDatasetsPipeline(self, batch_size):
    # Covers the case where list_files may return several files per pattern
    # (glob wildcards): a dataset of reader datasets, flattened then
    # interleaved. The prefetches between flat_map and interleave should be
    # a no-op for sharding, since only reader datasets remain after flat_map.
    def group(seq, size):
      # Partition `seq` into consecutive groups of at most `size` items.
      total = len(seq)
      for start in range(0, total, size):
        yield seq[start:min(start + size, total)]

    reader_datasets = [
        dataset_ops.Dataset.list_files(files, shuffle=False).map(
            core_readers.TFRecordDataset)
        for files in group(self._filenames, batch_size)
    ]
    ds = dataset_ops.Dataset.from_tensor_slices(reader_datasets)
    ds = ds.flat_map(lambda x: x)
    ds = ds.prefetch(1)
    ds = ds.prefetch(1)
    ds = ds.interleave(lambda x: x, cycle_length=1, num_parallel_calls=1)
    ds = distribute._AutoShardDataset(ds, 5, 0)
    expected = []
    for file_idx in (0, 5):
      for record_idx in range(10):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProduces(ds, expected)
171949,
87931,
171949,
2300,
1446859160
] |
def testZipReaderPipeline(self):
    # Zipping two identical reader pipelines and sharding 5 ways at index 3
    # should yield matching record pairs from files 3 and 8 only.
    def make_reader():
      files = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
      return files.apply(
          interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))

    ds = dataset_ops.Dataset.zip((make_reader(), make_reader()))
    ds = distribute._AutoShardDataset(ds, 5, 3)
    expected = []
    for record_idx in range(10):
      for file_idx in (3, 8):
        record = b"Record %d of file %d" % (record_idx, file_idx)
        expected.append((record, record))
    self.assertDatasetProduces(ds, expected)
171949,
87931,
171949,
2300,
1446859160
] |
def testConcatenateReaderPipeline(self, shuffle):
    # Two concatenated reader pipelines shard identically, so the sharded
    # output is the single-pipeline expectation repeated twice.
    def make_pipeline():
      files = dataset_ops.Dataset.list_files(self._filenames, shuffle=shuffle)
      readers_ds = files.apply(
          interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
      return readers_ds.batch(5)

    ds = make_pipeline().concatenate(make_pipeline())
    ds = distribute._AutoShardDataset(ds, 5, 3)
    half = []
    for record_idx in range(10):
      for file_idx in (3, 8):
        half.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProducesWithShuffle(ds, half * 2, 5, 8, shuffle)
171949,
87931,
171949,
2300,
1446859160
] |
def testPipelineWithMap(self, shuffle):
    # NOTE(review): `shuffle` is accepted but list_files is pinned to
    # shuffle=False here -- behavior preserved as-is; confirm whether the
    # parameter was meant to be forwarded.
    ds = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
    ds = ds.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    # substr drops the leading "Re" of every record before batching.
    ds = ds.map(lambda x: string_ops.substr_v2(x, 2, 1000))
    ds = ds.batch(5)
    ds = distribute._AutoShardDataset(ds, 5, 3)
    expected = []
    for record_idx in range(10):
      for file_idx in (3, 8):
        expected.append(b"cord %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProducesWithShuffle(ds, expected, 5, 4, shuffle)
171949,
87931,
171949,
2300,
1446859160
] |
def testDirectFilenameTFRecordReaderPipeline(self):
    # TFRecordDataset built directly from filenames: 5-way sharding at
    # index 0 keeps files 0 and 5 whole.
    ds = core_readers.TFRecordDataset(self._filenames)
    ds = distribute._AutoShardDataset(ds, 5, 0)
    expected = []
    for file_idx in (0, 5):
      for record_idx in range(10):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProduces(ds, expected)
171949,
87931,
171949,
2300,
1446859160
] |
def testValidPipelineWithRangeDataset(self, shuffle):
    # Filenames are synthesized from a range dataset rather than list_files;
    # auto-sharding should still locate the reader and shard by file.
    ds = dataset_ops.Dataset.range(self._num_files)
    ds = ds.map(lambda n: string_ops.string_join(  # pylint:disable=g-long-lambda
        [self.get_temp_dir(),
         string_ops.string_format("/tf_record.{}.txt", [n])]))
    ds = ds.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    # substr drops the leading "Re" of every record before batching.
    ds = ds.map(lambda x: string_ops.substr_v2(x, 2, 1000))
    ds = ds.batch(5)
    ds = distribute._AutoShardDataset(ds, 5, 3)
    expected = []
    for record_idx in range(10):
      for file_idx in (3, 8):
        expected.append(b"cord %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProducesWithShuffle(ds, expected, 5, 4, shuffle)
171949,
87931,
171949,
2300,
1446859160
] |
def testStandardReaderPipeline(self, params):
    # Exercises the high-level make_tf_record_dataset helper with 2-way
    # auto-sharding, then verifies exact record contents and exhaustion.
    num_epochs, index, batch_size, parallel_reads = params
    ds = readers.make_tf_record_dataset(
        file_pattern=self._filenames,
        num_epochs=num_epochs,
        batch_size=batch_size,
        parser_fn=None,
        num_parallel_reads=parallel_reads,
        drop_final_batch=True,
        shuffle=False)
    ds = distribute._AutoShardDataset(ds, 2, index)
    get_next = self.getNext(ds)
    self._verify_records(
        get_next,
        batch_size=batch_size,
        # This worker owns every other file starting from its shard index.
        file_index=list(range(index, self._num_records, 2)),
        num_epochs=num_epochs,
        interleave_cycle_length=parallel_reads,
        drop_final_batch=True,
        use_parser_fn=None)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
171949,
87931,
171949,
2300,
1446859160
] |
def testSampleResNetPipeline(self, shuffle):
    # Typical ResNet-style input pipeline: parallel-interleaved TFRecord
    # reads, batched, then sharded 5 ways at index 3 (files 3 and 8).
    ds = dataset_ops.Dataset.list_files(self._filenames, shuffle=shuffle)
    ds = ds.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    ds = ds.batch(5)
    ds = distribute._AutoShardDataset(ds, 5, 3)
    expected = []
    for record_idx in range(10):
      for file_idx in (3, 8):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProducesWithShuffle(ds, expected, 5, 4, shuffle)
171949,
87931,
171949,
2300,
1446859160
] |
def testShardByDataBeforePrefetch(self, sharding_policy):
    # The Shard op must be hoisted above Prefetch (checked via assert_next),
    # so worker 0 of 2 sees the even elements of range(4).
    ds = dataset_ops.Dataset.range(4)
    ds = ds.apply(testing.assert_next(["Shard", "Prefetch"]))
    ds = ds.prefetch(1)
    opts = options_lib.Options()
    opts.experimental_distribute.auto_shard_policy = sharding_policy
    ds = ds.with_options(opts)
    ds = distribute._AutoShardDataset(ds, 2, 0)
    self.assertDatasetProduces(ds, [0, 2])
171949,
87931,
171949,
2300,
1446859160
] |
def testReplicateAndShardProduceDisjointData(self, shuffle, sharding_policy):
    # Replicating one serialized pipeline onto two "workers" and sharding at
    # indices 0 and 1 must partition the data with no overlap.
    source = dataset_ops.Dataset.list_files(self._filenames, shuffle=shuffle)
    source = source.flat_map(core_readers.TFRecordDataset)
    graph_def = source._as_serialized_graph(
        strip_device_assignment=True,
        external_state_policy=options_lib.ExternalStatePolicy.WARN)
    opts = options_lib.Options()
    opts.experimental_distribute.auto_shard_policy = sharding_policy
    shards = []
    for worker_index in (0, 1):
      replica = distribute._RemoteDataset(graph_def, "/device:CPU:0",
                                          source.element_spec)
      replica = replica.with_options(opts)
      shards.append(distribute._AutoShardDataset(replica, 2, worker_index))
    elements = [set(self.getAllDatasetElements(s)) for s in shards]
    self.assertEmpty(elements[0].intersection(elements[1]))
171949,
87931,
171949,
2300,
1446859160
] |
def testWorkersGreaterThanNumFilesWithDataSharding(self):
    # With DATA sharding, individual elements (not files) are partitioned,
    # so worker 0 of 5 gets records 0 and 5 from every one of the 10 files.
    opts = options_lib.Options()
    opts.experimental_distribute.auto_shard_policy = (
        options_lib.AutoShardPolicy.DATA)
    ds = core_readers._TFRecordDataset(self._filenames)
    ds = ds.with_options(opts)
    ds = distribute._AutoShardDataset(ds, 5, 0)
    expected = []
    for file_idx in range(10):
      for record_idx in (0, 5):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProduces(ds, expected)
171949,
87931,
171949,
2300,
1446859160
] |
def testAutoshardPolicyOff(self):
    # With auto-sharding disabled, every worker sees the full dataset:
    # all records of all files.
    opts = options_lib.Options()
    opts.experimental_distribute.auto_shard_policy = (
        options_lib.AutoShardPolicy.OFF)
    ds = core_readers._TFRecordDataset(self._filenames)
    ds = ds.with_options(opts)
    ds = distribute._AutoShardDataset(ds, 5, 0)
    expected = []
    for file_idx in range(10):
      for record_idx in range(10):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProduces(ds, expected)
171949,
87931,
171949,
2300,
1446859160
] |
def testFileShardingWithoutReaderDatasetOp(self):
    # FILE sharding requires a file-reader op in the pipeline; a plain range
    # dataset has none, so auto-sharding must fail with NotFoundError.
    opts = options_lib.Options()
    opts.experimental_distribute.auto_shard_policy = (
        options_lib.AutoShardPolicy.FILE)
    ds = dataset_ops.Dataset.range(1024)
    ds = ds.with_options(opts)
    with self.assertRaises(errors.NotFoundError):
      ds = distribute._AutoShardDataset(ds, 10, 0)
      self.evaluate(self.getNext(ds)())
171949,
87931,
171949,
2300,
1446859160
] |
def testWorkersGreaterThanNumFiles(self):
    # 500 shards over 10 files: worker 499 owns no files, hence no data.
    ds = dataset_ops.Dataset.list_files(self._filenames)
    ds = ds.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    ds = ds.batch(5)
    ds = distribute._AutoShardDataset(ds, 500, 499)
    self.assertDatasetProduces(ds, [])
171949,
87931,
171949,
2300,
1446859160
] |
def testTFRecordReaderWithDirectFileNames(self):
    # _TFRecordDataset is the raw reader op (no flat_map wrapper), which
    # forces element-level sharding: records 0 and 5 from every file.
    ds = core_readers._TFRecordDataset(self._filenames)
    ds = distribute._AutoShardDataset(ds, 5, 0)
    expected = []
    for file_idx in range(10):
      for record_idx in (0, 5):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProduces(ds, expected)
171949,
87931,
171949,
2300,
1446859160
] |
def testTFRecordReaderWithDirectFileNamesAndShapes(self):
    # Raw reader op followed by batch (which carries output_types/shapes);
    # 2-way sharding at index 0 keeps the first half of each file's records.
    ds = core_readers._TFRecordDataset(self._filenames)
    ds = ds.batch(5)
    ds = distribute._AutoShardDataset(ds, 2, 0)
    expected = []
    for file_idx in range(10):
      for record_idx in range(5):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProduces(ds, list(chunk(expected, 5)))
171949,
87931,
171949,
2300,
1446859160
] |
def testShardOutOfRange(self):
    # Asking for 10 shards of a 5-element dataset is invalid.
    ds = dataset_ops.Dataset.range(5)
    with self.assertRaises(errors.InvalidArgumentError):
      ds = distribute._AutoShardDataset(ds, 10, 0)
      self.evaluate(self.getNext(ds)())
171949,
87931,
171949,
2300,
1446859160
] |
def testShardOutOfRangeEmptyDataset(self):
    # Sharding an empty dataset surfaces OutOfRangeError on first access.
    ds = dataset_ops.Dataset.range(0)
    with self.assertRaises(errors.OutOfRangeError):
      ds = distribute._AutoShardDataset(ds, 10, 0)
      self.evaluate(self.getNext(ds)())
171949,
87931,
171949,
2300,
1446859160
] |
def testNoReaderPipelines(self):
    # Without any reader op, auto-sharding falls back to element sharding:
    # worker 0 of 2 sees the even elements.
    ds = dataset_ops.Dataset.range(1024)
    ds = distribute._AutoShardDataset(ds, 2, 0)
    self.assertDatasetProduces(ds, list(range(0, 1024, 2)))
171949,
87931,
171949,
2300,
1446859160
] |
def testUnknownOpInPipelineStillShardsAtTheEnd(self):
    # unique() is not rewritable by the auto-shard pass, so the shard is
    # appended at the end of the pipeline (element-level: records 0 and 5).
    ds = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
    ds = ds.flat_map(core_readers.TFRecordDataset)
    ds = ds.apply(unique.unique())
    ds = distribute._AutoShardDataset(ds, 5, 0)
    expected = []
    for file_idx in range(10):
      for record_idx in (0, 5):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProduces(ds, expected)
171949,
87931,
171949,
2300,
1446859160
] |
def testInvalidWorkerIndex(self):
    # Worker index must be < num_workers; index 2 of 2 is rejected.
    ds = dataset_ops.Dataset.list_files(self._filenames)
    ds = ds.flat_map(core_readers.TFRecordDataset)
    ds = ds.batch(5)
    with self.assertRaises(errors.InvalidArgumentError):
      ds = distribute._AutoShardDataset(ds, 2, 2)
      self.evaluate(self.getNext(ds)())
171949,
87931,
171949,
2300,
1446859160
] |
def testAssertCardinality(self):
    # assert_cardinality in the pipeline should not block file-level
    # sharding: 5 shards at index 0 keep files 0 and 5.
    ds = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
    ds = ds.flat_map(core_readers.TFRecordDataset)
    ds = ds.batch(5)
    ds = ds.apply(cardinality.assert_cardinality(42))
    ds = distribute._AutoShardDataset(ds, 5, 0)
    expected = []
    for file_idx in (0, 5):
      for record_idx in range(10):
        expected.append(b"Record %d of file %d" % (record_idx, file_idx))
    self.assertDatasetProduces(ds, list(chunk(expected, 5)))
171949,
87931,
171949,
2300,
1446859160
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.