repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.kernels_pull_cli | def kernels_pull_cli(self,
kernel,
kernel_opt=None,
path=None,
metadata=False):
""" client wrapper for kernels_pull
"""
kernel = kernel or kernel_opt
effective_path = self.kernels_pull(
kernel, path=path, metadata=metadata, quiet=False)
if metadata:
print('Source code and metadata downloaded to ' + effective_path)
else:
print('Source code downloaded to ' + effective_path) | python | def kernels_pull_cli(self,
kernel,
kernel_opt=None,
path=None,
metadata=False):
""" client wrapper for kernels_pull
"""
kernel = kernel or kernel_opt
effective_path = self.kernels_pull(
kernel, path=path, metadata=metadata, quiet=False)
if metadata:
print('Source code and metadata downloaded to ' + effective_path)
else:
print('Source code downloaded to ' + effective_path) | [
"def",
"kernels_pull_cli",
"(",
"self",
",",
"kernel",
",",
"kernel_opt",
"=",
"None",
",",
"path",
"=",
"None",
",",
"metadata",
"=",
"False",
")",
":",
"kernel",
"=",
"kernel",
"or",
"kernel_opt",
"effective_path",
"=",
"self",
".",
"kernels_pull",
"(",
"kernel",
",",
"path",
"=",
"path",
",",
"metadata",
"=",
"metadata",
",",
"quiet",
"=",
"False",
")",
"if",
"metadata",
":",
"print",
"(",
"'Source code and metadata downloaded to '",
"+",
"effective_path",
")",
"else",
":",
"print",
"(",
"'Source code downloaded to '",
"+",
"effective_path",
")"
] | client wrapper for kernels_pull | [
"client",
"wrapper",
"for",
"kernels_pull"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L1965-L1978 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.kernels_output | def kernels_output(self, kernel, path, force=False, quiet=True):
""" retrieve output for a specified kernel
Parameters
==========
kernel: the kernel to output
path: the path to pull files to on the filesystem
force: if output already exists, force overwrite (default False)
quiet: suppress verbosity (default is True)
"""
if kernel is None:
raise ValueError('A kernel must be specified')
if '/' in kernel:
self.validate_kernel_string(kernel)
kernel_url_list = kernel.split('/')
owner_slug = kernel_url_list[0]
kernel_slug = kernel_url_list[1]
else:
owner_slug = self.get_config_value(self.CONFIG_NAME_USER)
kernel_slug = kernel
if path is None:
target_dir = self.get_default_download_dir('kernels', owner_slug,
kernel_slug, 'output')
else:
target_dir = path
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if not os.path.isdir(target_dir):
raise ValueError(
'You must specify a directory for the kernels output')
response = self.process_response(
self.kernel_output_with_http_info(owner_slug, kernel_slug))
outfiles = []
for item in response['files']:
outfile = os.path.join(target_dir, item['fileName'])
outfiles.append(outfile)
download_response = requests.get(item['url'])
if force or self.download_needed(item, outfile, quiet):
os.makedirs(os.path.split(outfile)[0], exist_ok=True)
with open(outfile, 'wb') as out:
out.write(download_response.content)
if not quiet:
print('Output file downloaded to %s' % outfile)
log = response['log']
if log:
outfile = os.path.join(target_dir, kernel_slug + '.log')
outfiles.append(outfile)
with open(outfile, 'w') as out:
out.write(log)
if not quiet:
print('Kernel log downloaded to %s ' % outfile)
return outfiles | python | def kernels_output(self, kernel, path, force=False, quiet=True):
""" retrieve output for a specified kernel
Parameters
==========
kernel: the kernel to output
path: the path to pull files to on the filesystem
force: if output already exists, force overwrite (default False)
quiet: suppress verbosity (default is True)
"""
if kernel is None:
raise ValueError('A kernel must be specified')
if '/' in kernel:
self.validate_kernel_string(kernel)
kernel_url_list = kernel.split('/')
owner_slug = kernel_url_list[0]
kernel_slug = kernel_url_list[1]
else:
owner_slug = self.get_config_value(self.CONFIG_NAME_USER)
kernel_slug = kernel
if path is None:
target_dir = self.get_default_download_dir('kernels', owner_slug,
kernel_slug, 'output')
else:
target_dir = path
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if not os.path.isdir(target_dir):
raise ValueError(
'You must specify a directory for the kernels output')
response = self.process_response(
self.kernel_output_with_http_info(owner_slug, kernel_slug))
outfiles = []
for item in response['files']:
outfile = os.path.join(target_dir, item['fileName'])
outfiles.append(outfile)
download_response = requests.get(item['url'])
if force or self.download_needed(item, outfile, quiet):
os.makedirs(os.path.split(outfile)[0], exist_ok=True)
with open(outfile, 'wb') as out:
out.write(download_response.content)
if not quiet:
print('Output file downloaded to %s' % outfile)
log = response['log']
if log:
outfile = os.path.join(target_dir, kernel_slug + '.log')
outfiles.append(outfile)
with open(outfile, 'w') as out:
out.write(log)
if not quiet:
print('Kernel log downloaded to %s ' % outfile)
return outfiles | [
"def",
"kernels_output",
"(",
"self",
",",
"kernel",
",",
"path",
",",
"force",
"=",
"False",
",",
"quiet",
"=",
"True",
")",
":",
"if",
"kernel",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'A kernel must be specified'",
")",
"if",
"'/'",
"in",
"kernel",
":",
"self",
".",
"validate_kernel_string",
"(",
"kernel",
")",
"kernel_url_list",
"=",
"kernel",
".",
"split",
"(",
"'/'",
")",
"owner_slug",
"=",
"kernel_url_list",
"[",
"0",
"]",
"kernel_slug",
"=",
"kernel_url_list",
"[",
"1",
"]",
"else",
":",
"owner_slug",
"=",
"self",
".",
"get_config_value",
"(",
"self",
".",
"CONFIG_NAME_USER",
")",
"kernel_slug",
"=",
"kernel",
"if",
"path",
"is",
"None",
":",
"target_dir",
"=",
"self",
".",
"get_default_download_dir",
"(",
"'kernels'",
",",
"owner_slug",
",",
"kernel_slug",
",",
"'output'",
")",
"else",
":",
"target_dir",
"=",
"path",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"target_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"target_dir",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"target_dir",
")",
":",
"raise",
"ValueError",
"(",
"'You must specify a directory for the kernels output'",
")",
"response",
"=",
"self",
".",
"process_response",
"(",
"self",
".",
"kernel_output_with_http_info",
"(",
"owner_slug",
",",
"kernel_slug",
")",
")",
"outfiles",
"=",
"[",
"]",
"for",
"item",
"in",
"response",
"[",
"'files'",
"]",
":",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_dir",
",",
"item",
"[",
"'fileName'",
"]",
")",
"outfiles",
".",
"append",
"(",
"outfile",
")",
"download_response",
"=",
"requests",
".",
"get",
"(",
"item",
"[",
"'url'",
"]",
")",
"if",
"force",
"or",
"self",
".",
"download_needed",
"(",
"item",
",",
"outfile",
",",
"quiet",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"split",
"(",
"outfile",
")",
"[",
"0",
"]",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"download_response",
".",
"content",
")",
"if",
"not",
"quiet",
":",
"print",
"(",
"'Output file downloaded to %s'",
"%",
"outfile",
")",
"log",
"=",
"response",
"[",
"'log'",
"]",
"if",
"log",
":",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_dir",
",",
"kernel_slug",
"+",
"'.log'",
")",
"outfiles",
".",
"append",
"(",
"outfile",
")",
"with",
"open",
"(",
"outfile",
",",
"'w'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"log",
")",
"if",
"not",
"quiet",
":",
"print",
"(",
"'Kernel log downloaded to %s '",
"%",
"outfile",
")",
"return",
"outfiles"
] | retrieve output for a specified kernel
Parameters
==========
kernel: the kernel to output
path: the path to pull files to on the filesystem
force: if output already exists, force overwrite (default False)
quiet: suppress verbosity (default is True) | [
"retrieve",
"output",
"for",
"a",
"specified",
"kernel",
"Parameters",
"==========",
"kernel",
":",
"the",
"kernel",
"to",
"output",
"path",
":",
"the",
"path",
"to",
"pull",
"files",
"to",
"on",
"the",
"filesystem",
"force",
":",
"if",
"output",
"already",
"exists",
"force",
"overwrite",
"(",
"default",
"False",
")",
"quiet",
":",
"suppress",
"verbosity",
"(",
"default",
"is",
"True",
")"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L1980-L2036 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.kernels_output_cli | def kernels_output_cli(self,
kernel,
kernel_opt=None,
path=None,
force=False,
quiet=False):
""" client wrapper for kernels_output, with same arguments. Extra
arguments are described below, and see kernels_output for others.
Parameters
==========
kernel_opt: option from client instead of kernel, if not defined
"""
kernel = kernel or kernel_opt
self.kernels_output(kernel, path, force, quiet) | python | def kernels_output_cli(self,
kernel,
kernel_opt=None,
path=None,
force=False,
quiet=False):
""" client wrapper for kernels_output, with same arguments. Extra
arguments are described below, and see kernels_output for others.
Parameters
==========
kernel_opt: option from client instead of kernel, if not defined
"""
kernel = kernel or kernel_opt
self.kernels_output(kernel, path, force, quiet) | [
"def",
"kernels_output_cli",
"(",
"self",
",",
"kernel",
",",
"kernel_opt",
"=",
"None",
",",
"path",
"=",
"None",
",",
"force",
"=",
"False",
",",
"quiet",
"=",
"False",
")",
":",
"kernel",
"=",
"kernel",
"or",
"kernel_opt",
"self",
".",
"kernels_output",
"(",
"kernel",
",",
"path",
",",
"force",
",",
"quiet",
")"
] | client wrapper for kernels_output, with same arguments. Extra
arguments are described below, and see kernels_output for others.
Parameters
==========
kernel_opt: option from client instead of kernel, if not defined | [
"client",
"wrapper",
"for",
"kernels_output",
"with",
"same",
"arguments",
".",
"Extra",
"arguments",
"are",
"described",
"below",
"and",
"see",
"kernels_output",
"for",
"others",
".",
"Parameters",
"==========",
"kernel_opt",
":",
"option",
"from",
"client",
"instead",
"of",
"kernel",
"if",
"not",
"defined"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2038-L2051 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.kernels_status | def kernels_status(self, kernel):
""" call to the api to get the status of a kernel.
Parameters
==========
kernel: the kernel to get the status for
"""
if kernel is None:
raise ValueError('A kernel must be specified')
if '/' in kernel:
self.validate_kernel_string(kernel)
kernel_url_list = kernel.split('/')
owner_slug = kernel_url_list[0]
kernel_slug = kernel_url_list[1]
else:
owner_slug = self.get_config_value(self.CONFIG_NAME_USER)
kernel_slug = kernel
response = self.process_response(
self.kernel_status_with_http_info(owner_slug, kernel_slug))
return response | python | def kernels_status(self, kernel):
""" call to the api to get the status of a kernel.
Parameters
==========
kernel: the kernel to get the status for
"""
if kernel is None:
raise ValueError('A kernel must be specified')
if '/' in kernel:
self.validate_kernel_string(kernel)
kernel_url_list = kernel.split('/')
owner_slug = kernel_url_list[0]
kernel_slug = kernel_url_list[1]
else:
owner_slug = self.get_config_value(self.CONFIG_NAME_USER)
kernel_slug = kernel
response = self.process_response(
self.kernel_status_with_http_info(owner_slug, kernel_slug))
return response | [
"def",
"kernels_status",
"(",
"self",
",",
"kernel",
")",
":",
"if",
"kernel",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'A kernel must be specified'",
")",
"if",
"'/'",
"in",
"kernel",
":",
"self",
".",
"validate_kernel_string",
"(",
"kernel",
")",
"kernel_url_list",
"=",
"kernel",
".",
"split",
"(",
"'/'",
")",
"owner_slug",
"=",
"kernel_url_list",
"[",
"0",
"]",
"kernel_slug",
"=",
"kernel_url_list",
"[",
"1",
"]",
"else",
":",
"owner_slug",
"=",
"self",
".",
"get_config_value",
"(",
"self",
".",
"CONFIG_NAME_USER",
")",
"kernel_slug",
"=",
"kernel",
"response",
"=",
"self",
".",
"process_response",
"(",
"self",
".",
"kernel_status_with_http_info",
"(",
"owner_slug",
",",
"kernel_slug",
")",
")",
"return",
"response"
] | call to the api to get the status of a kernel.
Parameters
==========
kernel: the kernel to get the status for | [
"call",
"to",
"the",
"api",
"to",
"get",
"the",
"status",
"of",
"a",
"kernel",
".",
"Parameters",
"==========",
"kernel",
":",
"the",
"kernel",
"to",
"get",
"the",
"status",
"for"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2053-L2071 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.kernels_status_cli | def kernels_status_cli(self, kernel, kernel_opt=None):
""" client wrapper for kernel_status
Parameters
==========
kernel_opt: additional option from the client, if kernel not defined
"""
kernel = kernel or kernel_opt
response = self.kernels_status(kernel)
status = response['status']
message = response['failureMessage']
if message:
print('%s has status "%s"' % (kernel, status))
print('Failure message: "%s"' % message)
else:
print('%s has status "%s"' % (kernel, status)) | python | def kernels_status_cli(self, kernel, kernel_opt=None):
""" client wrapper for kernel_status
Parameters
==========
kernel_opt: additional option from the client, if kernel not defined
"""
kernel = kernel or kernel_opt
response = self.kernels_status(kernel)
status = response['status']
message = response['failureMessage']
if message:
print('%s has status "%s"' % (kernel, status))
print('Failure message: "%s"' % message)
else:
print('%s has status "%s"' % (kernel, status)) | [
"def",
"kernels_status_cli",
"(",
"self",
",",
"kernel",
",",
"kernel_opt",
"=",
"None",
")",
":",
"kernel",
"=",
"kernel",
"or",
"kernel_opt",
"response",
"=",
"self",
".",
"kernels_status",
"(",
"kernel",
")",
"status",
"=",
"response",
"[",
"'status'",
"]",
"message",
"=",
"response",
"[",
"'failureMessage'",
"]",
"if",
"message",
":",
"print",
"(",
"'%s has status \"%s\"'",
"%",
"(",
"kernel",
",",
"status",
")",
")",
"print",
"(",
"'Failure message: \"%s\"'",
"%",
"message",
")",
"else",
":",
"print",
"(",
"'%s has status \"%s\"'",
"%",
"(",
"kernel",
",",
"status",
")",
")"
] | client wrapper for kernel_status
Parameters
==========
kernel_opt: additional option from the client, if kernel not defined | [
"client",
"wrapper",
"for",
"kernel_status",
"Parameters",
"==========",
"kernel_opt",
":",
"additional",
"option",
"from",
"the",
"client",
"if",
"kernel",
"not",
"defined"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2073-L2087 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.download_needed | def download_needed(self, response, outfile, quiet=True):
""" determine if a download is needed based on timestamp. Return True
if needed (remote is newer) or False if local is newest.
Parameters
==========
response: the response from the API
outfile: the output file to write to
quiet: suppress verbose output (default is True)
"""
try:
remote_date = datetime.strptime(response.headers['Last-Modified'],
'%a, %d %b %Y %X %Z')
if isfile(outfile):
local_date = datetime.fromtimestamp(os.path.getmtime(outfile))
if remote_date <= local_date:
if not quiet:
print(os.path.basename(outfile) +
': Skipping, found more recently modified local '
'copy (use --force to force download)')
return False
except:
pass
return True | python | def download_needed(self, response, outfile, quiet=True):
""" determine if a download is needed based on timestamp. Return True
if needed (remote is newer) or False if local is newest.
Parameters
==========
response: the response from the API
outfile: the output file to write to
quiet: suppress verbose output (default is True)
"""
try:
remote_date = datetime.strptime(response.headers['Last-Modified'],
'%a, %d %b %Y %X %Z')
if isfile(outfile):
local_date = datetime.fromtimestamp(os.path.getmtime(outfile))
if remote_date <= local_date:
if not quiet:
print(os.path.basename(outfile) +
': Skipping, found more recently modified local '
'copy (use --force to force download)')
return False
except:
pass
return True | [
"def",
"download_needed",
"(",
"self",
",",
"response",
",",
"outfile",
",",
"quiet",
"=",
"True",
")",
":",
"try",
":",
"remote_date",
"=",
"datetime",
".",
"strptime",
"(",
"response",
".",
"headers",
"[",
"'Last-Modified'",
"]",
",",
"'%a, %d %b %Y %X %Z'",
")",
"if",
"isfile",
"(",
"outfile",
")",
":",
"local_date",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"outfile",
")",
")",
"if",
"remote_date",
"<=",
"local_date",
":",
"if",
"not",
"quiet",
":",
"print",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"outfile",
")",
"+",
"': Skipping, found more recently modified local '",
"'copy (use --force to force download)'",
")",
"return",
"False",
"except",
":",
"pass",
"return",
"True"
] | determine if a download is needed based on timestamp. Return True
if needed (remote is newer) or False if local is newest.
Parameters
==========
response: the response from the API
outfile: the output file to write to
quiet: suppress verbose output (default is True) | [
"determine",
"if",
"a",
"download",
"is",
"needed",
"based",
"on",
"timestamp",
".",
"Return",
"True",
"if",
"needed",
"(",
"remote",
"is",
"newer",
")",
"or",
"False",
"if",
"local",
"is",
"newest",
".",
"Parameters",
"==========",
"response",
":",
"the",
"response",
"from",
"the",
"API",
"outfile",
":",
"the",
"output",
"file",
"to",
"write",
"to",
"quiet",
":",
"suppress",
"verbose",
"output",
"(",
"default",
"is",
"True",
")"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2089-L2111 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.print_table | def print_table(self, items, fields):
""" print a table of items, for a set of fields defined
Parameters
==========
items: a list of items to print
fields: a list of fields to select from items
"""
formats = []
borders = []
for f in fields:
length = max(
len(f), max([len(self.string(getattr(i, f))) for i in items]))
justify = '>' if isinstance(getattr(
items[0], f), int) or f == 'size' or f == 'reward' else '<'
formats.append('{:' + justify + self.string(length + 2) + '}')
borders.append('-' * length + ' ')
row_format = u''.join(formats)
headers = [f + ' ' for f in fields]
print(row_format.format(*headers))
print(row_format.format(*borders))
for i in items:
i_fields = [self.string(getattr(i, f)) + ' ' for f in fields]
try:
print(row_format.format(*i_fields))
except UnicodeEncodeError:
print(row_format.format(*i_fields).encode('utf-8')) | python | def print_table(self, items, fields):
""" print a table of items, for a set of fields defined
Parameters
==========
items: a list of items to print
fields: a list of fields to select from items
"""
formats = []
borders = []
for f in fields:
length = max(
len(f), max([len(self.string(getattr(i, f))) for i in items]))
justify = '>' if isinstance(getattr(
items[0], f), int) or f == 'size' or f == 'reward' else '<'
formats.append('{:' + justify + self.string(length + 2) + '}')
borders.append('-' * length + ' ')
row_format = u''.join(formats)
headers = [f + ' ' for f in fields]
print(row_format.format(*headers))
print(row_format.format(*borders))
for i in items:
i_fields = [self.string(getattr(i, f)) + ' ' for f in fields]
try:
print(row_format.format(*i_fields))
except UnicodeEncodeError:
print(row_format.format(*i_fields).encode('utf-8')) | [
"def",
"print_table",
"(",
"self",
",",
"items",
",",
"fields",
")",
":",
"formats",
"=",
"[",
"]",
"borders",
"=",
"[",
"]",
"for",
"f",
"in",
"fields",
":",
"length",
"=",
"max",
"(",
"len",
"(",
"f",
")",
",",
"max",
"(",
"[",
"len",
"(",
"self",
".",
"string",
"(",
"getattr",
"(",
"i",
",",
"f",
")",
")",
")",
"for",
"i",
"in",
"items",
"]",
")",
")",
"justify",
"=",
"'>'",
"if",
"isinstance",
"(",
"getattr",
"(",
"items",
"[",
"0",
"]",
",",
"f",
")",
",",
"int",
")",
"or",
"f",
"==",
"'size'",
"or",
"f",
"==",
"'reward'",
"else",
"'<'",
"formats",
".",
"append",
"(",
"'{:'",
"+",
"justify",
"+",
"self",
".",
"string",
"(",
"length",
"+",
"2",
")",
"+",
"'}'",
")",
"borders",
".",
"append",
"(",
"'-'",
"*",
"length",
"+",
"' '",
")",
"row_format",
"=",
"u''",
".",
"join",
"(",
"formats",
")",
"headers",
"=",
"[",
"f",
"+",
"' '",
"for",
"f",
"in",
"fields",
"]",
"print",
"(",
"row_format",
".",
"format",
"(",
"*",
"headers",
")",
")",
"print",
"(",
"row_format",
".",
"format",
"(",
"*",
"borders",
")",
")",
"for",
"i",
"in",
"items",
":",
"i_fields",
"=",
"[",
"self",
".",
"string",
"(",
"getattr",
"(",
"i",
",",
"f",
")",
")",
"+",
"' '",
"for",
"f",
"in",
"fields",
"]",
"try",
":",
"print",
"(",
"row_format",
".",
"format",
"(",
"*",
"i_fields",
")",
")",
"except",
"UnicodeEncodeError",
":",
"print",
"(",
"row_format",
".",
"format",
"(",
"*",
"i_fields",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] | print a table of items, for a set of fields defined
Parameters
==========
items: a list of items to print
fields: a list of fields to select from items | [
"print",
"a",
"table",
"of",
"items",
"for",
"a",
"set",
"of",
"fields",
"defined"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2113-L2139 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.print_csv | def print_csv(self, items, fields):
""" print a set of fields in a set of items using a csv.writer
Parameters
==========
items: a list of items to print
fields: a list of fields to select from items
"""
writer = csv.writer(sys.stdout)
writer.writerow(fields)
for i in items:
i_fields = [self.string(getattr(i, f)) for f in fields]
writer.writerow(i_fields) | python | def print_csv(self, items, fields):
""" print a set of fields in a set of items using a csv.writer
Parameters
==========
items: a list of items to print
fields: a list of fields to select from items
"""
writer = csv.writer(sys.stdout)
writer.writerow(fields)
for i in items:
i_fields = [self.string(getattr(i, f)) for f in fields]
writer.writerow(i_fields) | [
"def",
"print_csv",
"(",
"self",
",",
"items",
",",
"fields",
")",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"sys",
".",
"stdout",
")",
"writer",
".",
"writerow",
"(",
"fields",
")",
"for",
"i",
"in",
"items",
":",
"i_fields",
"=",
"[",
"self",
".",
"string",
"(",
"getattr",
"(",
"i",
",",
"f",
")",
")",
"for",
"f",
"in",
"fields",
"]",
"writer",
".",
"writerow",
"(",
"i_fields",
")"
] | print a set of fields in a set of items using a csv.writer
Parameters
==========
items: a list of items to print
fields: a list of fields to select from items | [
"print",
"a",
"set",
"of",
"fields",
"in",
"a",
"set",
"of",
"items",
"using",
"a",
"csv",
".",
"writer"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2141-L2153 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.process_response | def process_response(self, result):
""" process a response from the API. We check the API version against
the client's to see if it's old, and give them a warning (once)
Parameters
==========
result: the result from the API
"""
if len(result) == 3:
data = result[0]
headers = result[2]
if self.HEADER_API_VERSION in headers:
api_version = headers[self.HEADER_API_VERSION]
if (not self.already_printed_version_warning
and not self.is_up_to_date(api_version)):
print('Warning: Looks like you\'re using an outdated API '
'Version, please consider updating (server ' +
api_version + ' / client ' + self.__version__ + ')')
self.already_printed_version_warning = True
return data
return result | python | def process_response(self, result):
""" process a response from the API. We check the API version against
the client's to see if it's old, and give them a warning (once)
Parameters
==========
result: the result from the API
"""
if len(result) == 3:
data = result[0]
headers = result[2]
if self.HEADER_API_VERSION in headers:
api_version = headers[self.HEADER_API_VERSION]
if (not self.already_printed_version_warning
and not self.is_up_to_date(api_version)):
print('Warning: Looks like you\'re using an outdated API '
'Version, please consider updating (server ' +
api_version + ' / client ' + self.__version__ + ')')
self.already_printed_version_warning = True
return data
return result | [
"def",
"process_response",
"(",
"self",
",",
"result",
")",
":",
"if",
"len",
"(",
"result",
")",
"==",
"3",
":",
"data",
"=",
"result",
"[",
"0",
"]",
"headers",
"=",
"result",
"[",
"2",
"]",
"if",
"self",
".",
"HEADER_API_VERSION",
"in",
"headers",
":",
"api_version",
"=",
"headers",
"[",
"self",
".",
"HEADER_API_VERSION",
"]",
"if",
"(",
"not",
"self",
".",
"already_printed_version_warning",
"and",
"not",
"self",
".",
"is_up_to_date",
"(",
"api_version",
")",
")",
":",
"print",
"(",
"'Warning: Looks like you\\'re using an outdated API '",
"'Version, please consider updating (server '",
"+",
"api_version",
"+",
"' / client '",
"+",
"self",
".",
"__version__",
"+",
"')'",
")",
"self",
".",
"already_printed_version_warning",
"=",
"True",
"return",
"data",
"return",
"result"
] | process a response from the API. We check the API version against
the client's to see if it's old, and give them a warning (once)
Parameters
==========
result: the result from the API | [
"process",
"a",
"response",
"from",
"the",
"API",
".",
"We",
"check",
"the",
"API",
"version",
"against",
"the",
"client",
"s",
"to",
"see",
"if",
"it",
"s",
"old",
"and",
"give",
"them",
"a",
"warning",
"(",
"once",
")"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2181-L2201 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.is_up_to_date | def is_up_to_date(self, server_version):
""" determine if a client (on the local user's machine) is up to date
with the version provided on the server. Return a boolean with True
or False
Parameters
==========
server_version: the server version string to compare to the host
"""
client_split = self.__version__.split('.')
client_len = len(client_split)
server_split = server_version.split('.')
server_len = len(server_split)
# Make both lists the same length
for i in range(client_len, server_len):
client_split.append('0')
for i in range(server_len, client_len):
server_split.append('0')
for i in range(0, client_len):
if 'b' in client_split[i]:
# Using a beta version, don't check
return True
client = int(client_split[i])
server = int(server_split[i])
if client < server:
return False
elif server < client:
return True
return True | python | def is_up_to_date(self, server_version):
""" determine if a client (on the local user's machine) is up to date
with the version provided on the server. Return a boolean with True
or False
Parameters
==========
server_version: the server version string to compare to the host
"""
client_split = self.__version__.split('.')
client_len = len(client_split)
server_split = server_version.split('.')
server_len = len(server_split)
# Make both lists the same length
for i in range(client_len, server_len):
client_split.append('0')
for i in range(server_len, client_len):
server_split.append('0')
for i in range(0, client_len):
if 'b' in client_split[i]:
# Using a beta version, don't check
return True
client = int(client_split[i])
server = int(server_split[i])
if client < server:
return False
elif server < client:
return True
return True | [
"def",
"is_up_to_date",
"(",
"self",
",",
"server_version",
")",
":",
"client_split",
"=",
"self",
".",
"__version__",
".",
"split",
"(",
"'.'",
")",
"client_len",
"=",
"len",
"(",
"client_split",
")",
"server_split",
"=",
"server_version",
".",
"split",
"(",
"'.'",
")",
"server_len",
"=",
"len",
"(",
"server_split",
")",
"# Make both lists the same length",
"for",
"i",
"in",
"range",
"(",
"client_len",
",",
"server_len",
")",
":",
"client_split",
".",
"append",
"(",
"'0'",
")",
"for",
"i",
"in",
"range",
"(",
"server_len",
",",
"client_len",
")",
":",
"server_split",
".",
"append",
"(",
"'0'",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"client_len",
")",
":",
"if",
"'b'",
"in",
"client_split",
"[",
"i",
"]",
":",
"# Using a beta version, don't check",
"return",
"True",
"client",
"=",
"int",
"(",
"client_split",
"[",
"i",
"]",
")",
"server",
"=",
"int",
"(",
"server_split",
"[",
"i",
"]",
")",
"if",
"client",
"<",
"server",
":",
"return",
"False",
"elif",
"server",
"<",
"client",
":",
"return",
"True",
"return",
"True"
] | determine if a client (on the local user's machine) is up to date
with the version provided on the server. Return a boolean with True
or False
Parameters
==========
server_version: the server version string to compare to the host | [
"determine",
"if",
"a",
"client",
"(",
"on",
"the",
"local",
"user",
"s",
"machine",
")",
"is",
"up",
"to",
"date",
"with",
"the",
"version",
"provided",
"on",
"the",
"server",
".",
"Return",
"a",
"boolean",
"with",
"True",
"or",
"False",
"Parameters",
"==========",
"server_version",
":",
"the",
"server",
"version",
"string",
"to",
"compare",
"to",
"the",
"host"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2203-L2233 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.upload_files | def upload_files(self,
request,
resources,
folder,
quiet=False,
dir_mode='skip'):
""" upload files in a folder
Parameters
==========
request: the prepared request
resources: the files to upload
folder: the folder to upload from
quiet: suppress verbose output (default is False)
"""
for file_name in os.listdir(folder):
if (file_name == self.DATASET_METADATA_FILE
or file_name == self.OLD_DATASET_METADATA_FILE
or file_name == self.KERNEL_METADATA_FILE):
continue
full_path = os.path.join(folder, file_name)
if os.path.isfile(full_path):
exitcode = self._upload_file(file_name, full_path, quiet,
request, resources)
if exitcode:
return
elif os.path.isdir(full_path):
if dir_mode in ['zip', 'tar']:
temp_dir = tempfile.mkdtemp()
try:
_, dir_name = os.path.split(full_path)
archive_path = shutil.make_archive(
os.path.join(temp_dir, dir_name), dir_mode,
full_path)
_, archive_name = os.path.split(archive_path)
exitcode = self._upload_file(archive_name,
archive_path, quiet,
request, resources)
finally:
shutil.rmtree(temp_dir)
if exitcode:
return
elif not quiet:
print("Skipping folder: " + file_name +
"; use '--dir-mode' to upload folders")
else:
if not quiet:
print('Skipping: ' + file_name) | python | def upload_files(self,
request,
resources,
folder,
quiet=False,
dir_mode='skip'):
""" upload files in a folder
Parameters
==========
request: the prepared request
resources: the files to upload
folder: the folder to upload from
quiet: suppress verbose output (default is False)
"""
for file_name in os.listdir(folder):
if (file_name == self.DATASET_METADATA_FILE
or file_name == self.OLD_DATASET_METADATA_FILE
or file_name == self.KERNEL_METADATA_FILE):
continue
full_path = os.path.join(folder, file_name)
if os.path.isfile(full_path):
exitcode = self._upload_file(file_name, full_path, quiet,
request, resources)
if exitcode:
return
elif os.path.isdir(full_path):
if dir_mode in ['zip', 'tar']:
temp_dir = tempfile.mkdtemp()
try:
_, dir_name = os.path.split(full_path)
archive_path = shutil.make_archive(
os.path.join(temp_dir, dir_name), dir_mode,
full_path)
_, archive_name = os.path.split(archive_path)
exitcode = self._upload_file(archive_name,
archive_path, quiet,
request, resources)
finally:
shutil.rmtree(temp_dir)
if exitcode:
return
elif not quiet:
print("Skipping folder: " + file_name +
"; use '--dir-mode' to upload folders")
else:
if not quiet:
print('Skipping: ' + file_name) | [
"def",
"upload_files",
"(",
"self",
",",
"request",
",",
"resources",
",",
"folder",
",",
"quiet",
"=",
"False",
",",
"dir_mode",
"=",
"'skip'",
")",
":",
"for",
"file_name",
"in",
"os",
".",
"listdir",
"(",
"folder",
")",
":",
"if",
"(",
"file_name",
"==",
"self",
".",
"DATASET_METADATA_FILE",
"or",
"file_name",
"==",
"self",
".",
"OLD_DATASET_METADATA_FILE",
"or",
"file_name",
"==",
"self",
".",
"KERNEL_METADATA_FILE",
")",
":",
"continue",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"file_name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"full_path",
")",
":",
"exitcode",
"=",
"self",
".",
"_upload_file",
"(",
"file_name",
",",
"full_path",
",",
"quiet",
",",
"request",
",",
"resources",
")",
"if",
"exitcode",
":",
"return",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"full_path",
")",
":",
"if",
"dir_mode",
"in",
"[",
"'zip'",
",",
"'tar'",
"]",
":",
"temp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"_",
",",
"dir_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"full_path",
")",
"archive_path",
"=",
"shutil",
".",
"make_archive",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"dir_name",
")",
",",
"dir_mode",
",",
"full_path",
")",
"_",
",",
"archive_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"archive_path",
")",
"exitcode",
"=",
"self",
".",
"_upload_file",
"(",
"archive_name",
",",
"archive_path",
",",
"quiet",
",",
"request",
",",
"resources",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"temp_dir",
")",
"if",
"exitcode",
":",
"return",
"elif",
"not",
"quiet",
":",
"print",
"(",
"\"Skipping folder: \"",
"+",
"file_name",
"+",
"\"; use '--dir-mode' to upload folders\"",
")",
"else",
":",
"if",
"not",
"quiet",
":",
"print",
"(",
"'Skipping: '",
"+",
"file_name",
")"
] | upload files in a folder
Parameters
==========
request: the prepared request
resources: the files to upload
folder: the folder to upload from
quiet: suppress verbose output (default is False) | [
"upload",
"files",
"in",
"a",
"folder",
"Parameters",
"==========",
"request",
":",
"the",
"prepared",
"request",
"resources",
":",
"the",
"files",
"to",
"upload",
"folder",
":",
"the",
"folder",
"to",
"upload",
"from",
"quiet",
":",
"suppress",
"verbose",
"output",
"(",
"default",
"is",
"False",
")"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2235-L2282 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi._upload_file | def _upload_file(self, file_name, full_path, quiet, request, resources):
""" Helper function to upload a single file
Parameters
==========
file_name: name of the file to upload
full_path: path to the file to upload
request: the prepared request
resources: optional file metadata
quiet: suppress verbose output
:return: True - upload unsuccessful; False - upload successful
"""
if not quiet:
print('Starting upload for file ' + file_name)
content_length = os.path.getsize(full_path)
token = self.dataset_upload_file(full_path, quiet)
if token is None:
if not quiet:
print('Upload unsuccessful: ' + file_name)
return True
if not quiet:
print('Upload successful: ' + file_name + ' (' +
File.get_size(content_length) + ')')
upload_file = DatasetUploadFile()
upload_file.token = token
if resources:
for item in resources:
if file_name == item.get('path'):
upload_file.description = item.get('description')
if 'schema' in item:
fields = self.get_or_default(item['schema'], 'fields',
[])
processed = []
count = 0
for field in fields:
processed.append(self.process_column(field))
processed[count].order = count
count += 1
upload_file.columns = processed
request.files.append(upload_file)
return False | python | def _upload_file(self, file_name, full_path, quiet, request, resources):
""" Helper function to upload a single file
Parameters
==========
file_name: name of the file to upload
full_path: path to the file to upload
request: the prepared request
resources: optional file metadata
quiet: suppress verbose output
:return: True - upload unsuccessful; False - upload successful
"""
if not quiet:
print('Starting upload for file ' + file_name)
content_length = os.path.getsize(full_path)
token = self.dataset_upload_file(full_path, quiet)
if token is None:
if not quiet:
print('Upload unsuccessful: ' + file_name)
return True
if not quiet:
print('Upload successful: ' + file_name + ' (' +
File.get_size(content_length) + ')')
upload_file = DatasetUploadFile()
upload_file.token = token
if resources:
for item in resources:
if file_name == item.get('path'):
upload_file.description = item.get('description')
if 'schema' in item:
fields = self.get_or_default(item['schema'], 'fields',
[])
processed = []
count = 0
for field in fields:
processed.append(self.process_column(field))
processed[count].order = count
count += 1
upload_file.columns = processed
request.files.append(upload_file)
return False | [
"def",
"_upload_file",
"(",
"self",
",",
"file_name",
",",
"full_path",
",",
"quiet",
",",
"request",
",",
"resources",
")",
":",
"if",
"not",
"quiet",
":",
"print",
"(",
"'Starting upload for file '",
"+",
"file_name",
")",
"content_length",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"full_path",
")",
"token",
"=",
"self",
".",
"dataset_upload_file",
"(",
"full_path",
",",
"quiet",
")",
"if",
"token",
"is",
"None",
":",
"if",
"not",
"quiet",
":",
"print",
"(",
"'Upload unsuccessful: '",
"+",
"file_name",
")",
"return",
"True",
"if",
"not",
"quiet",
":",
"print",
"(",
"'Upload successful: '",
"+",
"file_name",
"+",
"' ('",
"+",
"File",
".",
"get_size",
"(",
"content_length",
")",
"+",
"')'",
")",
"upload_file",
"=",
"DatasetUploadFile",
"(",
")",
"upload_file",
".",
"token",
"=",
"token",
"if",
"resources",
":",
"for",
"item",
"in",
"resources",
":",
"if",
"file_name",
"==",
"item",
".",
"get",
"(",
"'path'",
")",
":",
"upload_file",
".",
"description",
"=",
"item",
".",
"get",
"(",
"'description'",
")",
"if",
"'schema'",
"in",
"item",
":",
"fields",
"=",
"self",
".",
"get_or_default",
"(",
"item",
"[",
"'schema'",
"]",
",",
"'fields'",
",",
"[",
"]",
")",
"processed",
"=",
"[",
"]",
"count",
"=",
"0",
"for",
"field",
"in",
"fields",
":",
"processed",
".",
"append",
"(",
"self",
".",
"process_column",
"(",
"field",
")",
")",
"processed",
"[",
"count",
"]",
".",
"order",
"=",
"count",
"count",
"+=",
"1",
"upload_file",
".",
"columns",
"=",
"processed",
"request",
".",
"files",
".",
"append",
"(",
"upload_file",
")",
"return",
"False"
] | Helper function to upload a single file
Parameters
==========
file_name: name of the file to upload
full_path: path to the file to upload
request: the prepared request
resources: optional file metadata
quiet: suppress verbose output
:return: True - upload unsuccessful; False - upload successful | [
"Helper",
"function",
"to",
"upload",
"a",
"single",
"file",
"Parameters",
"==========",
"file_name",
":",
"name",
"of",
"the",
"file",
"to",
"upload",
"full_path",
":",
"path",
"to",
"the",
"file",
"to",
"upload",
"request",
":",
"the",
"prepared",
"request",
"resources",
":",
"optional",
"file",
"metadata",
"quiet",
":",
"suppress",
"verbose",
"output",
":",
"return",
":",
"True",
"-",
"upload",
"unsuccessful",
";",
"False",
"-",
"upload",
"successful"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2284-L2325 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.process_column | def process_column(self, column):
""" process a column, check for the type, and return the processed
column
Parameters
==========
column: a list of values in a column to be processed
"""
processed_column = DatasetColumn(
name=self.get_or_fail(column, 'name'),
description=self.get_or_default(column, 'description', ''))
if 'type' in column:
original_type = column['type'].lower()
processed_column.original_type = original_type
if (original_type == 'string' or original_type == 'date'
or original_type == 'time' or original_type == 'yearmonth'
or original_type == 'duration'
or original_type == 'geopoint'
or original_type == 'geojson'):
processed_column.type = 'string'
elif (original_type == 'numeric' or original_type == 'number'
or original_type == 'year'):
processed_column.type = 'numeric'
elif original_type == 'boolean':
processed_column.type = 'boolean'
elif original_type == 'datetime':
processed_column.type = 'datetime'
else:
# Possibly extended data type - not going to try to track those
# here. Will set the type and let the server handle it.
processed_column.type = original_type
return processed_column | python | def process_column(self, column):
""" process a column, check for the type, and return the processed
column
Parameters
==========
column: a list of values in a column to be processed
"""
processed_column = DatasetColumn(
name=self.get_or_fail(column, 'name'),
description=self.get_or_default(column, 'description', ''))
if 'type' in column:
original_type = column['type'].lower()
processed_column.original_type = original_type
if (original_type == 'string' or original_type == 'date'
or original_type == 'time' or original_type == 'yearmonth'
or original_type == 'duration'
or original_type == 'geopoint'
or original_type == 'geojson'):
processed_column.type = 'string'
elif (original_type == 'numeric' or original_type == 'number'
or original_type == 'year'):
processed_column.type = 'numeric'
elif original_type == 'boolean':
processed_column.type = 'boolean'
elif original_type == 'datetime':
processed_column.type = 'datetime'
else:
# Possibly extended data type - not going to try to track those
# here. Will set the type and let the server handle it.
processed_column.type = original_type
return processed_column | [
"def",
"process_column",
"(",
"self",
",",
"column",
")",
":",
"processed_column",
"=",
"DatasetColumn",
"(",
"name",
"=",
"self",
".",
"get_or_fail",
"(",
"column",
",",
"'name'",
")",
",",
"description",
"=",
"self",
".",
"get_or_default",
"(",
"column",
",",
"'description'",
",",
"''",
")",
")",
"if",
"'type'",
"in",
"column",
":",
"original_type",
"=",
"column",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"processed_column",
".",
"original_type",
"=",
"original_type",
"if",
"(",
"original_type",
"==",
"'string'",
"or",
"original_type",
"==",
"'date'",
"or",
"original_type",
"==",
"'time'",
"or",
"original_type",
"==",
"'yearmonth'",
"or",
"original_type",
"==",
"'duration'",
"or",
"original_type",
"==",
"'geopoint'",
"or",
"original_type",
"==",
"'geojson'",
")",
":",
"processed_column",
".",
"type",
"=",
"'string'",
"elif",
"(",
"original_type",
"==",
"'numeric'",
"or",
"original_type",
"==",
"'number'",
"or",
"original_type",
"==",
"'year'",
")",
":",
"processed_column",
".",
"type",
"=",
"'numeric'",
"elif",
"original_type",
"==",
"'boolean'",
":",
"processed_column",
".",
"type",
"=",
"'boolean'",
"elif",
"original_type",
"==",
"'datetime'",
":",
"processed_column",
".",
"type",
"=",
"'datetime'",
"else",
":",
"# Possibly extended data type - not going to try to track those",
"# here. Will set the type and let the server handle it.",
"processed_column",
".",
"type",
"=",
"original_type",
"return",
"processed_column"
] | process a column, check for the type, and return the processed
column
Parameters
==========
column: a list of values in a column to be processed | [
"process",
"a",
"column",
"check",
"for",
"the",
"type",
"and",
"return",
"the",
"processed",
"column",
"Parameters",
"==========",
"column",
":",
"a",
"list",
"of",
"values",
"in",
"a",
"column",
"to",
"be",
"processed"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2327-L2357 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.upload_complete | def upload_complete(self, path, url, quiet):
""" function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
"""
file_size = os.path.getsize(path)
try:
with tqdm(
total=file_size,
unit='B',
unit_scale=True,
unit_divisor=1024,
disable=quiet) as progress_bar:
with io.open(path, 'rb', buffering=0) as fp:
reader = TqdmBufferedReader(fp, progress_bar)
session = requests.Session()
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
response = session.put(url, data=reader)
except Exception as error:
print(error)
return False
return response.status_code == 200 or response.status_code == 201 | python | def upload_complete(self, path, url, quiet):
""" function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
"""
file_size = os.path.getsize(path)
try:
with tqdm(
total=file_size,
unit='B',
unit_scale=True,
unit_divisor=1024,
disable=quiet) as progress_bar:
with io.open(path, 'rb', buffering=0) as fp:
reader = TqdmBufferedReader(fp, progress_bar)
session = requests.Session()
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
response = session.put(url, data=reader)
except Exception as error:
print(error)
return False
return response.status_code == 200 or response.status_code == 201 | [
"def",
"upload_complete",
"(",
"self",
",",
"path",
",",
"url",
",",
"quiet",
")",
":",
"file_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
"try",
":",
"with",
"tqdm",
"(",
"total",
"=",
"file_size",
",",
"unit",
"=",
"'B'",
",",
"unit_scale",
"=",
"True",
",",
"unit_divisor",
"=",
"1024",
",",
"disable",
"=",
"quiet",
")",
"as",
"progress_bar",
":",
"with",
"io",
".",
"open",
"(",
"path",
",",
"'rb'",
",",
"buffering",
"=",
"0",
")",
"as",
"fp",
":",
"reader",
"=",
"TqdmBufferedReader",
"(",
"fp",
",",
"progress_bar",
")",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"retries",
"=",
"Retry",
"(",
"total",
"=",
"10",
",",
"backoff_factor",
"=",
"0.5",
")",
"adapter",
"=",
"HTTPAdapter",
"(",
"max_retries",
"=",
"retries",
")",
"session",
".",
"mount",
"(",
"'http://'",
",",
"adapter",
")",
"session",
".",
"mount",
"(",
"'https://'",
",",
"adapter",
")",
"response",
"=",
"session",
".",
"put",
"(",
"url",
",",
"data",
"=",
"reader",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"error",
")",
"return",
"False",
"return",
"response",
".",
"status_code",
"==",
"200",
"or",
"response",
".",
"status_code",
"==",
"201"
] | function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False) | [
"function",
"to",
"complete",
"an",
"upload",
"to",
"retrieve",
"a",
"path",
"from",
"a",
"url",
"Parameters",
"==========",
"path",
":",
"the",
"path",
"for",
"the",
"upload",
"that",
"is",
"read",
"in",
"url",
":",
"the",
"url",
"to",
"send",
"the",
"POST",
"to",
"quiet",
":",
"suppress",
"verbose",
"output",
"(",
"default",
"is",
"False",
")"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2359-L2386 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.validate_dataset_string | def validate_dataset_string(self, dataset):
""" determine if a dataset string is valid, meaning it is in the format
of {username}/{dataset-slug}.
Parameters
==========
dataset: the dataset name to validate
"""
if dataset:
if '/' not in dataset:
raise ValueError('Dataset must be specified in the form of '
'\'{username}/{dataset-slug}\'')
split = dataset.split('/')
if not split[0] or not split[1]:
raise ValueError('Invalid dataset specification ' + dataset) | python | def validate_dataset_string(self, dataset):
""" determine if a dataset string is valid, meaning it is in the format
of {username}/{dataset-slug}.
Parameters
==========
dataset: the dataset name to validate
"""
if dataset:
if '/' not in dataset:
raise ValueError('Dataset must be specified in the form of '
'\'{username}/{dataset-slug}\'')
split = dataset.split('/')
if not split[0] or not split[1]:
raise ValueError('Invalid dataset specification ' + dataset) | [
"def",
"validate_dataset_string",
"(",
"self",
",",
"dataset",
")",
":",
"if",
"dataset",
":",
"if",
"'/'",
"not",
"in",
"dataset",
":",
"raise",
"ValueError",
"(",
"'Dataset must be specified in the form of '",
"'\\'{username}/{dataset-slug}\\''",
")",
"split",
"=",
"dataset",
".",
"split",
"(",
"'/'",
")",
"if",
"not",
"split",
"[",
"0",
"]",
"or",
"not",
"split",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Invalid dataset specification '",
"+",
"dataset",
")"
] | determine if a dataset string is valid, meaning it is in the format
of {username}/{dataset-slug}.
Parameters
==========
dataset: the dataset name to validate | [
"determine",
"if",
"a",
"dataset",
"string",
"is",
"valid",
"meaning",
"it",
"is",
"in",
"the",
"format",
"of",
"{",
"username",
"}",
"/",
"{",
"dataset",
"-",
"slug",
"}",
".",
"Parameters",
"==========",
"dataset",
":",
"the",
"dataset",
"name",
"to",
"validate"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2388-L2402 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.validate_kernel_string | def validate_kernel_string(self, kernel):
""" determine if a kernel string is valid, meaning it is in the format
of {username}/{kernel-slug}.
Parameters
==========
kernel: the kernel name to validate
"""
if kernel:
if '/' not in kernel:
raise ValueError('Kernel must be specified in the form of '
'\'{username}/{kernel-slug}\'')
split = kernel.split('/')
if not split[0] or not split[1]:
raise ValueError('Kernel must be specified in the form of '
'\'{username}/{kernel-slug}\'')
if len(split[1]) < 5:
raise ValueError(
'Kernel slug must be at least five characters') | python | def validate_kernel_string(self, kernel):
""" determine if a kernel string is valid, meaning it is in the format
of {username}/{kernel-slug}.
Parameters
==========
kernel: the kernel name to validate
"""
if kernel:
if '/' not in kernel:
raise ValueError('Kernel must be specified in the form of '
'\'{username}/{kernel-slug}\'')
split = kernel.split('/')
if not split[0] or not split[1]:
raise ValueError('Kernel must be specified in the form of '
'\'{username}/{kernel-slug}\'')
if len(split[1]) < 5:
raise ValueError(
'Kernel slug must be at least five characters') | [
"def",
"validate_kernel_string",
"(",
"self",
",",
"kernel",
")",
":",
"if",
"kernel",
":",
"if",
"'/'",
"not",
"in",
"kernel",
":",
"raise",
"ValueError",
"(",
"'Kernel must be specified in the form of '",
"'\\'{username}/{kernel-slug}\\''",
")",
"split",
"=",
"kernel",
".",
"split",
"(",
"'/'",
")",
"if",
"not",
"split",
"[",
"0",
"]",
"or",
"not",
"split",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Kernel must be specified in the form of '",
"'\\'{username}/{kernel-slug}\\''",
")",
"if",
"len",
"(",
"split",
"[",
"1",
"]",
")",
"<",
"5",
":",
"raise",
"ValueError",
"(",
"'Kernel slug must be at least five characters'",
")"
] | determine if a kernel string is valid, meaning it is in the format
of {username}/{kernel-slug}.
Parameters
==========
kernel: the kernel name to validate | [
"determine",
"if",
"a",
"kernel",
"string",
"is",
"valid",
"meaning",
"it",
"is",
"in",
"the",
"format",
"of",
"{",
"username",
"}",
"/",
"{",
"kernel",
"-",
"slug",
"}",
".",
"Parameters",
"==========",
"kernel",
":",
"the",
"kernel",
"name",
"to",
"validate"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2404-L2423 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.validate_resources | def validate_resources(self, folder, resources):
""" validate resources is a wrapper to validate the existence of files
and that there are no duplicates for a folder and set of resources.
Parameters
==========
folder: the folder to validate
resources: one or more resources to validate within the folder
"""
self.validate_files_exist(folder, resources)
self.validate_no_duplicate_paths(resources) | python | def validate_resources(self, folder, resources):
""" validate resources is a wrapper to validate the existence of files
and that there are no duplicates for a folder and set of resources.
Parameters
==========
folder: the folder to validate
resources: one or more resources to validate within the folder
"""
self.validate_files_exist(folder, resources)
self.validate_no_duplicate_paths(resources) | [
"def",
"validate_resources",
"(",
"self",
",",
"folder",
",",
"resources",
")",
":",
"self",
".",
"validate_files_exist",
"(",
"folder",
",",
"resources",
")",
"self",
".",
"validate_no_duplicate_paths",
"(",
"resources",
")"
] | validate resources is a wrapper to validate the existence of files
and that there are no duplicates for a folder and set of resources.
Parameters
==========
folder: the folder to validate
resources: one or more resources to validate within the folder | [
"validate",
"resources",
"is",
"a",
"wrapper",
"to",
"validate",
"the",
"existence",
"of",
"files",
"and",
"that",
"there",
"are",
"no",
"duplicates",
"for",
"a",
"folder",
"and",
"set",
"of",
"resources",
"."
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2425-L2435 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.validate_files_exist | def validate_files_exist(self, folder, resources):
""" ensure that one or more resource files exist in a folder
Parameters
==========
folder: the folder to validate
resources: one or more resources to validate within the folder
"""
for item in resources:
file_name = item.get('path')
full_path = os.path.join(folder, file_name)
if not os.path.isfile(full_path):
raise ValueError('%s does not exist' % full_path) | python | def validate_files_exist(self, folder, resources):
""" ensure that one or more resource files exist in a folder
Parameters
==========
folder: the folder to validate
resources: one or more resources to validate within the folder
"""
for item in resources:
file_name = item.get('path')
full_path = os.path.join(folder, file_name)
if not os.path.isfile(full_path):
raise ValueError('%s does not exist' % full_path) | [
"def",
"validate_files_exist",
"(",
"self",
",",
"folder",
",",
"resources",
")",
":",
"for",
"item",
"in",
"resources",
":",
"file_name",
"=",
"item",
".",
"get",
"(",
"'path'",
")",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"file_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"full_path",
")",
":",
"raise",
"ValueError",
"(",
"'%s does not exist'",
"%",
"full_path",
")"
] | ensure that one or more resource files exist in a folder
Parameters
==========
folder: the folder to validate
resources: one or more resources to validate within the folder | [
"ensure",
"that",
"one",
"or",
"more",
"resource",
"files",
"exist",
"in",
"a",
"folder"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2437-L2449 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.validate_no_duplicate_paths | def validate_no_duplicate_paths(self, resources):
""" ensure that the user has not provided duplicate paths in
a list of resources.
Parameters
==========
resources: one or more resources to validate not duplicated
"""
paths = set()
for item in resources:
file_name = item.get('path')
if file_name in paths:
raise ValueError(
'%s path was specified more than once in the metadata' %
file_name)
paths.add(file_name) | python | def validate_no_duplicate_paths(self, resources):
""" ensure that the user has not provided duplicate paths in
a list of resources.
Parameters
==========
resources: one or more resources to validate not duplicated
"""
paths = set()
for item in resources:
file_name = item.get('path')
if file_name in paths:
raise ValueError(
'%s path was specified more than once in the metadata' %
file_name)
paths.add(file_name) | [
"def",
"validate_no_duplicate_paths",
"(",
"self",
",",
"resources",
")",
":",
"paths",
"=",
"set",
"(",
")",
"for",
"item",
"in",
"resources",
":",
"file_name",
"=",
"item",
".",
"get",
"(",
"'path'",
")",
"if",
"file_name",
"in",
"paths",
":",
"raise",
"ValueError",
"(",
"'%s path was specified more than once in the metadata'",
"%",
"file_name",
")",
"paths",
".",
"add",
"(",
"file_name",
")"
] | ensure that the user has not provided duplicate paths in
a list of resources.
Parameters
==========
resources: one or more resources to validate not duplicated | [
"ensure",
"that",
"the",
"user",
"has",
"not",
"provided",
"duplicate",
"paths",
"in",
"a",
"list",
"of",
"resources",
"."
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2451-L2466 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.convert_to_dataset_file_metadata | def convert_to_dataset_file_metadata(self, file_data, path):
""" convert a set of file_data to a metadata file at path
Parameters
==========
file_data: a dictionary of file data to write to file
path: the path to write the metadata to
"""
as_metadata = {
'path': os.path.join(path, file_data['name']),
'description': file_data['description']
}
schema = {}
fields = []
for column in file_data['columns']:
field = {
'name': column['name'],
'title': column['description'],
'type': column['type']
}
fields.append(field)
schema['fields'] = fields
as_metadata['schema'] = schema
return as_metadata | python | def convert_to_dataset_file_metadata(self, file_data, path):
""" convert a set of file_data to a metadata file at path
Parameters
==========
file_data: a dictionary of file data to write to file
path: the path to write the metadata to
"""
as_metadata = {
'path': os.path.join(path, file_data['name']),
'description': file_data['description']
}
schema = {}
fields = []
for column in file_data['columns']:
field = {
'name': column['name'],
'title': column['description'],
'type': column['type']
}
fields.append(field)
schema['fields'] = fields
as_metadata['schema'] = schema
return as_metadata | [
"def",
"convert_to_dataset_file_metadata",
"(",
"self",
",",
"file_data",
",",
"path",
")",
":",
"as_metadata",
"=",
"{",
"'path'",
":",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"file_data",
"[",
"'name'",
"]",
")",
",",
"'description'",
":",
"file_data",
"[",
"'description'",
"]",
"}",
"schema",
"=",
"{",
"}",
"fields",
"=",
"[",
"]",
"for",
"column",
"in",
"file_data",
"[",
"'columns'",
"]",
":",
"field",
"=",
"{",
"'name'",
":",
"column",
"[",
"'name'",
"]",
",",
"'title'",
":",
"column",
"[",
"'description'",
"]",
",",
"'type'",
":",
"column",
"[",
"'type'",
"]",
"}",
"fields",
".",
"append",
"(",
"field",
")",
"schema",
"[",
"'fields'",
"]",
"=",
"fields",
"as_metadata",
"[",
"'schema'",
"]",
"=",
"schema",
"return",
"as_metadata"
] | convert a set of file_data to a metadata file at path
Parameters
==========
file_data: a dictionary of file data to write to file
path: the path to write the metadata to | [
"convert",
"a",
"set",
"of",
"file_data",
"to",
"a",
"metadata",
"file",
"at",
"path"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2468-L2493 | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | TqdmBufferedReader.read | def read(self, *args, **kwargs):
""" read the buffer, passing named and non named arguments to the
io.BufferedReader function.
"""
buf = io.BufferedReader.read(self, *args, **kwargs)
self.increment(len(buf))
return buf | python | def read(self, *args, **kwargs):
""" read the buffer, passing named and non named arguments to the
io.BufferedReader function.
"""
buf = io.BufferedReader.read(self, *args, **kwargs)
self.increment(len(buf))
return buf | [
"def",
"read",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"buf",
"=",
"io",
".",
"BufferedReader",
".",
"read",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"increment",
"(",
"len",
"(",
"buf",
")",
")",
"return",
"buf"
] | read the buffer, passing named and non named arguments to the
io.BufferedReader function. | [
"read",
"the",
"buffer",
"passing",
"named",
"and",
"non",
"named",
"arguments",
"to",
"the",
"io",
".",
"BufferedReader",
"function",
"."
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2507-L2513 | train |
Kaggle/kaggle-api | kaggle/api_client.py | ApiClient.parameters_to_tuples | def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params | python | def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params | [
"def",
"parameters_to_tuples",
"(",
"self",
",",
"params",
",",
"collection_formats",
")",
":",
"new_params",
"=",
"[",
"]",
"if",
"collection_formats",
"is",
"None",
":",
"collection_formats",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"params",
")",
"if",
"isinstance",
"(",
"params",
",",
"dict",
")",
"else",
"params",
":",
"# noqa: E501",
"if",
"k",
"in",
"collection_formats",
":",
"collection_format",
"=",
"collection_formats",
"[",
"k",
"]",
"if",
"collection_format",
"==",
"'multi'",
":",
"new_params",
".",
"extend",
"(",
"(",
"k",
",",
"value",
")",
"for",
"value",
"in",
"v",
")",
"else",
":",
"if",
"collection_format",
"==",
"'ssv'",
":",
"delimiter",
"=",
"' '",
"elif",
"collection_format",
"==",
"'tsv'",
":",
"delimiter",
"=",
"'\\t'",
"elif",
"collection_format",
"==",
"'pipes'",
":",
"delimiter",
"=",
"'|'",
"else",
":",
"# csv is the default",
"delimiter",
"=",
"','",
"new_params",
".",
"append",
"(",
"(",
"k",
",",
"delimiter",
".",
"join",
"(",
"str",
"(",
"value",
")",
"for",
"value",
"in",
"v",
")",
")",
")",
"else",
":",
"new_params",
".",
"append",
"(",
"(",
"k",
",",
"v",
")",
")",
"return",
"new_params"
] | Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted | [
"Get",
"parameters",
"as",
"list",
"of",
"tuples",
"formatting",
"collections",
"."
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api_client.py#L407-L435 | train |
Kaggle/kaggle-api | kaggle/api_client.py | ApiClient.prepare_post_parameters | def prepare_post_parameters(self, post_params=None, files=None):
"""Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params | python | def prepare_post_parameters(self, post_params=None, files=None):
"""Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params | [
"def",
"prepare_post_parameters",
"(",
"self",
",",
"post_params",
"=",
"None",
",",
"files",
"=",
"None",
")",
":",
"params",
"=",
"[",
"]",
"if",
"post_params",
":",
"params",
"=",
"post_params",
"if",
"files",
":",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"files",
")",
":",
"if",
"not",
"v",
":",
"continue",
"file_names",
"=",
"v",
"if",
"type",
"(",
"v",
")",
"is",
"list",
"else",
"[",
"v",
"]",
"for",
"n",
"in",
"file_names",
":",
"with",
"open",
"(",
"n",
",",
"'rb'",
")",
"as",
"f",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"f",
".",
"name",
")",
"filedata",
"=",
"f",
".",
"read",
"(",
")",
"mimetype",
"=",
"(",
"mimetypes",
".",
"guess_type",
"(",
"filename",
")",
"[",
"0",
"]",
"or",
"'application/octet-stream'",
")",
"params",
".",
"append",
"(",
"tuple",
"(",
"[",
"k",
",",
"tuple",
"(",
"[",
"filename",
",",
"filedata",
",",
"mimetype",
"]",
")",
"]",
")",
")",
"return",
"params"
] | Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files. | [
"Builds",
"form",
"parameters",
"."
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api_client.py#L437-L463 | train |
Kaggle/kaggle-api | kaggle/api_client.py | ApiClient.__deserialize_file | def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path | python | def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path | [
"def",
"__deserialize_file",
"(",
"self",
",",
"response",
")",
":",
"fd",
",",
"path",
"=",
"tempfile",
".",
"mkstemp",
"(",
"dir",
"=",
"self",
".",
"configuration",
".",
"temp_folder_path",
")",
"os",
".",
"close",
"(",
"fd",
")",
"os",
".",
"remove",
"(",
"path",
")",
"content_disposition",
"=",
"response",
".",
"getheader",
"(",
"\"Content-Disposition\"",
")",
"if",
"content_disposition",
":",
"filename",
"=",
"re",
".",
"search",
"(",
"r'filename=[\\'\"]?([^\\'\"\\s]+)[\\'\"]?'",
",",
"content_disposition",
")",
".",
"group",
"(",
"1",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"filename",
")",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"response",
".",
"data",
")",
"return",
"path"
] | Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path. | [
"Deserializes",
"body",
"to",
"file"
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api_client.py#L521-L543 | train |
Kaggle/kaggle-api | kaggle/api_client.py | ApiClient.__deserialize_primitive | def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data | python | def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data | [
"def",
"__deserialize_primitive",
"(",
"self",
",",
"data",
",",
"klass",
")",
":",
"try",
":",
"return",
"klass",
"(",
"data",
")",
"except",
"UnicodeEncodeError",
":",
"return",
"six",
".",
"text_type",
"(",
"data",
")",
"except",
"TypeError",
":",
"return",
"data"
] | Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool. | [
"Deserializes",
"string",
"to",
"primitive",
"type",
"."
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api_client.py#L545-L558 | train |
Kaggle/kaggle-api | kaggle/configuration.py | Configuration.logger_file | def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler) | python | def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler) | [
"def",
"logger_file",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"__logger_file",
"=",
"value",
"if",
"self",
".",
"__logger_file",
":",
"# If set logging file,",
"# then add file handler and remove stream handler.",
"self",
".",
"logger_file_handler",
"=",
"logging",
".",
"FileHandler",
"(",
"self",
".",
"__logger_file",
")",
"self",
".",
"logger_file_handler",
".",
"setFormatter",
"(",
"self",
".",
"logger_formatter",
")",
"for",
"_",
",",
"logger",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"logger",
")",
":",
"logger",
".",
"addHandler",
"(",
"self",
".",
"logger_file_handler",
")",
"if",
"self",
".",
"logger_stream_handler",
":",
"logger",
".",
"removeHandler",
"(",
"self",
".",
"logger_stream_handler",
")",
"else",
":",
"# If not set logging file,",
"# then add stream handler and remove file handler.",
"self",
".",
"logger_stream_handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"self",
".",
"logger_stream_handler",
".",
"setFormatter",
"(",
"self",
".",
"logger_formatter",
")",
"for",
"_",
",",
"logger",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"logger",
")",
":",
"logger",
".",
"addHandler",
"(",
"self",
".",
"logger_stream_handler",
")",
"if",
"self",
".",
"logger_file_handler",
":",
"logger",
".",
"removeHandler",
"(",
"self",
".",
"logger_file_handler",
")"
] | The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str | [
"The",
"logger",
"file",
"."
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/configuration.py#L133-L160 | train |
Kaggle/kaggle-api | kaggle/models/dataset_new_request.py | DatasetNewRequest.license_name | def license_name(self, license_name):
"""Sets the license_name of this DatasetNewRequest.
The license that should be associated with the dataset # noqa: E501
:param license_name: The license_name of this DatasetNewRequest. # noqa: E501
:type: str
"""
allowed_values = ["CC0-1.0", "CC-BY-SA-4.0", "GPL-2.0", "ODbL-1.0", "CC-BY-NC-SA-4.0", "unknown", "DbCL-1.0", "CC-BY-SA-3.0", "copyright-authors", "other", "reddit-api", "world-bank"] # noqa: E501
if license_name not in allowed_values:
raise ValueError(
"Invalid value for `license_name` ({0}), must be one of {1}" # noqa: E501
.format(license_name, allowed_values)
)
self._license_name = license_name | python | def license_name(self, license_name):
"""Sets the license_name of this DatasetNewRequest.
The license that should be associated with the dataset # noqa: E501
:param license_name: The license_name of this DatasetNewRequest. # noqa: E501
:type: str
"""
allowed_values = ["CC0-1.0", "CC-BY-SA-4.0", "GPL-2.0", "ODbL-1.0", "CC-BY-NC-SA-4.0", "unknown", "DbCL-1.0", "CC-BY-SA-3.0", "copyright-authors", "other", "reddit-api", "world-bank"] # noqa: E501
if license_name not in allowed_values:
raise ValueError(
"Invalid value for `license_name` ({0}), must be one of {1}" # noqa: E501
.format(license_name, allowed_values)
)
self._license_name = license_name | [
"def",
"license_name",
"(",
"self",
",",
"license_name",
")",
":",
"allowed_values",
"=",
"[",
"\"CC0-1.0\"",
",",
"\"CC-BY-SA-4.0\"",
",",
"\"GPL-2.0\"",
",",
"\"ODbL-1.0\"",
",",
"\"CC-BY-NC-SA-4.0\"",
",",
"\"unknown\"",
",",
"\"DbCL-1.0\"",
",",
"\"CC-BY-SA-3.0\"",
",",
"\"copyright-authors\"",
",",
"\"other\"",
",",
"\"reddit-api\"",
",",
"\"world-bank\"",
"]",
"# noqa: E501",
"if",
"license_name",
"not",
"in",
"allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `license_name` ({0}), must be one of {1}\"",
"# noqa: E501",
".",
"format",
"(",
"license_name",
",",
"allowed_values",
")",
")",
"self",
".",
"_license_name",
"=",
"license_name"
] | Sets the license_name of this DatasetNewRequest.
The license that should be associated with the dataset # noqa: E501
:param license_name: The license_name of this DatasetNewRequest. # noqa: E501
:type: str | [
"Sets",
"the",
"license_name",
"of",
"this",
"DatasetNewRequest",
"."
] | 65f14b1386470c5784d4753e491478e7537660d9 | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/models/dataset_new_request.py#L194-L209 | train |
dmlc/gluon-nlp | scripts/sentiment_analysis/sentiment_analysis_cnn.py | train | def train(net, train_data, test_data):
"""Train textCNN model for sentiment analysis."""
start_pipeline_time = time.time()
net, trainer = text_cnn.init(net, vocab, args.model_mode, context, args.lr)
random.shuffle(train_data)
sp = int(len(train_data)*0.9)
train_dataloader = DataLoader(dataset=train_data[:sp],
batch_size=args.batch_size,
shuffle=True)
val_dataloader = DataLoader(dataset=train_data[sp:],
batch_size=args.batch_size,
shuffle=False)
test_dataloader = DataLoader(dataset=test_data,
batch_size=args.batch_size,
shuffle=False)
# Training/Testing.
best_val_acc = 0
for epoch in range(args.epochs):
# Epoch training stats.
start_epoch_time = time.time()
epoch_L = 0.0
epoch_sent_num = 0
epoch_wc = 0
# Log interval training stats.
start_log_interval_time = time.time()
log_interval_wc = 0
log_interval_sent_num = 0
log_interval_L = 0.0
for i, (data, label) in enumerate(train_dataloader):
data = mx.nd.transpose(data.as_in_context(context))
label = label.as_in_context(context)
wc = max_len
log_interval_wc += wc
epoch_wc += wc
log_interval_sent_num += data.shape[1]
epoch_sent_num += data.shape[1]
with autograd.record():
output = net(data)
L = loss(output, label).mean()
L.backward()
# Update parameter.
trainer.step(1)
log_interval_L += L.asscalar()
epoch_L += L.asscalar()
if (i + 1) % args.log_interval == 0:
print('[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps' % (
epoch, i + 1, len(train_dataloader),
log_interval_L / log_interval_sent_num,
log_interval_wc / 1000 / (time.time() - start_log_interval_time)))
# Clear log interval training stats.
start_log_interval_time = time.time()
log_interval_wc = 0
log_interval_sent_num = 0
log_interval_L = 0
end_epoch_time = time.time()
val_avg_L, val_acc = evaluate(net, val_dataloader)
print('[Epoch %d] train avg loss %g, '
'test acc %.4f, test avg loss %g, throughput %gK wps' % (
epoch, epoch_L / epoch_sent_num,
val_acc, val_avg_L,
epoch_wc / 1000 / (end_epoch_time - start_epoch_time)))
if val_acc >= best_val_acc:
print('Observed Improvement.')
best_val_acc = val_acc
test_avg_L, test_acc = evaluate(net, test_dataloader)
print('Test loss %g, test acc %.4f'%(test_avg_L, test_acc))
print('Total time cost %.2fs'%(time.time()-start_pipeline_time))
return test_acc | python | def train(net, train_data, test_data):
"""Train textCNN model for sentiment analysis."""
start_pipeline_time = time.time()
net, trainer = text_cnn.init(net, vocab, args.model_mode, context, args.lr)
random.shuffle(train_data)
sp = int(len(train_data)*0.9)
train_dataloader = DataLoader(dataset=train_data[:sp],
batch_size=args.batch_size,
shuffle=True)
val_dataloader = DataLoader(dataset=train_data[sp:],
batch_size=args.batch_size,
shuffle=False)
test_dataloader = DataLoader(dataset=test_data,
batch_size=args.batch_size,
shuffle=False)
# Training/Testing.
best_val_acc = 0
for epoch in range(args.epochs):
# Epoch training stats.
start_epoch_time = time.time()
epoch_L = 0.0
epoch_sent_num = 0
epoch_wc = 0
# Log interval training stats.
start_log_interval_time = time.time()
log_interval_wc = 0
log_interval_sent_num = 0
log_interval_L = 0.0
for i, (data, label) in enumerate(train_dataloader):
data = mx.nd.transpose(data.as_in_context(context))
label = label.as_in_context(context)
wc = max_len
log_interval_wc += wc
epoch_wc += wc
log_interval_sent_num += data.shape[1]
epoch_sent_num += data.shape[1]
with autograd.record():
output = net(data)
L = loss(output, label).mean()
L.backward()
# Update parameter.
trainer.step(1)
log_interval_L += L.asscalar()
epoch_L += L.asscalar()
if (i + 1) % args.log_interval == 0:
print('[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps' % (
epoch, i + 1, len(train_dataloader),
log_interval_L / log_interval_sent_num,
log_interval_wc / 1000 / (time.time() - start_log_interval_time)))
# Clear log interval training stats.
start_log_interval_time = time.time()
log_interval_wc = 0
log_interval_sent_num = 0
log_interval_L = 0
end_epoch_time = time.time()
val_avg_L, val_acc = evaluate(net, val_dataloader)
print('[Epoch %d] train avg loss %g, '
'test acc %.4f, test avg loss %g, throughput %gK wps' % (
epoch, epoch_L / epoch_sent_num,
val_acc, val_avg_L,
epoch_wc / 1000 / (end_epoch_time - start_epoch_time)))
if val_acc >= best_val_acc:
print('Observed Improvement.')
best_val_acc = val_acc
test_avg_L, test_acc = evaluate(net, test_dataloader)
print('Test loss %g, test acc %.4f'%(test_avg_L, test_acc))
print('Total time cost %.2fs'%(time.time()-start_pipeline_time))
return test_acc | [
"def",
"train",
"(",
"net",
",",
"train_data",
",",
"test_data",
")",
":",
"start_pipeline_time",
"=",
"time",
".",
"time",
"(",
")",
"net",
",",
"trainer",
"=",
"text_cnn",
".",
"init",
"(",
"net",
",",
"vocab",
",",
"args",
".",
"model_mode",
",",
"context",
",",
"args",
".",
"lr",
")",
"random",
".",
"shuffle",
"(",
"train_data",
")",
"sp",
"=",
"int",
"(",
"len",
"(",
"train_data",
")",
"*",
"0.9",
")",
"train_dataloader",
"=",
"DataLoader",
"(",
"dataset",
"=",
"train_data",
"[",
":",
"sp",
"]",
",",
"batch_size",
"=",
"args",
".",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
"val_dataloader",
"=",
"DataLoader",
"(",
"dataset",
"=",
"train_data",
"[",
"sp",
":",
"]",
",",
"batch_size",
"=",
"args",
".",
"batch_size",
",",
"shuffle",
"=",
"False",
")",
"test_dataloader",
"=",
"DataLoader",
"(",
"dataset",
"=",
"test_data",
",",
"batch_size",
"=",
"args",
".",
"batch_size",
",",
"shuffle",
"=",
"False",
")",
"# Training/Testing.",
"best_val_acc",
"=",
"0",
"for",
"epoch",
"in",
"range",
"(",
"args",
".",
"epochs",
")",
":",
"# Epoch training stats.",
"start_epoch_time",
"=",
"time",
".",
"time",
"(",
")",
"epoch_L",
"=",
"0.0",
"epoch_sent_num",
"=",
"0",
"epoch_wc",
"=",
"0",
"# Log interval training stats.",
"start_log_interval_time",
"=",
"time",
".",
"time",
"(",
")",
"log_interval_wc",
"=",
"0",
"log_interval_sent_num",
"=",
"0",
"log_interval_L",
"=",
"0.0",
"for",
"i",
",",
"(",
"data",
",",
"label",
")",
"in",
"enumerate",
"(",
"train_dataloader",
")",
":",
"data",
"=",
"mx",
".",
"nd",
".",
"transpose",
"(",
"data",
".",
"as_in_context",
"(",
"context",
")",
")",
"label",
"=",
"label",
".",
"as_in_context",
"(",
"context",
")",
"wc",
"=",
"max_len",
"log_interval_wc",
"+=",
"wc",
"epoch_wc",
"+=",
"wc",
"log_interval_sent_num",
"+=",
"data",
".",
"shape",
"[",
"1",
"]",
"epoch_sent_num",
"+=",
"data",
".",
"shape",
"[",
"1",
"]",
"with",
"autograd",
".",
"record",
"(",
")",
":",
"output",
"=",
"net",
"(",
"data",
")",
"L",
"=",
"loss",
"(",
"output",
",",
"label",
")",
".",
"mean",
"(",
")",
"L",
".",
"backward",
"(",
")",
"# Update parameter.",
"trainer",
".",
"step",
"(",
"1",
")",
"log_interval_L",
"+=",
"L",
".",
"asscalar",
"(",
")",
"epoch_L",
"+=",
"L",
".",
"asscalar",
"(",
")",
"if",
"(",
"i",
"+",
"1",
")",
"%",
"args",
".",
"log_interval",
"==",
"0",
":",
"print",
"(",
"'[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps'",
"%",
"(",
"epoch",
",",
"i",
"+",
"1",
",",
"len",
"(",
"train_dataloader",
")",
",",
"log_interval_L",
"/",
"log_interval_sent_num",
",",
"log_interval_wc",
"/",
"1000",
"/",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_log_interval_time",
")",
")",
")",
"# Clear log interval training stats.",
"start_log_interval_time",
"=",
"time",
".",
"time",
"(",
")",
"log_interval_wc",
"=",
"0",
"log_interval_sent_num",
"=",
"0",
"log_interval_L",
"=",
"0",
"end_epoch_time",
"=",
"time",
".",
"time",
"(",
")",
"val_avg_L",
",",
"val_acc",
"=",
"evaluate",
"(",
"net",
",",
"val_dataloader",
")",
"print",
"(",
"'[Epoch %d] train avg loss %g, '",
"'test acc %.4f, test avg loss %g, throughput %gK wps'",
"%",
"(",
"epoch",
",",
"epoch_L",
"/",
"epoch_sent_num",
",",
"val_acc",
",",
"val_avg_L",
",",
"epoch_wc",
"/",
"1000",
"/",
"(",
"end_epoch_time",
"-",
"start_epoch_time",
")",
")",
")",
"if",
"val_acc",
">=",
"best_val_acc",
":",
"print",
"(",
"'Observed Improvement.'",
")",
"best_val_acc",
"=",
"val_acc",
"test_avg_L",
",",
"test_acc",
"=",
"evaluate",
"(",
"net",
",",
"test_dataloader",
")",
"print",
"(",
"'Test loss %g, test acc %.4f'",
"%",
"(",
"test_avg_L",
",",
"test_acc",
")",
")",
"print",
"(",
"'Total time cost %.2fs'",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_pipeline_time",
")",
")",
"return",
"test_acc"
] | Train textCNN model for sentiment analysis. | [
"Train",
"textCNN",
"model",
"for",
"sentiment",
"analysis",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/sentiment_analysis/sentiment_analysis_cnn.py#L114-L184 | train |
dmlc/gluon-nlp | scripts/bert/embedding.py | BertEmbedding.embedding | def embedding(self, sentences, oov_way='avg'):
"""
Get tokens, tokens embedding
Parameters
----------
sentences : List[str]
sentences for encoding.
oov_way : str, default avg.
use **avg**, **sum** or **last** to get token embedding for those out of
vocabulary words
Returns
-------
List[(List[str], List[ndarray])]
List of tokens, and tokens embedding
"""
data_iter = self.data_loader(sentences=sentences)
batches = []
for token_ids, valid_length, token_types in data_iter:
token_ids = token_ids.as_in_context(self.ctx)
valid_length = valid_length.as_in_context(self.ctx)
token_types = token_types.as_in_context(self.ctx)
sequence_outputs = self.bert(token_ids, token_types,
valid_length.astype(self.dtype))
for token_id, sequence_output in zip(token_ids.asnumpy(),
sequence_outputs.asnumpy()):
batches.append((token_id, sequence_output))
return self.oov(batches, oov_way) | python | def embedding(self, sentences, oov_way='avg'):
"""
Get tokens, tokens embedding
Parameters
----------
sentences : List[str]
sentences for encoding.
oov_way : str, default avg.
use **avg**, **sum** or **last** to get token embedding for those out of
vocabulary words
Returns
-------
List[(List[str], List[ndarray])]
List of tokens, and tokens embedding
"""
data_iter = self.data_loader(sentences=sentences)
batches = []
for token_ids, valid_length, token_types in data_iter:
token_ids = token_ids.as_in_context(self.ctx)
valid_length = valid_length.as_in_context(self.ctx)
token_types = token_types.as_in_context(self.ctx)
sequence_outputs = self.bert(token_ids, token_types,
valid_length.astype(self.dtype))
for token_id, sequence_output in zip(token_ids.asnumpy(),
sequence_outputs.asnumpy()):
batches.append((token_id, sequence_output))
return self.oov(batches, oov_way) | [
"def",
"embedding",
"(",
"self",
",",
"sentences",
",",
"oov_way",
"=",
"'avg'",
")",
":",
"data_iter",
"=",
"self",
".",
"data_loader",
"(",
"sentences",
"=",
"sentences",
")",
"batches",
"=",
"[",
"]",
"for",
"token_ids",
",",
"valid_length",
",",
"token_types",
"in",
"data_iter",
":",
"token_ids",
"=",
"token_ids",
".",
"as_in_context",
"(",
"self",
".",
"ctx",
")",
"valid_length",
"=",
"valid_length",
".",
"as_in_context",
"(",
"self",
".",
"ctx",
")",
"token_types",
"=",
"token_types",
".",
"as_in_context",
"(",
"self",
".",
"ctx",
")",
"sequence_outputs",
"=",
"self",
".",
"bert",
"(",
"token_ids",
",",
"token_types",
",",
"valid_length",
".",
"astype",
"(",
"self",
".",
"dtype",
")",
")",
"for",
"token_id",
",",
"sequence_output",
"in",
"zip",
"(",
"token_ids",
".",
"asnumpy",
"(",
")",
",",
"sequence_outputs",
".",
"asnumpy",
"(",
")",
")",
":",
"batches",
".",
"append",
"(",
"(",
"token_id",
",",
"sequence_output",
")",
")",
"return",
"self",
".",
"oov",
"(",
"batches",
",",
"oov_way",
")"
] | Get tokens, tokens embedding
Parameters
----------
sentences : List[str]
sentences for encoding.
oov_way : str, default avg.
use **avg**, **sum** or **last** to get token embedding for those out of
vocabulary words
Returns
-------
List[(List[str], List[ndarray])]
List of tokens, and tokens embedding | [
"Get",
"tokens",
"tokens",
"embedding"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/embedding.py#L111-L139 | train |
dmlc/gluon-nlp | scripts/bert/embedding.py | BertEmbedding.data_loader | def data_loader(self, sentences, shuffle=False):
"""Load, tokenize and prepare the input sentences."""
dataset = BertEmbeddingDataset(sentences, self.transform)
return DataLoader(dataset=dataset, batch_size=self.batch_size, shuffle=shuffle) | python | def data_loader(self, sentences, shuffle=False):
"""Load, tokenize and prepare the input sentences."""
dataset = BertEmbeddingDataset(sentences, self.transform)
return DataLoader(dataset=dataset, batch_size=self.batch_size, shuffle=shuffle) | [
"def",
"data_loader",
"(",
"self",
",",
"sentences",
",",
"shuffle",
"=",
"False",
")",
":",
"dataset",
"=",
"BertEmbeddingDataset",
"(",
"sentences",
",",
"self",
".",
"transform",
")",
"return",
"DataLoader",
"(",
"dataset",
"=",
"dataset",
",",
"batch_size",
"=",
"self",
".",
"batch_size",
",",
"shuffle",
"=",
"shuffle",
")"
] | Load, tokenize and prepare the input sentences. | [
"Load",
"tokenize",
"and",
"prepare",
"the",
"input",
"sentences",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/embedding.py#L141-L144 | train |
dmlc/gluon-nlp | scripts/bert/embedding.py | BertEmbedding.oov | def oov(self, batches, oov_way='avg'):
"""
How to handle oov. Also filter out [CLS], [SEP] tokens.
Parameters
----------
batches : List[(tokens_id,
sequence_outputs,
pooled_output].
batch token_ids (max_seq_length, ),
sequence_outputs (max_seq_length, dim, ),
pooled_output (dim, )
oov_way : str
use **avg**, **sum** or **last** to get token embedding for those out of
vocabulary words
Returns
-------
List[(List[str], List[ndarray])]
List of tokens, and tokens embedding
"""
sentences = []
for token_ids, sequence_outputs in batches:
tokens = []
tensors = []
oov_len = 1
for token_id, sequence_output in zip(token_ids, sequence_outputs):
if token_id == 1:
# [PAD] token, sequence is finished.
break
if token_id in (2, 3):
# [CLS], [SEP]
continue
token = self.vocab.idx_to_token[token_id]
if token.startswith('##'):
token = token[2:]
tokens[-1] += token
if oov_way == 'last':
tensors[-1] = sequence_output
else:
tensors[-1] += sequence_output
if oov_way == 'avg':
oov_len += 1
else: # iv, avg last oov
if oov_len > 1:
tensors[-1] /= oov_len
oov_len = 1
tokens.append(token)
tensors.append(sequence_output)
if oov_len > 1: # if the whole sentence is one oov, handle this special case
tensors[-1] /= oov_len
sentences.append((tokens, tensors))
return sentences | python | def oov(self, batches, oov_way='avg'):
"""
How to handle oov. Also filter out [CLS], [SEP] tokens.
Parameters
----------
batches : List[(tokens_id,
sequence_outputs,
pooled_output].
batch token_ids (max_seq_length, ),
sequence_outputs (max_seq_length, dim, ),
pooled_output (dim, )
oov_way : str
use **avg**, **sum** or **last** to get token embedding for those out of
vocabulary words
Returns
-------
List[(List[str], List[ndarray])]
List of tokens, and tokens embedding
"""
sentences = []
for token_ids, sequence_outputs in batches:
tokens = []
tensors = []
oov_len = 1
for token_id, sequence_output in zip(token_ids, sequence_outputs):
if token_id == 1:
# [PAD] token, sequence is finished.
break
if token_id in (2, 3):
# [CLS], [SEP]
continue
token = self.vocab.idx_to_token[token_id]
if token.startswith('##'):
token = token[2:]
tokens[-1] += token
if oov_way == 'last':
tensors[-1] = sequence_output
else:
tensors[-1] += sequence_output
if oov_way == 'avg':
oov_len += 1
else: # iv, avg last oov
if oov_len > 1:
tensors[-1] /= oov_len
oov_len = 1
tokens.append(token)
tensors.append(sequence_output)
if oov_len > 1: # if the whole sentence is one oov, handle this special case
tensors[-1] /= oov_len
sentences.append((tokens, tensors))
return sentences | [
"def",
"oov",
"(",
"self",
",",
"batches",
",",
"oov_way",
"=",
"'avg'",
")",
":",
"sentences",
"=",
"[",
"]",
"for",
"token_ids",
",",
"sequence_outputs",
"in",
"batches",
":",
"tokens",
"=",
"[",
"]",
"tensors",
"=",
"[",
"]",
"oov_len",
"=",
"1",
"for",
"token_id",
",",
"sequence_output",
"in",
"zip",
"(",
"token_ids",
",",
"sequence_outputs",
")",
":",
"if",
"token_id",
"==",
"1",
":",
"# [PAD] token, sequence is finished.",
"break",
"if",
"token_id",
"in",
"(",
"2",
",",
"3",
")",
":",
"# [CLS], [SEP]",
"continue",
"token",
"=",
"self",
".",
"vocab",
".",
"idx_to_token",
"[",
"token_id",
"]",
"if",
"token",
".",
"startswith",
"(",
"'##'",
")",
":",
"token",
"=",
"token",
"[",
"2",
":",
"]",
"tokens",
"[",
"-",
"1",
"]",
"+=",
"token",
"if",
"oov_way",
"==",
"'last'",
":",
"tensors",
"[",
"-",
"1",
"]",
"=",
"sequence_output",
"else",
":",
"tensors",
"[",
"-",
"1",
"]",
"+=",
"sequence_output",
"if",
"oov_way",
"==",
"'avg'",
":",
"oov_len",
"+=",
"1",
"else",
":",
"# iv, avg last oov",
"if",
"oov_len",
">",
"1",
":",
"tensors",
"[",
"-",
"1",
"]",
"/=",
"oov_len",
"oov_len",
"=",
"1",
"tokens",
".",
"append",
"(",
"token",
")",
"tensors",
".",
"append",
"(",
"sequence_output",
")",
"if",
"oov_len",
">",
"1",
":",
"# if the whole sentence is one oov, handle this special case",
"tensors",
"[",
"-",
"1",
"]",
"/=",
"oov_len",
"sentences",
".",
"append",
"(",
"(",
"tokens",
",",
"tensors",
")",
")",
"return",
"sentences"
] | How to handle oov. Also filter out [CLS], [SEP] tokens.
Parameters
----------
batches : List[(tokens_id,
sequence_outputs,
pooled_output].
batch token_ids (max_seq_length, ),
sequence_outputs (max_seq_length, dim, ),
pooled_output (dim, )
oov_way : str
use **avg**, **sum** or **last** to get token embedding for those out of
vocabulary words
Returns
-------
List[(List[str], List[ndarray])]
List of tokens, and tokens embedding | [
"How",
"to",
"handle",
"oov",
".",
"Also",
"filter",
"out",
"[",
"CLS",
"]",
"[",
"SEP",
"]",
"tokens",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/embedding.py#L146-L198 | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | get_bert_model | def get_bert_model(model_name=None, dataset_name=None, vocab=None,
pretrained=True, ctx=mx.cpu(),
use_pooler=True, use_decoder=True, use_classifier=True,
output_attention=False, output_all_encodings=False,
root=os.path.join(get_home_dir(), 'models'), **kwargs):
"""Any BERT pretrained model.
Parameters
----------
model_name : str or None, default None
Options include 'bert_24_1024_16' and 'bert_12_768_12'.
dataset_name : str or None, default None
Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased'
for both bert_24_1024_16 and bert_12_768_12.
'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased'
for bert_12_768_12 only.
vocab : gluonnlp.vocab.BERTVocab or None, default None
Vocabulary for the dataset. Must be provided if dataset is not specified.
pretrained : bool, default True
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
use_pooler : bool, default True
Whether to include the pooler which converts the encoded sequence tensor of shape
(batch_size, seq_length, units) to a tensor of shape (batch_size, units)
for for segment level classification task.
use_decoder : bool, default True
Whether to include the decoder for masked language model prediction.
use_classifier : bool, default True
Whether to include the classifier for next sentence classification.
output_attention : bool, default False
Whether to include attention weights of each encoding cell to the output.
output_all_encodings : bool, default False
Whether to output encodings of all encoder cells.
Returns
-------
BERTModel, gluonnlp.vocab.BERTVocab
"""
predefined_args = bert_hparams[model_name]
mutable_args = ['use_residual', 'dropout', 'embed_dropout', 'word_embed']
mutable_args = frozenset(mutable_args)
assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
'Cannot override predefined model settings.'
predefined_args.update(kwargs)
# encoder
encoder = BERTEncoder(attention_cell=predefined_args['attention_cell'],
num_layers=predefined_args['num_layers'],
units=predefined_args['units'],
hidden_size=predefined_args['hidden_size'],
max_length=predefined_args['max_length'],
num_heads=predefined_args['num_heads'],
scaled=predefined_args['scaled'],
dropout=predefined_args['dropout'],
output_attention=output_attention,
output_all_encodings=output_all_encodings,
use_residual=predefined_args['use_residual'])
# bert_vocab
from ..vocab import BERTVocab
if dataset_name in ['wiki_cn', 'wiki_multilingual']:
warnings.warn('wiki_cn/wiki_multilingual will be deprecated.'
' Please use wiki_cn_cased/wiki_multilingual_uncased instead.')
bert_vocab = _load_vocab(dataset_name, vocab, root, cls=BERTVocab)
# BERT
net = BERTModel(encoder, len(bert_vocab),
token_type_vocab_size=predefined_args['token_type_vocab_size'],
units=predefined_args['units'],
embed_size=predefined_args['embed_size'],
embed_dropout=predefined_args['embed_dropout'],
word_embed=predefined_args['word_embed'],
use_pooler=use_pooler, use_decoder=use_decoder,
use_classifier=use_classifier)
if pretrained:
ignore_extra = not (use_pooler and use_decoder and use_classifier)
_load_pretrained_params(net, model_name, dataset_name, root, ctx,
ignore_extra=ignore_extra)
return net, bert_vocab | python | def get_bert_model(model_name=None, dataset_name=None, vocab=None,
pretrained=True, ctx=mx.cpu(),
use_pooler=True, use_decoder=True, use_classifier=True,
output_attention=False, output_all_encodings=False,
root=os.path.join(get_home_dir(), 'models'), **kwargs):
"""Any BERT pretrained model.
Parameters
----------
model_name : str or None, default None
Options include 'bert_24_1024_16' and 'bert_12_768_12'.
dataset_name : str or None, default None
Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased'
for both bert_24_1024_16 and bert_12_768_12.
'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased'
for bert_12_768_12 only.
vocab : gluonnlp.vocab.BERTVocab or None, default None
Vocabulary for the dataset. Must be provided if dataset is not specified.
pretrained : bool, default True
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
use_pooler : bool, default True
Whether to include the pooler which converts the encoded sequence tensor of shape
(batch_size, seq_length, units) to a tensor of shape (batch_size, units)
for for segment level classification task.
use_decoder : bool, default True
Whether to include the decoder for masked language model prediction.
use_classifier : bool, default True
Whether to include the classifier for next sentence classification.
output_attention : bool, default False
Whether to include attention weights of each encoding cell to the output.
output_all_encodings : bool, default False
Whether to output encodings of all encoder cells.
Returns
-------
BERTModel, gluonnlp.vocab.BERTVocab
"""
predefined_args = bert_hparams[model_name]
mutable_args = ['use_residual', 'dropout', 'embed_dropout', 'word_embed']
mutable_args = frozenset(mutable_args)
assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
'Cannot override predefined model settings.'
predefined_args.update(kwargs)
# encoder
encoder = BERTEncoder(attention_cell=predefined_args['attention_cell'],
num_layers=predefined_args['num_layers'],
units=predefined_args['units'],
hidden_size=predefined_args['hidden_size'],
max_length=predefined_args['max_length'],
num_heads=predefined_args['num_heads'],
scaled=predefined_args['scaled'],
dropout=predefined_args['dropout'],
output_attention=output_attention,
output_all_encodings=output_all_encodings,
use_residual=predefined_args['use_residual'])
# bert_vocab
from ..vocab import BERTVocab
if dataset_name in ['wiki_cn', 'wiki_multilingual']:
warnings.warn('wiki_cn/wiki_multilingual will be deprecated.'
' Please use wiki_cn_cased/wiki_multilingual_uncased instead.')
bert_vocab = _load_vocab(dataset_name, vocab, root, cls=BERTVocab)
# BERT
net = BERTModel(encoder, len(bert_vocab),
token_type_vocab_size=predefined_args['token_type_vocab_size'],
units=predefined_args['units'],
embed_size=predefined_args['embed_size'],
embed_dropout=predefined_args['embed_dropout'],
word_embed=predefined_args['word_embed'],
use_pooler=use_pooler, use_decoder=use_decoder,
use_classifier=use_classifier)
if pretrained:
ignore_extra = not (use_pooler and use_decoder and use_classifier)
_load_pretrained_params(net, model_name, dataset_name, root, ctx,
ignore_extra=ignore_extra)
return net, bert_vocab | [
"def",
"get_bert_model",
"(",
"model_name",
"=",
"None",
",",
"dataset_name",
"=",
"None",
",",
"vocab",
"=",
"None",
",",
"pretrained",
"=",
"True",
",",
"ctx",
"=",
"mx",
".",
"cpu",
"(",
")",
",",
"use_pooler",
"=",
"True",
",",
"use_decoder",
"=",
"True",
",",
"use_classifier",
"=",
"True",
",",
"output_attention",
"=",
"False",
",",
"output_all_encodings",
"=",
"False",
",",
"root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_home_dir",
"(",
")",
",",
"'models'",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"predefined_args",
"=",
"bert_hparams",
"[",
"model_name",
"]",
"mutable_args",
"=",
"[",
"'use_residual'",
",",
"'dropout'",
",",
"'embed_dropout'",
",",
"'word_embed'",
"]",
"mutable_args",
"=",
"frozenset",
"(",
"mutable_args",
")",
"assert",
"all",
"(",
"(",
"k",
"not",
"in",
"kwargs",
"or",
"k",
"in",
"mutable_args",
")",
"for",
"k",
"in",
"predefined_args",
")",
",",
"'Cannot override predefined model settings.'",
"predefined_args",
".",
"update",
"(",
"kwargs",
")",
"# encoder",
"encoder",
"=",
"BERTEncoder",
"(",
"attention_cell",
"=",
"predefined_args",
"[",
"'attention_cell'",
"]",
",",
"num_layers",
"=",
"predefined_args",
"[",
"'num_layers'",
"]",
",",
"units",
"=",
"predefined_args",
"[",
"'units'",
"]",
",",
"hidden_size",
"=",
"predefined_args",
"[",
"'hidden_size'",
"]",
",",
"max_length",
"=",
"predefined_args",
"[",
"'max_length'",
"]",
",",
"num_heads",
"=",
"predefined_args",
"[",
"'num_heads'",
"]",
",",
"scaled",
"=",
"predefined_args",
"[",
"'scaled'",
"]",
",",
"dropout",
"=",
"predefined_args",
"[",
"'dropout'",
"]",
",",
"output_attention",
"=",
"output_attention",
",",
"output_all_encodings",
"=",
"output_all_encodings",
",",
"use_residual",
"=",
"predefined_args",
"[",
"'use_residual'",
"]",
")",
"# bert_vocab",
"from",
".",
".",
"vocab",
"import",
"BERTVocab",
"if",
"dataset_name",
"in",
"[",
"'wiki_cn'",
",",
"'wiki_multilingual'",
"]",
":",
"warnings",
".",
"warn",
"(",
"'wiki_cn/wiki_multilingual will be deprecated.'",
"' Please use wiki_cn_cased/wiki_multilingual_uncased instead.'",
")",
"bert_vocab",
"=",
"_load_vocab",
"(",
"dataset_name",
",",
"vocab",
",",
"root",
",",
"cls",
"=",
"BERTVocab",
")",
"# BERT",
"net",
"=",
"BERTModel",
"(",
"encoder",
",",
"len",
"(",
"bert_vocab",
")",
",",
"token_type_vocab_size",
"=",
"predefined_args",
"[",
"'token_type_vocab_size'",
"]",
",",
"units",
"=",
"predefined_args",
"[",
"'units'",
"]",
",",
"embed_size",
"=",
"predefined_args",
"[",
"'embed_size'",
"]",
",",
"embed_dropout",
"=",
"predefined_args",
"[",
"'embed_dropout'",
"]",
",",
"word_embed",
"=",
"predefined_args",
"[",
"'word_embed'",
"]",
",",
"use_pooler",
"=",
"use_pooler",
",",
"use_decoder",
"=",
"use_decoder",
",",
"use_classifier",
"=",
"use_classifier",
")",
"if",
"pretrained",
":",
"ignore_extra",
"=",
"not",
"(",
"use_pooler",
"and",
"use_decoder",
"and",
"use_classifier",
")",
"_load_pretrained_params",
"(",
"net",
",",
"model_name",
",",
"dataset_name",
",",
"root",
",",
"ctx",
",",
"ignore_extra",
"=",
"ignore_extra",
")",
"return",
"net",
",",
"bert_vocab"
] | Any BERT pretrained model.
Parameters
----------
model_name : str or None, default None
Options include 'bert_24_1024_16' and 'bert_12_768_12'.
dataset_name : str or None, default None
Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased'
for both bert_24_1024_16 and bert_12_768_12.
'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased'
for bert_12_768_12 only.
vocab : gluonnlp.vocab.BERTVocab or None, default None
Vocabulary for the dataset. Must be provided if dataset is not specified.
pretrained : bool, default True
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
use_pooler : bool, default True
Whether to include the pooler which converts the encoded sequence tensor of shape
(batch_size, seq_length, units) to a tensor of shape (batch_size, units)
for for segment level classification task.
use_decoder : bool, default True
Whether to include the decoder for masked language model prediction.
use_classifier : bool, default True
Whether to include the classifier for next sentence classification.
output_attention : bool, default False
Whether to include attention weights of each encoding cell to the output.
output_all_encodings : bool, default False
Whether to output encodings of all encoder cells.
Returns
-------
BERTModel, gluonnlp.vocab.BERTVocab | [
"Any",
"BERT",
"pretrained",
"model",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L630-L709 | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | BERTLayerNorm.hybrid_forward | def hybrid_forward(self, F, data, gamma, beta):
"""forward computation."""
# TODO(haibin): LayerNorm does not support fp16 safe reduction. Issue is tracked at:
# https://github.com/apache/incubator-mxnet/issues/14073
if self._dtype:
data = data.astype('float32')
gamma = gamma.astype('float32')
beta = beta.astype('float32')
norm_data = F.LayerNorm(data, gamma=gamma, beta=beta, axis=self._axis, eps=self._epsilon)
if self._dtype:
norm_data = norm_data.astype(self._dtype)
return norm_data | python | def hybrid_forward(self, F, data, gamma, beta):
"""forward computation."""
# TODO(haibin): LayerNorm does not support fp16 safe reduction. Issue is tracked at:
# https://github.com/apache/incubator-mxnet/issues/14073
if self._dtype:
data = data.astype('float32')
gamma = gamma.astype('float32')
beta = beta.astype('float32')
norm_data = F.LayerNorm(data, gamma=gamma, beta=beta, axis=self._axis, eps=self._epsilon)
if self._dtype:
norm_data = norm_data.astype(self._dtype)
return norm_data | [
"def",
"hybrid_forward",
"(",
"self",
",",
"F",
",",
"data",
",",
"gamma",
",",
"beta",
")",
":",
"# TODO(haibin): LayerNorm does not support fp16 safe reduction. Issue is tracked at:",
"# https://github.com/apache/incubator-mxnet/issues/14073",
"if",
"self",
".",
"_dtype",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"'float32'",
")",
"gamma",
"=",
"gamma",
".",
"astype",
"(",
"'float32'",
")",
"beta",
"=",
"beta",
".",
"astype",
"(",
"'float32'",
")",
"norm_data",
"=",
"F",
".",
"LayerNorm",
"(",
"data",
",",
"gamma",
"=",
"gamma",
",",
"beta",
"=",
"beta",
",",
"axis",
"=",
"self",
".",
"_axis",
",",
"eps",
"=",
"self",
".",
"_epsilon",
")",
"if",
"self",
".",
"_dtype",
":",
"norm_data",
"=",
"norm_data",
".",
"astype",
"(",
"self",
".",
"_dtype",
")",
"return",
"norm_data"
] | forward computation. | [
"forward",
"computation",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L59-L70 | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | BERTModel._get_classifier | def _get_classifier(self, prefix):
""" Construct a decoder for the next sentence prediction task """
with self.name_scope():
classifier = nn.Dense(2, prefix=prefix)
return classifier | python | def _get_classifier(self, prefix):
""" Construct a decoder for the next sentence prediction task """
with self.name_scope():
classifier = nn.Dense(2, prefix=prefix)
return classifier | [
"def",
"_get_classifier",
"(",
"self",
",",
"prefix",
")",
":",
"with",
"self",
".",
"name_scope",
"(",
")",
":",
"classifier",
"=",
"nn",
".",
"Dense",
"(",
"2",
",",
"prefix",
"=",
"prefix",
")",
"return",
"classifier"
] | Construct a decoder for the next sentence prediction task | [
"Construct",
"a",
"decoder",
"for",
"the",
"next",
"sentence",
"prediction",
"task"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L364-L368 | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | BERTModel._get_decoder | def _get_decoder(self, units, vocab_size, embed, prefix):
""" Construct a decoder for the masked language model task """
with self.name_scope():
decoder = nn.HybridSequential(prefix=prefix)
decoder.add(nn.Dense(units, flatten=False))
decoder.add(GELU())
decoder.add(BERTLayerNorm(in_channels=units))
decoder.add(nn.Dense(vocab_size, flatten=False, params=embed.collect_params()))
assert decoder[3].weight == list(embed.collect_params().values())[0], \
'The weights of word embedding are not tied with those of decoder'
return decoder | python | def _get_decoder(self, units, vocab_size, embed, prefix):
""" Construct a decoder for the masked language model task """
with self.name_scope():
decoder = nn.HybridSequential(prefix=prefix)
decoder.add(nn.Dense(units, flatten=False))
decoder.add(GELU())
decoder.add(BERTLayerNorm(in_channels=units))
decoder.add(nn.Dense(vocab_size, flatten=False, params=embed.collect_params()))
assert decoder[3].weight == list(embed.collect_params().values())[0], \
'The weights of word embedding are not tied with those of decoder'
return decoder | [
"def",
"_get_decoder",
"(",
"self",
",",
"units",
",",
"vocab_size",
",",
"embed",
",",
"prefix",
")",
":",
"with",
"self",
".",
"name_scope",
"(",
")",
":",
"decoder",
"=",
"nn",
".",
"HybridSequential",
"(",
"prefix",
"=",
"prefix",
")",
"decoder",
".",
"add",
"(",
"nn",
".",
"Dense",
"(",
"units",
",",
"flatten",
"=",
"False",
")",
")",
"decoder",
".",
"add",
"(",
"GELU",
"(",
")",
")",
"decoder",
".",
"add",
"(",
"BERTLayerNorm",
"(",
"in_channels",
"=",
"units",
")",
")",
"decoder",
".",
"add",
"(",
"nn",
".",
"Dense",
"(",
"vocab_size",
",",
"flatten",
"=",
"False",
",",
"params",
"=",
"embed",
".",
"collect_params",
"(",
")",
")",
")",
"assert",
"decoder",
"[",
"3",
"]",
".",
"weight",
"==",
"list",
"(",
"embed",
".",
"collect_params",
"(",
")",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"'The weights of word embedding are not tied with those of decoder'",
"return",
"decoder"
] | Construct a decoder for the masked language model task | [
"Construct",
"a",
"decoder",
"for",
"the",
"masked",
"language",
"model",
"task"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L370-L380 | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | BERTModel._get_embed | def _get_embed(self, embed, vocab_size, embed_size, initializer, dropout, prefix):
""" Construct an embedding block. """
if embed is None:
assert embed_size is not None, '"embed_size" cannot be None if "word_embed" or ' \
'token_type_embed is not given.'
with self.name_scope():
embed = nn.HybridSequential(prefix=prefix)
with embed.name_scope():
embed.add(nn.Embedding(input_dim=vocab_size, output_dim=embed_size,
weight_initializer=initializer))
if dropout:
embed.add(nn.Dropout(rate=dropout))
assert isinstance(embed, Block)
return embed | python | def _get_embed(self, embed, vocab_size, embed_size, initializer, dropout, prefix):
""" Construct an embedding block. """
if embed is None:
assert embed_size is not None, '"embed_size" cannot be None if "word_embed" or ' \
'token_type_embed is not given.'
with self.name_scope():
embed = nn.HybridSequential(prefix=prefix)
with embed.name_scope():
embed.add(nn.Embedding(input_dim=vocab_size, output_dim=embed_size,
weight_initializer=initializer))
if dropout:
embed.add(nn.Dropout(rate=dropout))
assert isinstance(embed, Block)
return embed | [
"def",
"_get_embed",
"(",
"self",
",",
"embed",
",",
"vocab_size",
",",
"embed_size",
",",
"initializer",
",",
"dropout",
",",
"prefix",
")",
":",
"if",
"embed",
"is",
"None",
":",
"assert",
"embed_size",
"is",
"not",
"None",
",",
"'\"embed_size\" cannot be None if \"word_embed\" or '",
"'token_type_embed is not given.'",
"with",
"self",
".",
"name_scope",
"(",
")",
":",
"embed",
"=",
"nn",
".",
"HybridSequential",
"(",
"prefix",
"=",
"prefix",
")",
"with",
"embed",
".",
"name_scope",
"(",
")",
":",
"embed",
".",
"add",
"(",
"nn",
".",
"Embedding",
"(",
"input_dim",
"=",
"vocab_size",
",",
"output_dim",
"=",
"embed_size",
",",
"weight_initializer",
"=",
"initializer",
")",
")",
"if",
"dropout",
":",
"embed",
".",
"add",
"(",
"nn",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
")",
")",
"assert",
"isinstance",
"(",
"embed",
",",
"Block",
")",
"return",
"embed"
] | Construct an embedding block. | [
"Construct",
"an",
"embedding",
"block",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L382-L395 | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | BERTModel._get_pooler | def _get_pooler(self, units, prefix):
""" Construct pooler.
The pooler slices and projects the hidden output of first token
in the sequence for segment level classification.
"""
with self.name_scope():
pooler = nn.Dense(units=units, flatten=False, activation='tanh',
prefix=prefix)
return pooler | python | def _get_pooler(self, units, prefix):
""" Construct pooler.
The pooler slices and projects the hidden output of first token
in the sequence for segment level classification.
"""
with self.name_scope():
pooler = nn.Dense(units=units, flatten=False, activation='tanh',
prefix=prefix)
return pooler | [
"def",
"_get_pooler",
"(",
"self",
",",
"units",
",",
"prefix",
")",
":",
"with",
"self",
".",
"name_scope",
"(",
")",
":",
"pooler",
"=",
"nn",
".",
"Dense",
"(",
"units",
"=",
"units",
",",
"flatten",
"=",
"False",
",",
"activation",
"=",
"'tanh'",
",",
"prefix",
"=",
"prefix",
")",
"return",
"pooler"
] | Construct pooler.
The pooler slices and projects the hidden output of first token
in the sequence for segment level classification. | [
"Construct",
"pooler",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L397-L407 | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | BERTModel._encode_sequence | def _encode_sequence(self, inputs, token_types, valid_length=None):
"""Generate the representation given the input sequences.
This is used for pre-training or fine-tuning a BERT model.
"""
# embedding
word_embedding = self.word_embed(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = word_embedding + type_embedding
# encoding
outputs, additional_outputs = self.encoder(embedding, None, valid_length)
return outputs, additional_outputs | python | def _encode_sequence(self, inputs, token_types, valid_length=None):
"""Generate the representation given the input sequences.
This is used for pre-training or fine-tuning a BERT model.
"""
# embedding
word_embedding = self.word_embed(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = word_embedding + type_embedding
# encoding
outputs, additional_outputs = self.encoder(embedding, None, valid_length)
return outputs, additional_outputs | [
"def",
"_encode_sequence",
"(",
"self",
",",
"inputs",
",",
"token_types",
",",
"valid_length",
"=",
"None",
")",
":",
"# embedding",
"word_embedding",
"=",
"self",
".",
"word_embed",
"(",
"inputs",
")",
"type_embedding",
"=",
"self",
".",
"token_type_embed",
"(",
"token_types",
")",
"embedding",
"=",
"word_embedding",
"+",
"type_embedding",
"# encoding",
"outputs",
",",
"additional_outputs",
"=",
"self",
".",
"encoder",
"(",
"embedding",
",",
"None",
",",
"valid_length",
")",
"return",
"outputs",
",",
"additional_outputs"
] | Generate the representation given the input sequences.
This is used for pre-training or fine-tuning a BERT model. | [
"Generate",
"the",
"representation",
"given",
"the",
"input",
"sequences",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L440-L451 | train |
dmlc/gluon-nlp | src/gluonnlp/model/bert.py | BERTModel._decode | def _decode(self, sequence, masked_positions):
"""Generate unnormalized prediction for the masked language model task.
This is only used for pre-training the BERT model.
Inputs:
- **sequence**: input tensor of sequence encodings.
Shape (batch_size, seq_length, units).
- **masked_positions**: input tensor of position of tokens for masked LM decoding.
Shape (batch_size, num_masked_positions). For each sample in the batch, the values
in this tensor must not be out of bound considering the length of the sequence.
Outputs:
- **masked_lm_outputs**: output tensor of token predictions for target masked_positions.
Shape (batch_size, num_masked_positions, vocab_size).
"""
batch_size = sequence.shape[0]
num_masked_positions = masked_positions.shape[1]
ctx = masked_positions.context
dtype = masked_positions.dtype
# batch_idx = [0,0,0,1,1,1,2,2,2...]
# masked_positions = [1,2,4,0,3,4,2,3,5...]
batch_idx = mx.nd.arange(0, batch_size, repeat=num_masked_positions, dtype=dtype, ctx=ctx)
batch_idx = batch_idx.reshape((1, -1))
masked_positions = masked_positions.reshape((1, -1))
position_idx = mx.nd.Concat(batch_idx, masked_positions, dim=0)
encoded = mx.nd.gather_nd(sequence, position_idx)
encoded = encoded.reshape((batch_size, num_masked_positions, sequence.shape[-1]))
decoded = self.decoder(encoded)
return decoded | python | def _decode(self, sequence, masked_positions):
"""Generate unnormalized prediction for the masked language model task.
This is only used for pre-training the BERT model.
Inputs:
- **sequence**: input tensor of sequence encodings.
Shape (batch_size, seq_length, units).
- **masked_positions**: input tensor of position of tokens for masked LM decoding.
Shape (batch_size, num_masked_positions). For each sample in the batch, the values
in this tensor must not be out of bound considering the length of the sequence.
Outputs:
- **masked_lm_outputs**: output tensor of token predictions for target masked_positions.
Shape (batch_size, num_masked_positions, vocab_size).
"""
batch_size = sequence.shape[0]
num_masked_positions = masked_positions.shape[1]
ctx = masked_positions.context
dtype = masked_positions.dtype
# batch_idx = [0,0,0,1,1,1,2,2,2...]
# masked_positions = [1,2,4,0,3,4,2,3,5...]
batch_idx = mx.nd.arange(0, batch_size, repeat=num_masked_positions, dtype=dtype, ctx=ctx)
batch_idx = batch_idx.reshape((1, -1))
masked_positions = masked_positions.reshape((1, -1))
position_idx = mx.nd.Concat(batch_idx, masked_positions, dim=0)
encoded = mx.nd.gather_nd(sequence, position_idx)
encoded = encoded.reshape((batch_size, num_masked_positions, sequence.shape[-1]))
decoded = self.decoder(encoded)
return decoded | [
"def",
"_decode",
"(",
"self",
",",
"sequence",
",",
"masked_positions",
")",
":",
"batch_size",
"=",
"sequence",
".",
"shape",
"[",
"0",
"]",
"num_masked_positions",
"=",
"masked_positions",
".",
"shape",
"[",
"1",
"]",
"ctx",
"=",
"masked_positions",
".",
"context",
"dtype",
"=",
"masked_positions",
".",
"dtype",
"# batch_idx = [0,0,0,1,1,1,2,2,2...]",
"# masked_positions = [1,2,4,0,3,4,2,3,5...]",
"batch_idx",
"=",
"mx",
".",
"nd",
".",
"arange",
"(",
"0",
",",
"batch_size",
",",
"repeat",
"=",
"num_masked_positions",
",",
"dtype",
"=",
"dtype",
",",
"ctx",
"=",
"ctx",
")",
"batch_idx",
"=",
"batch_idx",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
"masked_positions",
"=",
"masked_positions",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
"position_idx",
"=",
"mx",
".",
"nd",
".",
"Concat",
"(",
"batch_idx",
",",
"masked_positions",
",",
"dim",
"=",
"0",
")",
"encoded",
"=",
"mx",
".",
"nd",
".",
"gather_nd",
"(",
"sequence",
",",
"position_idx",
")",
"encoded",
"=",
"encoded",
".",
"reshape",
"(",
"(",
"batch_size",
",",
"num_masked_positions",
",",
"sequence",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"decoded",
"=",
"self",
".",
"decoder",
"(",
"encoded",
")",
"return",
"decoded"
] | Generate unnormalized prediction for the masked language model task.
This is only used for pre-training the BERT model.
Inputs:
- **sequence**: input tensor of sequence encodings.
Shape (batch_size, seq_length, units).
- **masked_positions**: input tensor of position of tokens for masked LM decoding.
Shape (batch_size, num_masked_positions). For each sample in the batch, the values
in this tensor must not be out of bound considering the length of the sequence.
Outputs:
- **masked_lm_outputs**: output tensor of token predictions for target masked_positions.
Shape (batch_size, num_masked_positions, vocab_size). | [
"Generate",
"unnormalized",
"prediction",
"for",
"the",
"masked",
"language",
"model",
"task",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L461-L490 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | _ngrams | def _ngrams(segment, n):
"""Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
Contain all the nth n-grams in segment with a count of how many times each n-gram occurred.
"""
ngram_counts = Counter()
for i in range(0, len(segment) - n + 1):
ngram = tuple(segment[i:i + n])
ngram_counts[ngram] += 1
return ngram_counts | python | def _ngrams(segment, n):
"""Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
Contain all the nth n-grams in segment with a count of how many times each n-gram occurred.
"""
ngram_counts = Counter()
for i in range(0, len(segment) - n + 1):
ngram = tuple(segment[i:i + n])
ngram_counts[ngram] += 1
return ngram_counts | [
"def",
"_ngrams",
"(",
"segment",
",",
"n",
")",
":",
"ngram_counts",
"=",
"Counter",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"segment",
")",
"-",
"n",
"+",
"1",
")",
":",
"ngram",
"=",
"tuple",
"(",
"segment",
"[",
"i",
":",
"i",
"+",
"n",
"]",
")",
"ngram_counts",
"[",
"ngram",
"]",
"+=",
"1",
"return",
"ngram_counts"
] | Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
Contain all the nth n-grams in segment with a count of how many times each n-gram occurred. | [
"Extracts",
"n",
"-",
"grams",
"from",
"an",
"input",
"segment",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L32-L51 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | _bpe_to_words | def _bpe_to_words(sentence, delimiter='@@'):
"""Convert a sequence of bpe words into sentence."""
words = []
word = ''
delimiter_len = len(delimiter)
for subwords in sentence:
if len(subwords) >= delimiter_len and subwords[-delimiter_len:] == delimiter:
word += subwords[:-delimiter_len]
else:
word += subwords
words.append(word)
word = ''
return words | python | def _bpe_to_words(sentence, delimiter='@@'):
"""Convert a sequence of bpe words into sentence."""
words = []
word = ''
delimiter_len = len(delimiter)
for subwords in sentence:
if len(subwords) >= delimiter_len and subwords[-delimiter_len:] == delimiter:
word += subwords[:-delimiter_len]
else:
word += subwords
words.append(word)
word = ''
return words | [
"def",
"_bpe_to_words",
"(",
"sentence",
",",
"delimiter",
"=",
"'@@'",
")",
":",
"words",
"=",
"[",
"]",
"word",
"=",
"''",
"delimiter_len",
"=",
"len",
"(",
"delimiter",
")",
"for",
"subwords",
"in",
"sentence",
":",
"if",
"len",
"(",
"subwords",
")",
">=",
"delimiter_len",
"and",
"subwords",
"[",
"-",
"delimiter_len",
":",
"]",
"==",
"delimiter",
":",
"word",
"+=",
"subwords",
"[",
":",
"-",
"delimiter_len",
"]",
"else",
":",
"word",
"+=",
"subwords",
"words",
".",
"append",
"(",
"word",
")",
"word",
"=",
"''",
"return",
"words"
] | Convert a sequence of bpe words into sentence. | [
"Convert",
"a",
"sequence",
"of",
"bpe",
"words",
"into",
"sentence",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L61-L73 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | _tokenize_mteval_13a | def _tokenize_mteval_13a(segment):
r"""
Tokenizes a string following the tokenizer in mteval-v13a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L917-L942
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string
"""
norm = segment.rstrip()
norm = norm.replace('<skipped>', '')
norm = norm.replace('-\n', '')
norm = norm.replace('\n', ' ')
norm = norm.replace('"', '"')
norm = norm.replace('&', '&')
norm = norm.replace('<', '<')
norm = norm.replace('>', '>')
norm = u' {} '.format(norm)
norm = re.sub(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])', ' \\1 ', norm)
norm = re.sub(r'([^0-9])([\.,])', '\\1 \\2 ', norm)
norm = re.sub(r'([\.,])([^0-9])', ' \\1 \\2', norm)
norm = re.sub(r'([0-9])(-)', '\\1 \\2 ', norm)
norm = re.sub(r'\s+', ' ', norm)
norm = re.sub(r'^\s+', '', norm)
norm = re.sub(r'\s+$', '', norm)
return norm | python | def _tokenize_mteval_13a(segment):
r"""
Tokenizes a string following the tokenizer in mteval-v13a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L917-L942
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string
"""
norm = segment.rstrip()
norm = norm.replace('<skipped>', '')
norm = norm.replace('-\n', '')
norm = norm.replace('\n', ' ')
norm = norm.replace('"', '"')
norm = norm.replace('&', '&')
norm = norm.replace('<', '<')
norm = norm.replace('>', '>')
norm = u' {} '.format(norm)
norm = re.sub(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])', ' \\1 ', norm)
norm = re.sub(r'([^0-9])([\.,])', '\\1 \\2 ', norm)
norm = re.sub(r'([\.,])([^0-9])', ' \\1 \\2', norm)
norm = re.sub(r'([0-9])(-)', '\\1 \\2 ', norm)
norm = re.sub(r'\s+', ' ', norm)
norm = re.sub(r'^\s+', '', norm)
norm = re.sub(r'\s+$', '', norm)
return norm | [
"def",
"_tokenize_mteval_13a",
"(",
"segment",
")",
":",
"norm",
"=",
"segment",
".",
"rstrip",
"(",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'<skipped>'",
",",
"''",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'-\\n'",
",",
"''",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'"'",
",",
"'\"'",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'&'",
",",
"'&'",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'<'",
",",
"'<'",
")",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'>'",
",",
"'>'",
")",
"norm",
"=",
"u' {} '",
".",
"format",
"(",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'([\\{-\\~\\[-\\` -\\&\\(-\\+\\:-\\@\\/])'",
",",
"' \\\\1 '",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'([^0-9])([\\.,])'",
",",
"'\\\\1 \\\\2 '",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'([\\.,])([^0-9])'",
",",
"' \\\\1 \\\\2'",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'([0-9])(-)'",
",",
"'\\\\1 \\\\2 '",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"' '",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'^\\s+'",
",",
"''",
",",
"norm",
")",
"norm",
"=",
"re",
".",
"sub",
"(",
"r'\\s+$'",
",",
"''",
",",
"norm",
")",
"return",
"norm"
] | r"""
Tokenizes a string following the tokenizer in mteval-v13a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L917-L942
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string | [
"r",
"Tokenizes",
"a",
"string",
"following",
"the",
"tokenizer",
"in",
"mteval",
"-",
"v13a",
".",
"pl",
".",
"See",
"https",
":",
"//",
"github",
".",
"com",
"/",
"moses",
"-",
"smt",
"/",
"mosesdecoder",
"/",
"blob",
"/",
"master",
"/",
"scripts",
"/",
"generic",
"/",
"mteval",
"-",
"v14",
".",
"pl#L917",
"-",
"L942",
"Parameters",
"----------",
"segment",
":",
"str",
"A",
"string",
"to",
"be",
"tokenized"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L76-L110 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | _tokenize_mteval_v14_intl | def _tokenize_mteval_v14_intl(segment):
r"""Tokenize a string following following the international tokenizer in mteval-v14a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L954-L983
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string
"""
segment = segment.rstrip()
segment = unicodeRegex.nondigit_punct_re.sub(r'\1 \2 ', segment)
segment = unicodeRegex.punct_nondigit_re.sub(r' \1 \2', segment)
segment = unicodeRegex.symbol_re.sub(r' \1 ', segment)
return segment.strip() | python | def _tokenize_mteval_v14_intl(segment):
r"""Tokenize a string following following the international tokenizer in mteval-v14a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L954-L983
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string
"""
segment = segment.rstrip()
segment = unicodeRegex.nondigit_punct_re.sub(r'\1 \2 ', segment)
segment = unicodeRegex.punct_nondigit_re.sub(r' \1 \2', segment)
segment = unicodeRegex.symbol_re.sub(r' \1 ', segment)
return segment.strip() | [
"def",
"_tokenize_mteval_v14_intl",
"(",
"segment",
")",
":",
"segment",
"=",
"segment",
".",
"rstrip",
"(",
")",
"segment",
"=",
"unicodeRegex",
".",
"nondigit_punct_re",
".",
"sub",
"(",
"r'\\1 \\2 '",
",",
"segment",
")",
"segment",
"=",
"unicodeRegex",
".",
"punct_nondigit_re",
".",
"sub",
"(",
"r' \\1 \\2'",
",",
"segment",
")",
"segment",
"=",
"unicodeRegex",
".",
"symbol_re",
".",
"sub",
"(",
"r' \\1 '",
",",
"segment",
")",
"return",
"segment",
".",
"strip",
"(",
")"
] | r"""Tokenize a string following following the international tokenizer in mteval-v14a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L954-L983
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string | [
"r",
"Tokenize",
"a",
"string",
"following",
"following",
"the",
"international",
"tokenizer",
"in",
"mteval",
"-",
"v14a",
".",
"pl",
".",
"See",
"https",
":",
"//",
"github",
".",
"com",
"/",
"moses",
"-",
"smt",
"/",
"mosesdecoder",
"/",
"blob",
"/",
"master",
"/",
"scripts",
"/",
"generic",
"/",
"mteval",
"-",
"v14",
".",
"pl#L954",
"-",
"L983"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L130-L148 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | compute_bleu | def compute_bleu(reference_corpus_list, translation_corpus, tokenized=True,
tokenizer='13a', max_n=4, smooth=False, lower_case=False,
bpe=False, split_compound_word=False):
r"""Compute bleu score of translation against references.
Parameters
----------
reference_corpus_list: list of list(list(str)) or list of list(str)
list of list(list(str)): tokenized references
list of list(str): plain text
List of references for each translation.
translation_corpus: list(list(str)) or list(str)
list(list(str)): tokenized translation
list(str): plain text
Translations to score.
tokenized: bool, default True
Whether the inputs has been tokenized.
tokenizer: str or None, default '13a'
'13a': follow the tokenizer in mteval-v13a.pl
'intl': follow the international tokenizer in mteval-v14.pl
None: identity mapping on the string.
This option is ignored if tokenized is True
max_n: int, default 4
Maximum n-gram order to use when computing BLEU score.
smooth: bool, default False
Whether or not to compute smoothed bleu score.
lower_case: bool, default False
Whether or not to use lower case of tokens
split_compound_word: bool, default False
Whether or not to split compound words
"rich-text format" --> rich ##AT##-##AT## text format.
bpe: bool, default False
Whether or not the inputs are in BPE format
Returns
-------
5-Tuple with the BLEU score, n-gram precisions, brevity penalty,
reference length, and translation length
"""
precision_numerators = [0 for _ in range(max_n)]
precision_denominators = [0 for _ in range(max_n)]
ref_length, trans_length = 0, 0
for references in reference_corpus_list:
assert len(references) == len(translation_corpus), \
'The number of translations and their references do not match'
if tokenized:
assert isinstance(reference_corpus_list[0][0], LIST_TYPES) and \
isinstance(translation_corpus[0], LIST_TYPES), \
'references and translation should have format of list of list(list(str)) ' \
'and list(list(str)), respectively, when tokenized is True.'
else:
assert isinstance(reference_corpus_list[0][0], six.string_types) and \
isinstance(translation_corpus[0], six.string_types), \
'references and translation should have format of list(list(str)) ' \
'and list(str), respectively, when tokenized is False.'
for references, translation in zip(zip(*reference_corpus_list), translation_corpus):
if not tokenized:
references = [TOKENIZERS[tokenizer](reference).split() for reference in references]
translation = TOKENIZERS[tokenizer](translation).split()
if bpe:
references = [_bpe_to_words(reference) for reference in references]
translation = _bpe_to_words(translation)
if split_compound_word:
references = [_split_compound_word(reference) for reference in references]
translation = _split_compound_word(translation)
if lower_case:
references = [[w.lower() for w in reference] for reference in references]
translation = [w.lower() for w in translation]
trans_len = len(translation)
trans_length += trans_len
ref_length += _closest_ref_length(references, trans_len)
for n in range(max_n):
matches, candidates = _compute_precision(references, translation, n + 1)
precision_numerators[n] += matches
precision_denominators[n] += candidates
precision_fractions = [(precision_numerators[n], precision_denominators[n])
for n in range(max_n)]
smooth_const = 0
if smooth:
smooth_const = 1
precisions = _smoothing(precision_fractions, smooth_const)
if min(precisions) > 0:
precision_log_average = sum(math.log(p) for p in precisions) / max_n
precision_exp_log_average = math.exp(precision_log_average)
else:
precision_exp_log_average = 0
bp = _brevity_penalty(ref_length, trans_length)
bleu = precision_exp_log_average*bp
return bleu, precisions, bp, ref_length, trans_length | python | def compute_bleu(reference_corpus_list, translation_corpus, tokenized=True,
tokenizer='13a', max_n=4, smooth=False, lower_case=False,
bpe=False, split_compound_word=False):
r"""Compute bleu score of translation against references.
Parameters
----------
reference_corpus_list: list of list(list(str)) or list of list(str)
list of list(list(str)): tokenized references
list of list(str): plain text
List of references for each translation.
translation_corpus: list(list(str)) or list(str)
list(list(str)): tokenized translation
list(str): plain text
Translations to score.
tokenized: bool, default True
Whether the inputs has been tokenized.
tokenizer: str or None, default '13a'
'13a': follow the tokenizer in mteval-v13a.pl
'intl': follow the international tokenizer in mteval-v14.pl
None: identity mapping on the string.
This option is ignored if tokenized is True
max_n: int, default 4
Maximum n-gram order to use when computing BLEU score.
smooth: bool, default False
Whether or not to compute smoothed bleu score.
lower_case: bool, default False
Whether or not to use lower case of tokens
split_compound_word: bool, default False
Whether or not to split compound words
"rich-text format" --> rich ##AT##-##AT## text format.
bpe: bool, default False
Whether or not the inputs are in BPE format
Returns
-------
5-Tuple with the BLEU score, n-gram precisions, brevity penalty,
reference length, and translation length
"""
precision_numerators = [0 for _ in range(max_n)]
precision_denominators = [0 for _ in range(max_n)]
ref_length, trans_length = 0, 0
for references in reference_corpus_list:
assert len(references) == len(translation_corpus), \
'The number of translations and their references do not match'
if tokenized:
assert isinstance(reference_corpus_list[0][0], LIST_TYPES) and \
isinstance(translation_corpus[0], LIST_TYPES), \
'references and translation should have format of list of list(list(str)) ' \
'and list(list(str)), respectively, when tokenized is True.'
else:
assert isinstance(reference_corpus_list[0][0], six.string_types) and \
isinstance(translation_corpus[0], six.string_types), \
'references and translation should have format of list(list(str)) ' \
'and list(str), respectively, when tokenized is False.'
for references, translation in zip(zip(*reference_corpus_list), translation_corpus):
if not tokenized:
references = [TOKENIZERS[tokenizer](reference).split() for reference in references]
translation = TOKENIZERS[tokenizer](translation).split()
if bpe:
references = [_bpe_to_words(reference) for reference in references]
translation = _bpe_to_words(translation)
if split_compound_word:
references = [_split_compound_word(reference) for reference in references]
translation = _split_compound_word(translation)
if lower_case:
references = [[w.lower() for w in reference] for reference in references]
translation = [w.lower() for w in translation]
trans_len = len(translation)
trans_length += trans_len
ref_length += _closest_ref_length(references, trans_len)
for n in range(max_n):
matches, candidates = _compute_precision(references, translation, n + 1)
precision_numerators[n] += matches
precision_denominators[n] += candidates
precision_fractions = [(precision_numerators[n], precision_denominators[n])
for n in range(max_n)]
smooth_const = 0
if smooth:
smooth_const = 1
precisions = _smoothing(precision_fractions, smooth_const)
if min(precisions) > 0:
precision_log_average = sum(math.log(p) for p in precisions) / max_n
precision_exp_log_average = math.exp(precision_log_average)
else:
precision_exp_log_average = 0
bp = _brevity_penalty(ref_length, trans_length)
bleu = precision_exp_log_average*bp
return bleu, precisions, bp, ref_length, trans_length | [
"def",
"compute_bleu",
"(",
"reference_corpus_list",
",",
"translation_corpus",
",",
"tokenized",
"=",
"True",
",",
"tokenizer",
"=",
"'13a'",
",",
"max_n",
"=",
"4",
",",
"smooth",
"=",
"False",
",",
"lower_case",
"=",
"False",
",",
"bpe",
"=",
"False",
",",
"split_compound_word",
"=",
"False",
")",
":",
"precision_numerators",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"max_n",
")",
"]",
"precision_denominators",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"max_n",
")",
"]",
"ref_length",
",",
"trans_length",
"=",
"0",
",",
"0",
"for",
"references",
"in",
"reference_corpus_list",
":",
"assert",
"len",
"(",
"references",
")",
"==",
"len",
"(",
"translation_corpus",
")",
",",
"'The number of translations and their references do not match'",
"if",
"tokenized",
":",
"assert",
"isinstance",
"(",
"reference_corpus_list",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"LIST_TYPES",
")",
"and",
"isinstance",
"(",
"translation_corpus",
"[",
"0",
"]",
",",
"LIST_TYPES",
")",
",",
"'references and translation should have format of list of list(list(str)) '",
"'and list(list(str)), respectively, when tokenized is True.'",
"else",
":",
"assert",
"isinstance",
"(",
"reference_corpus_list",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
"and",
"isinstance",
"(",
"translation_corpus",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
",",
"'references and translation should have format of list(list(str)) '",
"'and list(str), respectively, when tokenized is False.'",
"for",
"references",
",",
"translation",
"in",
"zip",
"(",
"zip",
"(",
"*",
"reference_corpus_list",
")",
",",
"translation_corpus",
")",
":",
"if",
"not",
"tokenized",
":",
"references",
"=",
"[",
"TOKENIZERS",
"[",
"tokenizer",
"]",
"(",
"reference",
")",
".",
"split",
"(",
")",
"for",
"reference",
"in",
"references",
"]",
"translation",
"=",
"TOKENIZERS",
"[",
"tokenizer",
"]",
"(",
"translation",
")",
".",
"split",
"(",
")",
"if",
"bpe",
":",
"references",
"=",
"[",
"_bpe_to_words",
"(",
"reference",
")",
"for",
"reference",
"in",
"references",
"]",
"translation",
"=",
"_bpe_to_words",
"(",
"translation",
")",
"if",
"split_compound_word",
":",
"references",
"=",
"[",
"_split_compound_word",
"(",
"reference",
")",
"for",
"reference",
"in",
"references",
"]",
"translation",
"=",
"_split_compound_word",
"(",
"translation",
")",
"if",
"lower_case",
":",
"references",
"=",
"[",
"[",
"w",
".",
"lower",
"(",
")",
"for",
"w",
"in",
"reference",
"]",
"for",
"reference",
"in",
"references",
"]",
"translation",
"=",
"[",
"w",
".",
"lower",
"(",
")",
"for",
"w",
"in",
"translation",
"]",
"trans_len",
"=",
"len",
"(",
"translation",
")",
"trans_length",
"+=",
"trans_len",
"ref_length",
"+=",
"_closest_ref_length",
"(",
"references",
",",
"trans_len",
")",
"for",
"n",
"in",
"range",
"(",
"max_n",
")",
":",
"matches",
",",
"candidates",
"=",
"_compute_precision",
"(",
"references",
",",
"translation",
",",
"n",
"+",
"1",
")",
"precision_numerators",
"[",
"n",
"]",
"+=",
"matches",
"precision_denominators",
"[",
"n",
"]",
"+=",
"candidates",
"precision_fractions",
"=",
"[",
"(",
"precision_numerators",
"[",
"n",
"]",
",",
"precision_denominators",
"[",
"n",
"]",
")",
"for",
"n",
"in",
"range",
"(",
"max_n",
")",
"]",
"smooth_const",
"=",
"0",
"if",
"smooth",
":",
"smooth_const",
"=",
"1",
"precisions",
"=",
"_smoothing",
"(",
"precision_fractions",
",",
"smooth_const",
")",
"if",
"min",
"(",
"precisions",
")",
">",
"0",
":",
"precision_log_average",
"=",
"sum",
"(",
"math",
".",
"log",
"(",
"p",
")",
"for",
"p",
"in",
"precisions",
")",
"/",
"max_n",
"precision_exp_log_average",
"=",
"math",
".",
"exp",
"(",
"precision_log_average",
")",
"else",
":",
"precision_exp_log_average",
"=",
"0",
"bp",
"=",
"_brevity_penalty",
"(",
"ref_length",
",",
"trans_length",
")",
"bleu",
"=",
"precision_exp_log_average",
"*",
"bp",
"return",
"bleu",
",",
"precisions",
",",
"bp",
",",
"ref_length",
",",
"trans_length"
] | r"""Compute bleu score of translation against references.
Parameters
----------
reference_corpus_list: list of list(list(str)) or list of list(str)
list of list(list(str)): tokenized references
list of list(str): plain text
List of references for each translation.
translation_corpus: list(list(str)) or list(str)
list(list(str)): tokenized translation
list(str): plain text
Translations to score.
tokenized: bool, default True
Whether the inputs has been tokenized.
tokenizer: str or None, default '13a'
'13a': follow the tokenizer in mteval-v13a.pl
'intl': follow the international tokenizer in mteval-v14.pl
None: identity mapping on the string.
This option is ignored if tokenized is True
max_n: int, default 4
Maximum n-gram order to use when computing BLEU score.
smooth: bool, default False
Whether or not to compute smoothed bleu score.
lower_case: bool, default False
Whether or not to use lower case of tokens
split_compound_word: bool, default False
Whether or not to split compound words
"rich-text format" --> rich ##AT##-##AT## text format.
bpe: bool, default False
Whether or not the inputs are in BPE format
Returns
-------
5-Tuple with the BLEU score, n-gram precisions, brevity penalty,
reference length, and translation length | [
"r",
"Compute",
"bleu",
"score",
"of",
"translation",
"against",
"references",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L158-L249 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | _compute_precision | def _compute_precision(references, translation, n):
"""Compute ngram precision.
Parameters
----------
references: list(list(str))
A list of references.
translation: list(str)
A translation.
n: int
Order of n-gram.
Returns
-------
matches: int
Number of matched nth order n-grams
candidates
Number of possible nth order n-grams
"""
matches = 0
candidates = 0
ref_ngram_counts = Counter()
for reference in references:
ref_ngram_counts |= _ngrams(reference, n)
trans_ngram_counts = _ngrams(translation, n)
overlap_ngram_counts = trans_ngram_counts & ref_ngram_counts
matches += sum(overlap_ngram_counts.values())
possible_matches = len(translation) - n + 1
if possible_matches > 0:
candidates += possible_matches
return matches, candidates | python | def _compute_precision(references, translation, n):
"""Compute ngram precision.
Parameters
----------
references: list(list(str))
A list of references.
translation: list(str)
A translation.
n: int
Order of n-gram.
Returns
-------
matches: int
Number of matched nth order n-grams
candidates
Number of possible nth order n-grams
"""
matches = 0
candidates = 0
ref_ngram_counts = Counter()
for reference in references:
ref_ngram_counts |= _ngrams(reference, n)
trans_ngram_counts = _ngrams(translation, n)
overlap_ngram_counts = trans_ngram_counts & ref_ngram_counts
matches += sum(overlap_ngram_counts.values())
possible_matches = len(translation) - n + 1
if possible_matches > 0:
candidates += possible_matches
return matches, candidates | [
"def",
"_compute_precision",
"(",
"references",
",",
"translation",
",",
"n",
")",
":",
"matches",
"=",
"0",
"candidates",
"=",
"0",
"ref_ngram_counts",
"=",
"Counter",
"(",
")",
"for",
"reference",
"in",
"references",
":",
"ref_ngram_counts",
"|=",
"_ngrams",
"(",
"reference",
",",
"n",
")",
"trans_ngram_counts",
"=",
"_ngrams",
"(",
"translation",
",",
"n",
")",
"overlap_ngram_counts",
"=",
"trans_ngram_counts",
"&",
"ref_ngram_counts",
"matches",
"+=",
"sum",
"(",
"overlap_ngram_counts",
".",
"values",
"(",
")",
")",
"possible_matches",
"=",
"len",
"(",
"translation",
")",
"-",
"n",
"+",
"1",
"if",
"possible_matches",
">",
"0",
":",
"candidates",
"+=",
"possible_matches",
"return",
"matches",
",",
"candidates"
] | Compute ngram precision.
Parameters
----------
references: list(list(str))
A list of references.
translation: list(str)
A translation.
n: int
Order of n-gram.
Returns
-------
matches: int
Number of matched nth order n-grams
candidates
Number of possible nth order n-grams | [
"Compute",
"ngram",
"precision",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L252-L284 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | _brevity_penalty | def _brevity_penalty(ref_length, trans_length):
"""Calculate brevity penalty.
Parameters
----------
ref_length: int
Sum of all closest references'lengths for every translations in a corpus
trans_length: int
Sum of all translations's lengths in a corpus.
Returns
-------
bleu's brevity penalty: float
"""
if trans_length > ref_length:
return 1
# If translation is empty, brevity penalty = 0 should result in BLEU = 0.0
elif trans_length == 0:
return 0
else:
return math.exp(1 - float(ref_length) / trans_length) | python | def _brevity_penalty(ref_length, trans_length):
"""Calculate brevity penalty.
Parameters
----------
ref_length: int
Sum of all closest references'lengths for every translations in a corpus
trans_length: int
Sum of all translations's lengths in a corpus.
Returns
-------
bleu's brevity penalty: float
"""
if trans_length > ref_length:
return 1
# If translation is empty, brevity penalty = 0 should result in BLEU = 0.0
elif trans_length == 0:
return 0
else:
return math.exp(1 - float(ref_length) / trans_length) | [
"def",
"_brevity_penalty",
"(",
"ref_length",
",",
"trans_length",
")",
":",
"if",
"trans_length",
">",
"ref_length",
":",
"return",
"1",
"# If translation is empty, brevity penalty = 0 should result in BLEU = 0.0",
"elif",
"trans_length",
"==",
"0",
":",
"return",
"0",
"else",
":",
"return",
"math",
".",
"exp",
"(",
"1",
"-",
"float",
"(",
"ref_length",
")",
"/",
"trans_length",
")"
] | Calculate brevity penalty.
Parameters
----------
ref_length: int
Sum of all closest references'lengths for every translations in a corpus
trans_length: int
Sum of all translations's lengths in a corpus.
Returns
-------
bleu's brevity penalty: float | [
"Calculate",
"brevity",
"penalty",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L287-L307 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | _closest_ref_length | def _closest_ref_length(references, trans_length):
"""Find the reference that has the closest length to the translation.
Parameters
----------
references: list(list(str))
A list of references.
trans_length: int
Length of the translation.
Returns
-------
closest_ref_len: int
Length of the reference that is closest to the translation.
"""
ref_lengths = (len(reference) for reference in references)
closest_ref_len = min(ref_lengths,
key=lambda ref_length: (abs(ref_length - trans_length), ref_length))
return closest_ref_len | python | def _closest_ref_length(references, trans_length):
"""Find the reference that has the closest length to the translation.
Parameters
----------
references: list(list(str))
A list of references.
trans_length: int
Length of the translation.
Returns
-------
closest_ref_len: int
Length of the reference that is closest to the translation.
"""
ref_lengths = (len(reference) for reference in references)
closest_ref_len = min(ref_lengths,
key=lambda ref_length: (abs(ref_length - trans_length), ref_length))
return closest_ref_len | [
"def",
"_closest_ref_length",
"(",
"references",
",",
"trans_length",
")",
":",
"ref_lengths",
"=",
"(",
"len",
"(",
"reference",
")",
"for",
"reference",
"in",
"references",
")",
"closest_ref_len",
"=",
"min",
"(",
"ref_lengths",
",",
"key",
"=",
"lambda",
"ref_length",
":",
"(",
"abs",
"(",
"ref_length",
"-",
"trans_length",
")",
",",
"ref_length",
")",
")",
"return",
"closest_ref_len"
] | Find the reference that has the closest length to the translation.
Parameters
----------
references: list(list(str))
A list of references.
trans_length: int
Length of the translation.
Returns
-------
closest_ref_len: int
Length of the reference that is closest to the translation. | [
"Find",
"the",
"reference",
"that",
"has",
"the",
"closest",
"length",
"to",
"the",
"translation",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L310-L329 | train |
dmlc/gluon-nlp | scripts/machine_translation/bleu.py | _smoothing | def _smoothing(precision_fractions, c=1):
"""Compute the smoothed precision for all the orders.
Parameters
----------
precision_fractions: list(tuple)
Contain a list of (precision_numerator, precision_denominator) pairs
c: int, default 1
Smoothing constant to use
Returns
-------
ratios: list of floats
Contain the smoothed precision_fractions.
"""
ratios = [0] * len(precision_fractions)
for i, precision_fraction in enumerate(precision_fractions):
if precision_fraction[1] > 0:
ratios[i] = float(precision_fraction[0] + c) / (precision_fraction[1] + c)
else:
ratios[i] = 0.0
return ratios | python | def _smoothing(precision_fractions, c=1):
"""Compute the smoothed precision for all the orders.
Parameters
----------
precision_fractions: list(tuple)
Contain a list of (precision_numerator, precision_denominator) pairs
c: int, default 1
Smoothing constant to use
Returns
-------
ratios: list of floats
Contain the smoothed precision_fractions.
"""
ratios = [0] * len(precision_fractions)
for i, precision_fraction in enumerate(precision_fractions):
if precision_fraction[1] > 0:
ratios[i] = float(precision_fraction[0] + c) / (precision_fraction[1] + c)
else:
ratios[i] = 0.0
return ratios | [
"def",
"_smoothing",
"(",
"precision_fractions",
",",
"c",
"=",
"1",
")",
":",
"ratios",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"precision_fractions",
")",
"for",
"i",
",",
"precision_fraction",
"in",
"enumerate",
"(",
"precision_fractions",
")",
":",
"if",
"precision_fraction",
"[",
"1",
"]",
">",
"0",
":",
"ratios",
"[",
"i",
"]",
"=",
"float",
"(",
"precision_fraction",
"[",
"0",
"]",
"+",
"c",
")",
"/",
"(",
"precision_fraction",
"[",
"1",
"]",
"+",
"c",
")",
"else",
":",
"ratios",
"[",
"i",
"]",
"=",
"0.0",
"return",
"ratios"
] | Compute the smoothed precision for all the orders.
Parameters
----------
precision_fractions: list(tuple)
Contain a list of (precision_numerator, precision_denominator) pairs
c: int, default 1
Smoothing constant to use
Returns
-------
ratios: list of floats
Contain the smoothed precision_fractions. | [
"Compute",
"the",
"smoothed",
"precision",
"for",
"all",
"the",
"orders",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/bleu.py#L332-L354 | train |
dmlc/gluon-nlp | scripts/language_model/sampler.py | LogUniformSampler.forward | def forward(self, true_classes):
"""Draw samples from log uniform distribution and returns sampled candidates,
expected count for true classes and sampled classes.
Parameters
----------
true_classes: NDArray
The true classes.
Returns
-------
samples: NDArray
The sampled candidate classes.
expected_count_sample: NDArray
The expected count for sampled candidates.
expected_count_true: NDArray
The expected count for true classes in the same shape as `true_classes`.
"""
num_sampled = self._num_sampled
ctx = true_classes.context
num_tries = 0
log_range = math.log(self._range_max + 1)
# sample candidates
f = ndarray._internal._sample_unique_zipfian
sampled_classes, num_tries = f(self._range_max, shape=(1, num_sampled))
sampled_classes = sampled_classes.reshape((-1,))
sampled_classes = sampled_classes.as_in_context(ctx)
num_tries = num_tries.as_in_context(ctx)
# expected count for true classes
true_cls = true_classes.as_in_context(ctx).astype('float64')
prob_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range
count_true = self._prob_helper(num_tries, prob_true)
# expected count for sampled classes
sampled_classes = ndarray.array(sampled_classes, ctx=ctx, dtype='int64')
sampled_cls_fp64 = sampled_classes.astype('float64')
prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
count_sampled = self._prob_helper(num_tries, prob_sampled)
# convert to dtype
sampled_classes = sampled_classes.astype(self._dtype, copy=False)
count_true = count_true.astype(self._dtype, copy=False)
count_sampled = count_sampled.astype(self._dtype, copy=False)
return sampled_classes, count_sampled, count_true | python | def forward(self, true_classes):
"""Draw samples from log uniform distribution and returns sampled candidates,
expected count for true classes and sampled classes.
Parameters
----------
true_classes: NDArray
The true classes.
Returns
-------
samples: NDArray
The sampled candidate classes.
expected_count_sample: NDArray
The expected count for sampled candidates.
expected_count_true: NDArray
The expected count for true classes in the same shape as `true_classes`.
"""
num_sampled = self._num_sampled
ctx = true_classes.context
num_tries = 0
log_range = math.log(self._range_max + 1)
# sample candidates
f = ndarray._internal._sample_unique_zipfian
sampled_classes, num_tries = f(self._range_max, shape=(1, num_sampled))
sampled_classes = sampled_classes.reshape((-1,))
sampled_classes = sampled_classes.as_in_context(ctx)
num_tries = num_tries.as_in_context(ctx)
# expected count for true classes
true_cls = true_classes.as_in_context(ctx).astype('float64')
prob_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range
count_true = self._prob_helper(num_tries, prob_true)
# expected count for sampled classes
sampled_classes = ndarray.array(sampled_classes, ctx=ctx, dtype='int64')
sampled_cls_fp64 = sampled_classes.astype('float64')
prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
count_sampled = self._prob_helper(num_tries, prob_sampled)
# convert to dtype
sampled_classes = sampled_classes.astype(self._dtype, copy=False)
count_true = count_true.astype(self._dtype, copy=False)
count_sampled = count_sampled.astype(self._dtype, copy=False)
return sampled_classes, count_sampled, count_true | [
"def",
"forward",
"(",
"self",
",",
"true_classes",
")",
":",
"num_sampled",
"=",
"self",
".",
"_num_sampled",
"ctx",
"=",
"true_classes",
".",
"context",
"num_tries",
"=",
"0",
"log_range",
"=",
"math",
".",
"log",
"(",
"self",
".",
"_range_max",
"+",
"1",
")",
"# sample candidates",
"f",
"=",
"ndarray",
".",
"_internal",
".",
"_sample_unique_zipfian",
"sampled_classes",
",",
"num_tries",
"=",
"f",
"(",
"self",
".",
"_range_max",
",",
"shape",
"=",
"(",
"1",
",",
"num_sampled",
")",
")",
"sampled_classes",
"=",
"sampled_classes",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
"sampled_classes",
"=",
"sampled_classes",
".",
"as_in_context",
"(",
"ctx",
")",
"num_tries",
"=",
"num_tries",
".",
"as_in_context",
"(",
"ctx",
")",
"# expected count for true classes",
"true_cls",
"=",
"true_classes",
".",
"as_in_context",
"(",
"ctx",
")",
".",
"astype",
"(",
"'float64'",
")",
"prob_true",
"=",
"(",
"(",
"true_cls",
"+",
"2.0",
")",
"/",
"(",
"true_cls",
"+",
"1.0",
")",
")",
".",
"log",
"(",
")",
"/",
"log_range",
"count_true",
"=",
"self",
".",
"_prob_helper",
"(",
"num_tries",
",",
"prob_true",
")",
"# expected count for sampled classes",
"sampled_classes",
"=",
"ndarray",
".",
"array",
"(",
"sampled_classes",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"'int64'",
")",
"sampled_cls_fp64",
"=",
"sampled_classes",
".",
"astype",
"(",
"'float64'",
")",
"prob_sampled",
"=",
"(",
"(",
"sampled_cls_fp64",
"+",
"2.0",
")",
"/",
"(",
"sampled_cls_fp64",
"+",
"1.0",
")",
")",
".",
"log",
"(",
")",
"/",
"log_range",
"count_sampled",
"=",
"self",
".",
"_prob_helper",
"(",
"num_tries",
",",
"prob_sampled",
")",
"# convert to dtype",
"sampled_classes",
"=",
"sampled_classes",
".",
"astype",
"(",
"self",
".",
"_dtype",
",",
"copy",
"=",
"False",
")",
"count_true",
"=",
"count_true",
".",
"astype",
"(",
"self",
".",
"_dtype",
",",
"copy",
"=",
"False",
")",
"count_sampled",
"=",
"count_sampled",
".",
"astype",
"(",
"self",
".",
"_dtype",
",",
"copy",
"=",
"False",
")",
"return",
"sampled_classes",
",",
"count_sampled",
",",
"count_true"
] | Draw samples from log uniform distribution and returns sampled candidates,
expected count for true classes and sampled classes.
Parameters
----------
true_classes: NDArray
The true classes.
Returns
-------
samples: NDArray
The sampled candidate classes.
expected_count_sample: NDArray
The expected count for sampled candidates.
expected_count_true: NDArray
The expected count for true classes in the same shape as `true_classes`. | [
"Draw",
"samples",
"from",
"log",
"uniform",
"distribution",
"and",
"returns",
"sampled",
"candidates",
"expected",
"count",
"for",
"true",
"classes",
"and",
"sampled",
"classes",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/language_model/sampler.py#L66-L109 | train |
dmlc/gluon-nlp | scripts/word_embeddings/data.py | preprocess_dataset | def preprocess_dataset(data, min_freq=5, max_vocab_size=None):
"""Dataset preprocessing helper.
Parameters
----------
data : mx.data.Dataset
Input Dataset. For example gluonnlp.data.Text8 or gluonnlp.data.Fil9
min_freq : int, default 5
Minimum token frequency for a token to be included in the vocabulary
and returned DataStream.
max_vocab_size : int, optional
Specifies a maximum size for the vocabulary.
Returns
-------
gluonnlp.data.DataStream
Each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
gluonnlp.Vocab
Vocabulary of all tokens in Text8 that occur at least min_freq times of
maximum size max_vocab_size.
idx_to_counts : list of int
Mapping from token indices to their occurrence-counts in the Text8
dataset.
"""
with print_time('count and construct vocabulary'):
counter = nlp.data.count_tokens(itertools.chain.from_iterable(data))
vocab = nlp.Vocab(counter, unknown_token=None, padding_token=None,
bos_token=None, eos_token=None, min_freq=min_freq,
max_size=max_vocab_size)
idx_to_counts = [counter[w] for w in vocab.idx_to_token]
def code(sentence):
return [vocab[token] for token in sentence if token in vocab]
with print_time('code data'):
data = data.transform(code, lazy=False)
data = nlp.data.SimpleDataStream([data])
return data, vocab, idx_to_counts | python | def preprocess_dataset(data, min_freq=5, max_vocab_size=None):
"""Dataset preprocessing helper.
Parameters
----------
data : mx.data.Dataset
Input Dataset. For example gluonnlp.data.Text8 or gluonnlp.data.Fil9
min_freq : int, default 5
Minimum token frequency for a token to be included in the vocabulary
and returned DataStream.
max_vocab_size : int, optional
Specifies a maximum size for the vocabulary.
Returns
-------
gluonnlp.data.DataStream
Each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
gluonnlp.Vocab
Vocabulary of all tokens in Text8 that occur at least min_freq times of
maximum size max_vocab_size.
idx_to_counts : list of int
Mapping from token indices to their occurrence-counts in the Text8
dataset.
"""
with print_time('count and construct vocabulary'):
counter = nlp.data.count_tokens(itertools.chain.from_iterable(data))
vocab = nlp.Vocab(counter, unknown_token=None, padding_token=None,
bos_token=None, eos_token=None, min_freq=min_freq,
max_size=max_vocab_size)
idx_to_counts = [counter[w] for w in vocab.idx_to_token]
def code(sentence):
return [vocab[token] for token in sentence if token in vocab]
with print_time('code data'):
data = data.transform(code, lazy=False)
data = nlp.data.SimpleDataStream([data])
return data, vocab, idx_to_counts | [
"def",
"preprocess_dataset",
"(",
"data",
",",
"min_freq",
"=",
"5",
",",
"max_vocab_size",
"=",
"None",
")",
":",
"with",
"print_time",
"(",
"'count and construct vocabulary'",
")",
":",
"counter",
"=",
"nlp",
".",
"data",
".",
"count_tokens",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"data",
")",
")",
"vocab",
"=",
"nlp",
".",
"Vocab",
"(",
"counter",
",",
"unknown_token",
"=",
"None",
",",
"padding_token",
"=",
"None",
",",
"bos_token",
"=",
"None",
",",
"eos_token",
"=",
"None",
",",
"min_freq",
"=",
"min_freq",
",",
"max_size",
"=",
"max_vocab_size",
")",
"idx_to_counts",
"=",
"[",
"counter",
"[",
"w",
"]",
"for",
"w",
"in",
"vocab",
".",
"idx_to_token",
"]",
"def",
"code",
"(",
"sentence",
")",
":",
"return",
"[",
"vocab",
"[",
"token",
"]",
"for",
"token",
"in",
"sentence",
"if",
"token",
"in",
"vocab",
"]",
"with",
"print_time",
"(",
"'code data'",
")",
":",
"data",
"=",
"data",
".",
"transform",
"(",
"code",
",",
"lazy",
"=",
"False",
")",
"data",
"=",
"nlp",
".",
"data",
".",
"SimpleDataStream",
"(",
"[",
"data",
"]",
")",
"return",
"data",
",",
"vocab",
",",
"idx_to_counts"
] | Dataset preprocessing helper.
Parameters
----------
data : mx.data.Dataset
Input Dataset. For example gluonnlp.data.Text8 or gluonnlp.data.Fil9
min_freq : int, default 5
Minimum token frequency for a token to be included in the vocabulary
and returned DataStream.
max_vocab_size : int, optional
Specifies a maximum size for the vocabulary.
Returns
-------
gluonnlp.data.DataStream
Each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
gluonnlp.Vocab
Vocabulary of all tokens in Text8 that occur at least min_freq times of
maximum size max_vocab_size.
idx_to_counts : list of int
Mapping from token indices to their occurrence-counts in the Text8
dataset. | [
"Dataset",
"preprocessing",
"helper",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/data.py#L47-L86 | train |
dmlc/gluon-nlp | scripts/word_embeddings/data.py | wiki | def wiki(wiki_root, wiki_date, wiki_language, max_vocab_size=None):
"""Wikipedia dump helper.
Parameters
----------
wiki_root : str
Parameter for WikiDumpStream
wiki_date : str
Parameter for WikiDumpStream
wiki_language : str
Parameter for WikiDumpStream
max_vocab_size : int, optional
Specifies a maximum size for the vocabulary.
Returns
-------
gluonnlp.data.DataStream
Each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
gluonnlp.Vocab
Vocabulary of all tokens in the Wikipedia corpus as provided by
WikiDumpStream but with maximum size max_vocab_size.
idx_to_counts : list of int
Mapping from token indices to their occurrence-counts in the Wikipedia
corpus.
"""
data = WikiDumpStream(
root=os.path.expanduser(wiki_root), language=wiki_language,
date=wiki_date)
vocab = data.vocab
if max_vocab_size:
for token in vocab.idx_to_token[max_vocab_size:]:
vocab.token_to_idx.pop(token)
vocab.idx_to_token = vocab.idx_to_token[:max_vocab_size]
idx_to_counts = data.idx_to_counts
def code(shard):
return [[vocab[token] for token in sentence if token in vocab]
for sentence in shard]
data = data.transform(code)
return data, vocab, idx_to_counts | python | def wiki(wiki_root, wiki_date, wiki_language, max_vocab_size=None):
"""Wikipedia dump helper.
Parameters
----------
wiki_root : str
Parameter for WikiDumpStream
wiki_date : str
Parameter for WikiDumpStream
wiki_language : str
Parameter for WikiDumpStream
max_vocab_size : int, optional
Specifies a maximum size for the vocabulary.
Returns
-------
gluonnlp.data.DataStream
Each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
gluonnlp.Vocab
Vocabulary of all tokens in the Wikipedia corpus as provided by
WikiDumpStream but with maximum size max_vocab_size.
idx_to_counts : list of int
Mapping from token indices to their occurrence-counts in the Wikipedia
corpus.
"""
data = WikiDumpStream(
root=os.path.expanduser(wiki_root), language=wiki_language,
date=wiki_date)
vocab = data.vocab
if max_vocab_size:
for token in vocab.idx_to_token[max_vocab_size:]:
vocab.token_to_idx.pop(token)
vocab.idx_to_token = vocab.idx_to_token[:max_vocab_size]
idx_to_counts = data.idx_to_counts
def code(shard):
return [[vocab[token] for token in sentence if token in vocab]
for sentence in shard]
data = data.transform(code)
return data, vocab, idx_to_counts | [
"def",
"wiki",
"(",
"wiki_root",
",",
"wiki_date",
",",
"wiki_language",
",",
"max_vocab_size",
"=",
"None",
")",
":",
"data",
"=",
"WikiDumpStream",
"(",
"root",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"wiki_root",
")",
",",
"language",
"=",
"wiki_language",
",",
"date",
"=",
"wiki_date",
")",
"vocab",
"=",
"data",
".",
"vocab",
"if",
"max_vocab_size",
":",
"for",
"token",
"in",
"vocab",
".",
"idx_to_token",
"[",
"max_vocab_size",
":",
"]",
":",
"vocab",
".",
"token_to_idx",
".",
"pop",
"(",
"token",
")",
"vocab",
".",
"idx_to_token",
"=",
"vocab",
".",
"idx_to_token",
"[",
":",
"max_vocab_size",
"]",
"idx_to_counts",
"=",
"data",
".",
"idx_to_counts",
"def",
"code",
"(",
"shard",
")",
":",
"return",
"[",
"[",
"vocab",
"[",
"token",
"]",
"for",
"token",
"in",
"sentence",
"if",
"token",
"in",
"vocab",
"]",
"for",
"sentence",
"in",
"shard",
"]",
"data",
"=",
"data",
".",
"transform",
"(",
"code",
")",
"return",
"data",
",",
"vocab",
",",
"idx_to_counts"
] | Wikipedia dump helper.
Parameters
----------
wiki_root : str
Parameter for WikiDumpStream
wiki_date : str
Parameter for WikiDumpStream
wiki_language : str
Parameter for WikiDumpStream
max_vocab_size : int, optional
Specifies a maximum size for the vocabulary.
Returns
-------
gluonnlp.data.DataStream
Each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
gluonnlp.Vocab
Vocabulary of all tokens in the Wikipedia corpus as provided by
WikiDumpStream but with maximum size max_vocab_size.
idx_to_counts : list of int
Mapping from token indices to their occurrence-counts in the Wikipedia
corpus. | [
"Wikipedia",
"dump",
"helper",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/data.py#L89-L131 | train |
dmlc/gluon-nlp | scripts/word_embeddings/data.py | transform_data_fasttext | def transform_data_fasttext(data, vocab, idx_to_counts, cbow, ngram_buckets,
ngrams, batch_size, window_size,
frequent_token_subsampling=1E-4, dtype='float32',
index_dtype='int64'):
"""Transform a DataStream of coded DataSets to a DataStream of batches.
Parameters
----------
data : gluonnlp.data.DataStream
DataStream where each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
vocab : gluonnlp.Vocab
Vocabulary containing all tokens whose indices occur in data. For each
token, it's associated subwords will be computed and used for
constructing the batches. No subwords are used if ngram_buckets is 0.
idx_to_counts : list of int
List of integers such that idx_to_counts[idx] represents the count of
vocab.idx_to_token[idx] in the underlying dataset. The count
information is used to subsample frequent words in the dataset.
Each token is independently dropped with probability 1 - sqrt(t /
(count / sum_counts)) where t is the hyperparameter
frequent_token_subsampling.
cbow : boolean
If True, batches for CBOW are returned.
ngram_buckets : int
Number of hash buckets to consider for the fastText
nlp.vocab.NGramHashes subword function.
ngrams : list of int
For each integer n in the list, all ngrams of length n will be
considered by the nlp.vocab.NGramHashes subword function.
batch_size : int
The returned data stream iterates over batches of batch_size.
window_size : int
The context window size for
gluonnlp.data.EmbeddingCenterContextBatchify.
frequent_token_subsampling : float
Hyperparameter for subsampling. See idx_to_counts above for more
information.
dtype : str or np.dtype, default 'float32'
Data type of data array.
index_dtype : str or np.dtype, default 'int64'
Data type of index arrays.
Returns
-------
gluonnlp.data.DataStream
Stream over batches. Each returned element is a list corresponding to
the arguments for the forward pass of model.SG or model.CBOW
respectively based on if cbow is False or True. If ngarm_buckets > 0,
the returned sample will contain ngrams. Both model.SG or model.CBOW
will handle them correctly as long as they are initialized with the
subword_function returned as second argument by this function (see
below).
gluonnlp.vocab.NGramHashes
The subword_function used for obtaining the subwords in the returned
batches.
"""
if ngram_buckets <= 0:
raise ValueError('Invalid ngram_buckets. Use Word2Vec training '
'pipeline if not interested in ngrams.')
sum_counts = float(sum(idx_to_counts))
idx_to_pdiscard = [
1 - math.sqrt(frequent_token_subsampling / (count / sum_counts))
for count in idx_to_counts]
def subsample(shard):
return [[
t for t, r in zip(sentence,
np.random.uniform(0, 1, size=len(sentence)))
if r > idx_to_pdiscard[t]] for sentence in shard]
data = data.transform(subsample)
batchify = nlp.data.batchify.EmbeddingCenterContextBatchify(
batch_size=batch_size, window_size=window_size, cbow=cbow,
weight_dtype=dtype, index_dtype=index_dtype)
data = data.transform(batchify)
with print_time('prepare subwords'):
subword_function = nlp.vocab.create_subword_function(
'NGramHashes', ngrams=ngrams, num_subwords=ngram_buckets)
# Store subword indices for all words in vocabulary
idx_to_subwordidxs = list(subword_function(vocab.idx_to_token))
subwordidxs = np.concatenate(idx_to_subwordidxs)
subwordidxsptr = np.cumsum([
len(subwordidxs) for subwordidxs in idx_to_subwordidxs])
subwordidxsptr = np.concatenate([
np.zeros(1, dtype=np.int64), subwordidxsptr])
if cbow:
subword_lookup = functools.partial(
cbow_lookup, subwordidxs=subwordidxs,
subwordidxsptr=subwordidxsptr, offset=len(vocab))
else:
subword_lookup = functools.partial(
skipgram_lookup, subwordidxs=subwordidxs,
subwordidxsptr=subwordidxsptr, offset=len(vocab))
max_subwordidxs_len = max(len(s) for s in idx_to_subwordidxs)
if max_subwordidxs_len > 500:
warnings.warn(
'The word with largest number of subwords '
'has {} subwords, suggesting there are '
'some noisy words in your vocabulary. '
'You should filter out very long words '
'to avoid memory issues.'.format(max_subwordidxs_len))
data = UnchainStream(data)
if cbow:
batchify_fn = cbow_fasttext_batch
else:
batchify_fn = skipgram_fasttext_batch
batchify_fn = functools.partial(
batchify_fn, num_tokens=len(vocab) + len(subword_function),
subword_lookup=subword_lookup, dtype=dtype, index_dtype=index_dtype)
return data, batchify_fn, subword_function | python | def transform_data_fasttext(data, vocab, idx_to_counts, cbow, ngram_buckets,
ngrams, batch_size, window_size,
frequent_token_subsampling=1E-4, dtype='float32',
index_dtype='int64'):
"""Transform a DataStream of coded DataSets to a DataStream of batches.
Parameters
----------
data : gluonnlp.data.DataStream
DataStream where each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
vocab : gluonnlp.Vocab
Vocabulary containing all tokens whose indices occur in data. For each
token, it's associated subwords will be computed and used for
constructing the batches. No subwords are used if ngram_buckets is 0.
idx_to_counts : list of int
List of integers such that idx_to_counts[idx] represents the count of
vocab.idx_to_token[idx] in the underlying dataset. The count
information is used to subsample frequent words in the dataset.
Each token is independently dropped with probability 1 - sqrt(t /
(count / sum_counts)) where t is the hyperparameter
frequent_token_subsampling.
cbow : boolean
If True, batches for CBOW are returned.
ngram_buckets : int
Number of hash buckets to consider for the fastText
nlp.vocab.NGramHashes subword function.
ngrams : list of int
For each integer n in the list, all ngrams of length n will be
considered by the nlp.vocab.NGramHashes subword function.
batch_size : int
The returned data stream iterates over batches of batch_size.
window_size : int
The context window size for
gluonnlp.data.EmbeddingCenterContextBatchify.
frequent_token_subsampling : float
Hyperparameter for subsampling. See idx_to_counts above for more
information.
dtype : str or np.dtype, default 'float32'
Data type of data array.
index_dtype : str or np.dtype, default 'int64'
Data type of index arrays.
Returns
-------
gluonnlp.data.DataStream
Stream over batches. Each returned element is a list corresponding to
the arguments for the forward pass of model.SG or model.CBOW
respectively based on if cbow is False or True. If ngarm_buckets > 0,
the returned sample will contain ngrams. Both model.SG or model.CBOW
will handle them correctly as long as they are initialized with the
subword_function returned as second argument by this function (see
below).
gluonnlp.vocab.NGramHashes
The subword_function used for obtaining the subwords in the returned
batches.
"""
if ngram_buckets <= 0:
raise ValueError('Invalid ngram_buckets. Use Word2Vec training '
'pipeline if not interested in ngrams.')
sum_counts = float(sum(idx_to_counts))
idx_to_pdiscard = [
1 - math.sqrt(frequent_token_subsampling / (count / sum_counts))
for count in idx_to_counts]
def subsample(shard):
return [[
t for t, r in zip(sentence,
np.random.uniform(0, 1, size=len(sentence)))
if r > idx_to_pdiscard[t]] for sentence in shard]
data = data.transform(subsample)
batchify = nlp.data.batchify.EmbeddingCenterContextBatchify(
batch_size=batch_size, window_size=window_size, cbow=cbow,
weight_dtype=dtype, index_dtype=index_dtype)
data = data.transform(batchify)
with print_time('prepare subwords'):
subword_function = nlp.vocab.create_subword_function(
'NGramHashes', ngrams=ngrams, num_subwords=ngram_buckets)
# Store subword indices for all words in vocabulary
idx_to_subwordidxs = list(subword_function(vocab.idx_to_token))
subwordidxs = np.concatenate(idx_to_subwordidxs)
subwordidxsptr = np.cumsum([
len(subwordidxs) for subwordidxs in idx_to_subwordidxs])
subwordidxsptr = np.concatenate([
np.zeros(1, dtype=np.int64), subwordidxsptr])
if cbow:
subword_lookup = functools.partial(
cbow_lookup, subwordidxs=subwordidxs,
subwordidxsptr=subwordidxsptr, offset=len(vocab))
else:
subword_lookup = functools.partial(
skipgram_lookup, subwordidxs=subwordidxs,
subwordidxsptr=subwordidxsptr, offset=len(vocab))
max_subwordidxs_len = max(len(s) for s in idx_to_subwordidxs)
if max_subwordidxs_len > 500:
warnings.warn(
'The word with largest number of subwords '
'has {} subwords, suggesting there are '
'some noisy words in your vocabulary. '
'You should filter out very long words '
'to avoid memory issues.'.format(max_subwordidxs_len))
data = UnchainStream(data)
if cbow:
batchify_fn = cbow_fasttext_batch
else:
batchify_fn = skipgram_fasttext_batch
batchify_fn = functools.partial(
batchify_fn, num_tokens=len(vocab) + len(subword_function),
subword_lookup=subword_lookup, dtype=dtype, index_dtype=index_dtype)
return data, batchify_fn, subword_function | [
"def",
"transform_data_fasttext",
"(",
"data",
",",
"vocab",
",",
"idx_to_counts",
",",
"cbow",
",",
"ngram_buckets",
",",
"ngrams",
",",
"batch_size",
",",
"window_size",
",",
"frequent_token_subsampling",
"=",
"1E-4",
",",
"dtype",
"=",
"'float32'",
",",
"index_dtype",
"=",
"'int64'",
")",
":",
"if",
"ngram_buckets",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Invalid ngram_buckets. Use Word2Vec training '",
"'pipeline if not interested in ngrams.'",
")",
"sum_counts",
"=",
"float",
"(",
"sum",
"(",
"idx_to_counts",
")",
")",
"idx_to_pdiscard",
"=",
"[",
"1",
"-",
"math",
".",
"sqrt",
"(",
"frequent_token_subsampling",
"/",
"(",
"count",
"/",
"sum_counts",
")",
")",
"for",
"count",
"in",
"idx_to_counts",
"]",
"def",
"subsample",
"(",
"shard",
")",
":",
"return",
"[",
"[",
"t",
"for",
"t",
",",
"r",
"in",
"zip",
"(",
"sentence",
",",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
",",
"size",
"=",
"len",
"(",
"sentence",
")",
")",
")",
"if",
"r",
">",
"idx_to_pdiscard",
"[",
"t",
"]",
"]",
"for",
"sentence",
"in",
"shard",
"]",
"data",
"=",
"data",
".",
"transform",
"(",
"subsample",
")",
"batchify",
"=",
"nlp",
".",
"data",
".",
"batchify",
".",
"EmbeddingCenterContextBatchify",
"(",
"batch_size",
"=",
"batch_size",
",",
"window_size",
"=",
"window_size",
",",
"cbow",
"=",
"cbow",
",",
"weight_dtype",
"=",
"dtype",
",",
"index_dtype",
"=",
"index_dtype",
")",
"data",
"=",
"data",
".",
"transform",
"(",
"batchify",
")",
"with",
"print_time",
"(",
"'prepare subwords'",
")",
":",
"subword_function",
"=",
"nlp",
".",
"vocab",
".",
"create_subword_function",
"(",
"'NGramHashes'",
",",
"ngrams",
"=",
"ngrams",
",",
"num_subwords",
"=",
"ngram_buckets",
")",
"# Store subword indices for all words in vocabulary",
"idx_to_subwordidxs",
"=",
"list",
"(",
"subword_function",
"(",
"vocab",
".",
"idx_to_token",
")",
")",
"subwordidxs",
"=",
"np",
".",
"concatenate",
"(",
"idx_to_subwordidxs",
")",
"subwordidxsptr",
"=",
"np",
".",
"cumsum",
"(",
"[",
"len",
"(",
"subwordidxs",
")",
"for",
"subwordidxs",
"in",
"idx_to_subwordidxs",
"]",
")",
"subwordidxsptr",
"=",
"np",
".",
"concatenate",
"(",
"[",
"np",
".",
"zeros",
"(",
"1",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
",",
"subwordidxsptr",
"]",
")",
"if",
"cbow",
":",
"subword_lookup",
"=",
"functools",
".",
"partial",
"(",
"cbow_lookup",
",",
"subwordidxs",
"=",
"subwordidxs",
",",
"subwordidxsptr",
"=",
"subwordidxsptr",
",",
"offset",
"=",
"len",
"(",
"vocab",
")",
")",
"else",
":",
"subword_lookup",
"=",
"functools",
".",
"partial",
"(",
"skipgram_lookup",
",",
"subwordidxs",
"=",
"subwordidxs",
",",
"subwordidxsptr",
"=",
"subwordidxsptr",
",",
"offset",
"=",
"len",
"(",
"vocab",
")",
")",
"max_subwordidxs_len",
"=",
"max",
"(",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"idx_to_subwordidxs",
")",
"if",
"max_subwordidxs_len",
">",
"500",
":",
"warnings",
".",
"warn",
"(",
"'The word with largest number of subwords '",
"'has {} subwords, suggesting there are '",
"'some noisy words in your vocabulary. '",
"'You should filter out very long words '",
"'to avoid memory issues.'",
".",
"format",
"(",
"max_subwordidxs_len",
")",
")",
"data",
"=",
"UnchainStream",
"(",
"data",
")",
"if",
"cbow",
":",
"batchify_fn",
"=",
"cbow_fasttext_batch",
"else",
":",
"batchify_fn",
"=",
"skipgram_fasttext_batch",
"batchify_fn",
"=",
"functools",
".",
"partial",
"(",
"batchify_fn",
",",
"num_tokens",
"=",
"len",
"(",
"vocab",
")",
"+",
"len",
"(",
"subword_function",
")",
",",
"subword_lookup",
"=",
"subword_lookup",
",",
"dtype",
"=",
"dtype",
",",
"index_dtype",
"=",
"index_dtype",
")",
"return",
"data",
",",
"batchify_fn",
",",
"subword_function"
] | Transform a DataStream of coded DataSets to a DataStream of batches.
Parameters
----------
data : gluonnlp.data.DataStream
DataStream where each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
vocab : gluonnlp.Vocab
Vocabulary containing all tokens whose indices occur in data. For each
token, it's associated subwords will be computed and used for
constructing the batches. No subwords are used if ngram_buckets is 0.
idx_to_counts : list of int
List of integers such that idx_to_counts[idx] represents the count of
vocab.idx_to_token[idx] in the underlying dataset. The count
information is used to subsample frequent words in the dataset.
Each token is independently dropped with probability 1 - sqrt(t /
(count / sum_counts)) where t is the hyperparameter
frequent_token_subsampling.
cbow : boolean
If True, batches for CBOW are returned.
ngram_buckets : int
Number of hash buckets to consider for the fastText
nlp.vocab.NGramHashes subword function.
ngrams : list of int
For each integer n in the list, all ngrams of length n will be
considered by the nlp.vocab.NGramHashes subword function.
batch_size : int
The returned data stream iterates over batches of batch_size.
window_size : int
The context window size for
gluonnlp.data.EmbeddingCenterContextBatchify.
frequent_token_subsampling : float
Hyperparameter for subsampling. See idx_to_counts above for more
information.
dtype : str or np.dtype, default 'float32'
Data type of data array.
index_dtype : str or np.dtype, default 'int64'
Data type of index arrays.
Returns
-------
gluonnlp.data.DataStream
Stream over batches. Each returned element is a list corresponding to
the arguments for the forward pass of model.SG or model.CBOW
respectively based on if cbow is False or True. If ngarm_buckets > 0,
the returned sample will contain ngrams. Both model.SG or model.CBOW
will handle them correctly as long as they are initialized with the
subword_function returned as second argument by this function (see
below).
gluonnlp.vocab.NGramHashes
The subword_function used for obtaining the subwords in the returned
batches. | [
"Transform",
"a",
"DataStream",
"of",
"coded",
"DataSets",
"to",
"a",
"DataStream",
"of",
"batches",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/data.py#L134-L252 | train |
dmlc/gluon-nlp | scripts/word_embeddings/data.py | transform_data_word2vec | def transform_data_word2vec(data, vocab, idx_to_counts, cbow, batch_size,
window_size, frequent_token_subsampling=1E-4,
dtype='float32', index_dtype='int64'):
"""Transform a DataStream of coded DataSets to a DataStream of batches.
Parameters
----------
data : gluonnlp.data.DataStream
DataStream where each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
vocab : gluonnlp.Vocab
Vocabulary containing all tokens whose indices occur in data.
idx_to_counts : list of int
List of integers such that idx_to_counts[idx] represents the count of
vocab.idx_to_token[idx] in the underlying dataset. The count
information is used to subsample frequent words in the dataset.
Each token is independently dropped with probability 1 - sqrt(t /
(count / sum_counts)) where t is the hyperparameter
frequent_token_subsampling.
batch_size : int
The returned data stream iterates over batches of batch_size.
window_size : int
The context window size for
gluonnlp.data.EmbeddingCenterContextBatchify.
frequent_token_subsampling : float
Hyperparameter for subsampling. See idx_to_counts above for more
information.
dtype : str or np.dtype, default 'float32'
Data type of data array.
index_dtype : str or np.dtype, default 'int64'
Data type of index arrays.
Returns
-------
gluonnlp.data.DataStream
Stream over batches.
"""
sum_counts = float(sum(idx_to_counts))
idx_to_pdiscard = [
1 - math.sqrt(frequent_token_subsampling / (count / sum_counts))
for count in idx_to_counts]
def subsample(shard):
return [[
t for t, r in zip(sentence,
np.random.uniform(0, 1, size=len(sentence)))
if r > idx_to_pdiscard[t]] for sentence in shard]
data = data.transform(subsample)
batchify = nlp.data.batchify.EmbeddingCenterContextBatchify(
batch_size=batch_size, window_size=window_size, cbow=cbow,
weight_dtype=dtype, index_dtype=index_dtype)
data = data.transform(batchify)
data = UnchainStream(data)
if cbow:
batchify_fn = cbow_batch
else:
batchify_fn = skipgram_batch
batchify_fn = functools.partial(batchify_fn, num_tokens=len(vocab),
dtype=dtype, index_dtype=index_dtype)
return data, batchify_fn, | python | def transform_data_word2vec(data, vocab, idx_to_counts, cbow, batch_size,
window_size, frequent_token_subsampling=1E-4,
dtype='float32', index_dtype='int64'):
"""Transform a DataStream of coded DataSets to a DataStream of batches.
Parameters
----------
data : gluonnlp.data.DataStream
DataStream where each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
vocab : gluonnlp.Vocab
Vocabulary containing all tokens whose indices occur in data.
idx_to_counts : list of int
List of integers such that idx_to_counts[idx] represents the count of
vocab.idx_to_token[idx] in the underlying dataset. The count
information is used to subsample frequent words in the dataset.
Each token is independently dropped with probability 1 - sqrt(t /
(count / sum_counts)) where t is the hyperparameter
frequent_token_subsampling.
batch_size : int
The returned data stream iterates over batches of batch_size.
window_size : int
The context window size for
gluonnlp.data.EmbeddingCenterContextBatchify.
frequent_token_subsampling : float
Hyperparameter for subsampling. See idx_to_counts above for more
information.
dtype : str or np.dtype, default 'float32'
Data type of data array.
index_dtype : str or np.dtype, default 'int64'
Data type of index arrays.
Returns
-------
gluonnlp.data.DataStream
Stream over batches.
"""
sum_counts = float(sum(idx_to_counts))
idx_to_pdiscard = [
1 - math.sqrt(frequent_token_subsampling / (count / sum_counts))
for count in idx_to_counts]
def subsample(shard):
return [[
t for t, r in zip(sentence,
np.random.uniform(0, 1, size=len(sentence)))
if r > idx_to_pdiscard[t]] for sentence in shard]
data = data.transform(subsample)
batchify = nlp.data.batchify.EmbeddingCenterContextBatchify(
batch_size=batch_size, window_size=window_size, cbow=cbow,
weight_dtype=dtype, index_dtype=index_dtype)
data = data.transform(batchify)
data = UnchainStream(data)
if cbow:
batchify_fn = cbow_batch
else:
batchify_fn = skipgram_batch
batchify_fn = functools.partial(batchify_fn, num_tokens=len(vocab),
dtype=dtype, index_dtype=index_dtype)
return data, batchify_fn, | [
"def",
"transform_data_word2vec",
"(",
"data",
",",
"vocab",
",",
"idx_to_counts",
",",
"cbow",
",",
"batch_size",
",",
"window_size",
",",
"frequent_token_subsampling",
"=",
"1E-4",
",",
"dtype",
"=",
"'float32'",
",",
"index_dtype",
"=",
"'int64'",
")",
":",
"sum_counts",
"=",
"float",
"(",
"sum",
"(",
"idx_to_counts",
")",
")",
"idx_to_pdiscard",
"=",
"[",
"1",
"-",
"math",
".",
"sqrt",
"(",
"frequent_token_subsampling",
"/",
"(",
"count",
"/",
"sum_counts",
")",
")",
"for",
"count",
"in",
"idx_to_counts",
"]",
"def",
"subsample",
"(",
"shard",
")",
":",
"return",
"[",
"[",
"t",
"for",
"t",
",",
"r",
"in",
"zip",
"(",
"sentence",
",",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
",",
"size",
"=",
"len",
"(",
"sentence",
")",
")",
")",
"if",
"r",
">",
"idx_to_pdiscard",
"[",
"t",
"]",
"]",
"for",
"sentence",
"in",
"shard",
"]",
"data",
"=",
"data",
".",
"transform",
"(",
"subsample",
")",
"batchify",
"=",
"nlp",
".",
"data",
".",
"batchify",
".",
"EmbeddingCenterContextBatchify",
"(",
"batch_size",
"=",
"batch_size",
",",
"window_size",
"=",
"window_size",
",",
"cbow",
"=",
"cbow",
",",
"weight_dtype",
"=",
"dtype",
",",
"index_dtype",
"=",
"index_dtype",
")",
"data",
"=",
"data",
".",
"transform",
"(",
"batchify",
")",
"data",
"=",
"UnchainStream",
"(",
"data",
")",
"if",
"cbow",
":",
"batchify_fn",
"=",
"cbow_batch",
"else",
":",
"batchify_fn",
"=",
"skipgram_batch",
"batchify_fn",
"=",
"functools",
".",
"partial",
"(",
"batchify_fn",
",",
"num_tokens",
"=",
"len",
"(",
"vocab",
")",
",",
"dtype",
"=",
"dtype",
",",
"index_dtype",
"=",
"index_dtype",
")",
"return",
"data",
",",
"batchify_fn",
","
] | Transform a DataStream of coded DataSets to a DataStream of batches.
Parameters
----------
data : gluonnlp.data.DataStream
DataStream where each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
vocab : gluonnlp.Vocab
Vocabulary containing all tokens whose indices occur in data.
idx_to_counts : list of int
List of integers such that idx_to_counts[idx] represents the count of
vocab.idx_to_token[idx] in the underlying dataset. The count
information is used to subsample frequent words in the dataset.
Each token is independently dropped with probability 1 - sqrt(t /
(count / sum_counts)) where t is the hyperparameter
frequent_token_subsampling.
batch_size : int
The returned data stream iterates over batches of batch_size.
window_size : int
The context window size for
gluonnlp.data.EmbeddingCenterContextBatchify.
frequent_token_subsampling : float
Hyperparameter for subsampling. See idx_to_counts above for more
information.
dtype : str or np.dtype, default 'float32'
Data type of data array.
index_dtype : str or np.dtype, default 'int64'
Data type of index arrays.
Returns
-------
gluonnlp.data.DataStream
Stream over batches. | [
"Transform",
"a",
"DataStream",
"of",
"coded",
"DataSets",
"to",
"a",
"DataStream",
"of",
"batches",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/data.py#L255-L319 | train |
dmlc/gluon-nlp | scripts/word_embeddings/data.py | cbow_fasttext_batch | def cbow_fasttext_batch(centers, contexts, num_tokens, subword_lookup, dtype,
index_dtype):
"""Create a batch for CBOW training objective with subwords."""
_, contexts_row, contexts_col = contexts
data, row, col = subword_lookup(contexts_row, contexts_col)
centers = mx.nd.array(centers, dtype=index_dtype)
contexts = mx.nd.sparse.csr_matrix(
(data, (row, col)), dtype=dtype,
shape=(len(centers), num_tokens)) # yapf: disable
return centers, contexts | python | def cbow_fasttext_batch(centers, contexts, num_tokens, subword_lookup, dtype,
index_dtype):
"""Create a batch for CBOW training objective with subwords."""
_, contexts_row, contexts_col = contexts
data, row, col = subword_lookup(contexts_row, contexts_col)
centers = mx.nd.array(centers, dtype=index_dtype)
contexts = mx.nd.sparse.csr_matrix(
(data, (row, col)), dtype=dtype,
shape=(len(centers), num_tokens)) # yapf: disable
return centers, contexts | [
"def",
"cbow_fasttext_batch",
"(",
"centers",
",",
"contexts",
",",
"num_tokens",
",",
"subword_lookup",
",",
"dtype",
",",
"index_dtype",
")",
":",
"_",
",",
"contexts_row",
",",
"contexts_col",
"=",
"contexts",
"data",
",",
"row",
",",
"col",
"=",
"subword_lookup",
"(",
"contexts_row",
",",
"contexts_col",
")",
"centers",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"centers",
",",
"dtype",
"=",
"index_dtype",
")",
"contexts",
"=",
"mx",
".",
"nd",
".",
"sparse",
".",
"csr_matrix",
"(",
"(",
"data",
",",
"(",
"row",
",",
"col",
")",
")",
",",
"dtype",
"=",
"dtype",
",",
"shape",
"=",
"(",
"len",
"(",
"centers",
")",
",",
"num_tokens",
")",
")",
"# yapf: disable",
"return",
"centers",
",",
"contexts"
] | Create a batch for CBOW training objective with subwords. | [
"Create",
"a",
"batch",
"for",
"CBOW",
"training",
"objective",
"with",
"subwords",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/data.py#L322-L331 | train |
def skipgram_fasttext_batch(centers, contexts, num_tokens, subword_lookup,
                            dtype, index_dtype):
    """Create a batch for the SkipGram training objective with subword units.

    The center words are expanded into their subword units and returned
    as a sparse CSR matrix; the context words are returned as a dense
    index array.
    """
    # Only the column indices of the context COO triple are used here.
    context_words = mx.nd.array(contexts[2], dtype=index_dtype)
    weights, rows, cols = subword_lookup(centers)
    center_words = mx.nd.array(centers, dtype=index_dtype)
    center_csr = mx.nd.sparse.csr_matrix(
        (weights, (rows, cols)), dtype=dtype,
        shape=(len(center_words), num_tokens))
    return center_csr, context_words, center_words
def cbow_batch(centers, contexts, num_tokens, dtype, index_dtype):
    """Create a batch for the CBOW training objective.

    The contexts arrive as a COO triple ``(data, row, col)`` and are
    packed into a sparse CSR matrix with one row per center word.
    """
    ctx_data, ctx_row, ctx_col = contexts
    center_words = mx.nd.array(centers, dtype=index_dtype)
    context_csr = mx.nd.sparse.csr_matrix(
        (ctx_data, (ctx_row, ctx_col)), dtype=dtype,
        shape=(len(center_words), num_tokens))
    return center_words, context_csr
def skipgram_batch(centers, contexts, num_tokens, dtype, index_dtype):
    """Create a batch for the SkipGram training objective.

    Each center word is encoded as a CSR row holding a single 1.0 entry
    at the word's index; the context words are a dense index array.
    """
    context_words = mx.nd.array(contexts[2], dtype=index_dtype)
    # Exactly one nonzero per row, so the indptr is 0, 1, ..., batch_size.
    indptr = mx.nd.arange(len(centers) + 1)
    center_words = mx.nd.array(centers, dtype=index_dtype)
    center_csr = mx.nd.sparse.csr_matrix(
        (mx.nd.ones(center_words.shape), center_words, indptr), dtype=dtype,
        shape=(len(center_words), num_tokens))
    return center_csr, context_words, center_words
def skipgram_lookup(indices, subwordidxs, subwordidxsptr, offset=0):
    """Get a sparse COO array of words and subwords for SkipGram.

    Parameters
    ----------
    indices : numpy.ndarray
        Array containing numbers in [0, vocabulary_size). The element at
        position idx is taken to be the word that occurs at row idx in
        the SkipGram batch.
    subwordidxs : numpy.ndarray
        Concatenation of all subwords of all tokens in the vocabulary,
        in order of their occurrence in the vocabulary. For example
        np.concatenate(idx_to_subwordidxs).
    subwordidxsptr : numpy.ndarray
        Pointers into subwordidxs such that
        subwordidxs[subwordidxsptr[i]:subwordidxsptr[i + 1]] yields all
        subwords of token i. It must therefore contain one more element
        than the vocabulary size and start with 0, e.g.
        np.concatenate([[0], np.cumsum([len(s) for s in
        idx_to_subwordidxs])]).
    offset : int, default 0
        Offset added to each subword index (e.g. the size of the
        word-level vocabulary, when words and subwords share one
        embedding matrix).

    Returns
    -------
    numpy.ndarray of dtype float32
        Data array of a sparse COO matrix. For each row, all weights sum
        to 1; the word and each of its subwords receive the uniform
        weight 1 / (1 + number_of_subwords).
    numpy.ndarray of dtype int64
        Row array of the sparse COO matrix.
    numpy.ndarray of dtype int64
        Col array of the sparse COO matrix.
    """
    row = []
    col = []
    data = []
    for i, idx in enumerate(indices):
        start = subwordidxsptr[idx]
        end = subwordidxsptr[idx + 1]
        # Uniform weight over the word itself plus all of its subwords.
        weight = 1 / (1 + end - start)
        row.append(i)
        col.append(idx)
        data.append(weight)
        for subword in subwordidxs[start:end]:
            row.append(i)
            col.append(subword + offset)
            data.append(weight)
    return (np.array(data, dtype=np.float32), np.array(row, dtype=np.int64),
            np.array(col, dtype=np.int64))
def cbow_lookup(context_row, context_col, subwordidxs, subwordidxsptr,
                offset=0):
    """Get a sparse COO array of words and subwords for CBOW.

    Parameters
    ----------
    context_row : numpy.ndarray of dtype int64
        Array of same length as context_col containing numbers in [0,
        batch_size). For each idx, context_row[idx] specifies the row
        that context_col[idx] occurs in the resulting sparse matrix.
    context_col : numpy.ndarray of dtype int64
        Array of same length as context_row containing numbers in [0,
        vocabulary_size). For each idx, context_col[idx] is one of the
        context words in the context_row[idx] row of the batch.
    subwordidxs : numpy.ndarray
        Concatenation of all subwords of all tokens in the vocabulary,
        in order of their occurrence in the vocabulary. For example
        np.concatenate(idx_to_subwordidxs).
    subwordidxsptr : numpy.ndarray
        Pointers into subwordidxs such that
        subwordidxs[subwordidxsptr[i]:subwordidxsptr[i + 1]] yields all
        subwords of token i. It must therefore contain one more element
        than the vocabulary size and start with 0.
    offset : int, default 0
        Offset added to each subword index.

    Returns
    -------
    numpy.ndarray of dtype float32
        Data array of a sparse COO matrix, with weights summing to 1 per
        row. The weights are chosen such that the sum of weights for all
        subwords and word units of a given context word is equal to
        1 / number_of_context_words_in_the_row.
    numpy.ndarray of dtype int64
        Row array of the sparse COO matrix.
    numpy.ndarray of dtype int64
        Col array of the sparse COO matrix.
    """
    row = []
    col = []
    data = []
    num_rows = np.max(context_row) + 1
    row_to_numwords = np.zeros(num_rows)
    for i, idx in enumerate(context_col):
        start = subwordidxsptr[idx]
        end = subwordidxsptr[idx + 1]
        row_ = context_row[i]
        row_to_numwords[row_] += 1
        # Uniform weight over the context word itself plus its subwords.
        weight = 1 / (1 + end - start)
        row.append(row_)
        col.append(idx)
        data.append(weight)
        for subword in subwordidxs[start:end]:
            row.append(row_)
            col.append(subword + offset)
            data.append(weight)
    # Normalize by the number of context words in each row.
    for i, row_ in enumerate(row):
        # Row indices are 0-based, so num_rows itself is out of range.
        assert 0 <= row_ < num_rows
        data[i] /= row_to_numwords[row_]
    return (np.array(data, dtype=np.float32), np.array(row, dtype=np.int64),
            np.array(col, dtype=np.int64))
def src_vocab(self):
    """Source Vocabulary of the Dataset.

    Returns
    -------
    src_vocab : Vocab
        Source vocabulary.
    """
    # Lazily fetch and parse the vocabulary file on first access.
    if self._src_vocab is not None:
        return self._src_vocab
    file_name, file_hash = self._data_file[self._pair_key][
        'vocab' + '_' + self._src_lang]
    [vocab_path] = self._fetch_data_path([(file_name, file_hash)])
    with io.open(vocab_path, 'r', encoding='utf-8') as in_file:
        self._src_vocab = Vocab.from_json(in_file.read())
    return self._src_vocab
def tgt_vocab(self):
    """Target Vocabulary of the Dataset.

    Returns
    -------
    tgt_vocab : Vocab
        Target vocabulary.
    """
    # Lazily fetch and parse the vocabulary file on first access.
    if self._tgt_vocab is not None:
        return self._tgt_vocab
    file_name, file_hash = self._data_file[self._pair_key][
        'vocab' + '_' + self._tgt_lang]
    [vocab_path] = self._fetch_data_path([(file_name, file_hash)])
    with io.open(vocab_path, 'r', encoding='utf-8') as in_file:
        self._tgt_vocab = Vocab.from_json(in_file.read())
    return self._tgt_vocab
def evaluate(data_loader):
    """Evaluate given the data loader

    Parameters
    ----------
    data_loader : DataLoader

    Returns
    -------
    avg_loss : float
        Average loss
    real_translation_out : list of list of str
        The translation output
    """
    translations = []
    instance_ids = []
    loss_total = 0.0
    loss_denom = 0
    for src_seq, tgt_seq, src_valid_length, tgt_valid_length, inst_ids \
            in data_loader:
        src_seq = src_seq.as_in_context(ctx)
        tgt_seq = tgt_seq.as_in_context(ctx)
        src_valid_length = src_valid_length.as_in_context(ctx)
        tgt_valid_length = tgt_valid_length.as_in_context(ctx)
        # Teacher-forced forward pass: the target is shifted by one
        # position so the model predicts the next token.
        out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length,
                       tgt_valid_length - 1)
        batch_loss = loss_function(out, tgt_seq[:, 1:],
                                   tgt_valid_length - 1).mean().asscalar()
        instance_ids.extend(inst_ids.asnumpy().astype(np.int32).tolist())
        # Weight each batch's loss by its number of target positions.
        loss_total += batch_loss * (tgt_seq.shape[1] - 1)
        loss_denom += tgt_seq.shape[1] - 1
        # Translate and keep only the highest-scoring sample per sentence.
        samples, _, sample_valid_length = translator.translate(
            src_seq=src_seq, src_valid_length=src_valid_length)
        best = samples[:, 0, :].asnumpy()
        best_valid_length = sample_valid_length[:, 0].asnumpy()
        for i in range(best.shape[0]):
            # Drop the first token and the last valid token (sentence
            # delimiters) before mapping indices back to words.
            translations.append(
                [tgt_vocab.idx_to_token[token]
                 for token in best[i][1:(best_valid_length[i] - 1)]])
    avg_loss = loss_total / loss_denom
    # Restore the original sentence order via the instance ids, since the
    # loader may have reordered (e.g. bucketed) the data.
    real_translation_out = [None for _ in range(len(instance_ids))]
    for ind, sentence in zip(instance_ids, translations):
        real_translation_out[ind] = sentence
    return avg_loss, real_translation_out
dmlc/gluon-nlp | scripts/machine_translation/train_gnmt.py | train | def train():
"""Training function."""
trainer = gluon.Trainer(model.collect_params(), args.optimizer, {'learning_rate': args.lr})
train_data_loader, val_data_loader, test_data_loader \
= dataprocessor.make_dataloader(data_train, data_val, data_test, args)
best_valid_bleu = 0.0
for epoch_id in range(args.epochs):
log_avg_loss = 0
log_avg_gnorm = 0
log_wc = 0
log_start_time = time.time()
for batch_id, (src_seq, tgt_seq, src_valid_length, tgt_valid_length)\
in enumerate(train_data_loader):
# logging.info(src_seq.context) Context suddenly becomes GPU.
src_seq = src_seq.as_in_context(ctx)
tgt_seq = tgt_seq.as_in_context(ctx)
src_valid_length = src_valid_length.as_in_context(ctx)
tgt_valid_length = tgt_valid_length.as_in_context(ctx)
with mx.autograd.record():
out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean()
loss = loss * (tgt_seq.shape[1] - 1) / (tgt_valid_length - 1).mean()
loss.backward()
grads = [p.grad(ctx) for p in model.collect_params().values()]
gnorm = gluon.utils.clip_global_norm(grads, args.clip)
trainer.step(1)
src_wc = src_valid_length.sum().asscalar()
tgt_wc = (tgt_valid_length - 1).sum().asscalar()
step_loss = loss.asscalar()
log_avg_loss += step_loss
log_avg_gnorm += gnorm
log_wc += src_wc + tgt_wc
if (batch_id + 1) % args.log_interval == 0:
wps = log_wc / (time.time() - log_start_time)
logging.info('[Epoch {} Batch {}/{}] loss={:.4f}, ppl={:.4f}, gnorm={:.4f}, '
'throughput={:.2f}K wps, wc={:.2f}K'
.format(epoch_id, batch_id + 1, len(train_data_loader),
log_avg_loss / args.log_interval,
np.exp(log_avg_loss / args.log_interval),
log_avg_gnorm / args.log_interval,
wps / 1000, log_wc / 1000))
log_start_time = time.time()
log_avg_loss = 0
log_avg_gnorm = 0
log_wc = 0
valid_loss, valid_translation_out = evaluate(val_data_loader)
valid_bleu_score, _, _, _, _ = compute_bleu([val_tgt_sentences], valid_translation_out)
logging.info('[Epoch {}] valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'
.format(epoch_id, valid_loss, np.exp(valid_loss), valid_bleu_score * 100))
test_loss, test_translation_out = evaluate(test_data_loader)
test_bleu_score, _, _, _, _ = compute_bleu([test_tgt_sentences], test_translation_out)
logging.info('[Epoch {}] test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'
.format(epoch_id, test_loss, np.exp(test_loss), test_bleu_score * 100))
dataprocessor.write_sentences(valid_translation_out,
os.path.join(args.save_dir,
'epoch{:d}_valid_out.txt').format(epoch_id))
dataprocessor.write_sentences(test_translation_out,
os.path.join(args.save_dir,
'epoch{:d}_test_out.txt').format(epoch_id))
if valid_bleu_score > best_valid_bleu:
best_valid_bleu = valid_bleu_score
save_path = os.path.join(args.save_dir, 'valid_best.params')
logging.info('Save best parameters to {}'.format(save_path))
model.save_parameters(save_path)
if epoch_id + 1 >= (args.epochs * 2) // 3:
new_lr = trainer.learning_rate * args.lr_update_factor
logging.info('Learning rate change to {}'.format(new_lr))
trainer.set_learning_rate(new_lr)
if os.path.exists(os.path.join(args.save_dir, 'valid_best.params')):
model.load_parameters(os.path.join(args.save_dir, 'valid_best.params'))
valid_loss, valid_translation_out = evaluate(val_data_loader)
valid_bleu_score, _, _, _, _ = compute_bleu([val_tgt_sentences], valid_translation_out)
logging.info('Best model valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'
.format(valid_loss, np.exp(valid_loss), valid_bleu_score * 100))
test_loss, test_translation_out = evaluate(test_data_loader)
test_bleu_score, _, _, _, _ = compute_bleu([test_tgt_sentences], test_translation_out)
logging.info('Best model test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'
.format(test_loss, np.exp(test_loss), test_bleu_score * 100))
dataprocessor.write_sentences(valid_translation_out,
os.path.join(args.save_dir, 'best_valid_out.txt'))
dataprocessor.write_sentences(test_translation_out,
os.path.join(args.save_dir, 'best_test_out.txt')) | python | def train():
"""Training function."""
trainer = gluon.Trainer(model.collect_params(), args.optimizer, {'learning_rate': args.lr})
train_data_loader, val_data_loader, test_data_loader \
= dataprocessor.make_dataloader(data_train, data_val, data_test, args)
best_valid_bleu = 0.0
for epoch_id in range(args.epochs):
log_avg_loss = 0
log_avg_gnorm = 0
log_wc = 0
log_start_time = time.time()
for batch_id, (src_seq, tgt_seq, src_valid_length, tgt_valid_length)\
in enumerate(train_data_loader):
# logging.info(src_seq.context) Context suddenly becomes GPU.
src_seq = src_seq.as_in_context(ctx)
tgt_seq = tgt_seq.as_in_context(ctx)
src_valid_length = src_valid_length.as_in_context(ctx)
tgt_valid_length = tgt_valid_length.as_in_context(ctx)
with mx.autograd.record():
out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean()
loss = loss * (tgt_seq.shape[1] - 1) / (tgt_valid_length - 1).mean()
loss.backward()
grads = [p.grad(ctx) for p in model.collect_params().values()]
gnorm = gluon.utils.clip_global_norm(grads, args.clip)
trainer.step(1)
src_wc = src_valid_length.sum().asscalar()
tgt_wc = (tgt_valid_length - 1).sum().asscalar()
step_loss = loss.asscalar()
log_avg_loss += step_loss
log_avg_gnorm += gnorm
log_wc += src_wc + tgt_wc
if (batch_id + 1) % args.log_interval == 0:
wps = log_wc / (time.time() - log_start_time)
logging.info('[Epoch {} Batch {}/{}] loss={:.4f}, ppl={:.4f}, gnorm={:.4f}, '
'throughput={:.2f}K wps, wc={:.2f}K'
.format(epoch_id, batch_id + 1, len(train_data_loader),
log_avg_loss / args.log_interval,
np.exp(log_avg_loss / args.log_interval),
log_avg_gnorm / args.log_interval,
wps / 1000, log_wc / 1000))
log_start_time = time.time()
log_avg_loss = 0
log_avg_gnorm = 0
log_wc = 0
valid_loss, valid_translation_out = evaluate(val_data_loader)
valid_bleu_score, _, _, _, _ = compute_bleu([val_tgt_sentences], valid_translation_out)
logging.info('[Epoch {}] valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'
.format(epoch_id, valid_loss, np.exp(valid_loss), valid_bleu_score * 100))
test_loss, test_translation_out = evaluate(test_data_loader)
test_bleu_score, _, _, _, _ = compute_bleu([test_tgt_sentences], test_translation_out)
logging.info('[Epoch {}] test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'
.format(epoch_id, test_loss, np.exp(test_loss), test_bleu_score * 100))
dataprocessor.write_sentences(valid_translation_out,
os.path.join(args.save_dir,
'epoch{:d}_valid_out.txt').format(epoch_id))
dataprocessor.write_sentences(test_translation_out,
os.path.join(args.save_dir,
'epoch{:d}_test_out.txt').format(epoch_id))
if valid_bleu_score > best_valid_bleu:
best_valid_bleu = valid_bleu_score
save_path = os.path.join(args.save_dir, 'valid_best.params')
logging.info('Save best parameters to {}'.format(save_path))
model.save_parameters(save_path)
if epoch_id + 1 >= (args.epochs * 2) // 3:
new_lr = trainer.learning_rate * args.lr_update_factor
logging.info('Learning rate change to {}'.format(new_lr))
trainer.set_learning_rate(new_lr)
if os.path.exists(os.path.join(args.save_dir, 'valid_best.params')):
model.load_parameters(os.path.join(args.save_dir, 'valid_best.params'))
valid_loss, valid_translation_out = evaluate(val_data_loader)
valid_bleu_score, _, _, _, _ = compute_bleu([val_tgt_sentences], valid_translation_out)
logging.info('Best model valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'
.format(valid_loss, np.exp(valid_loss), valid_bleu_score * 100))
test_loss, test_translation_out = evaluate(test_data_loader)
test_bleu_score, _, _, _, _ = compute_bleu([test_tgt_sentences], test_translation_out)
logging.info('Best model test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'
.format(test_loss, np.exp(test_loss), test_bleu_score * 100))
dataprocessor.write_sentences(valid_translation_out,
os.path.join(args.save_dir, 'best_valid_out.txt'))
dataprocessor.write_sentences(test_translation_out,
os.path.join(args.save_dir, 'best_test_out.txt')) | [
"def",
"train",
"(",
")",
":",
"trainer",
"=",
"gluon",
".",
"Trainer",
"(",
"model",
".",
"collect_params",
"(",
")",
",",
"args",
".",
"optimizer",
",",
"{",
"'learning_rate'",
":",
"args",
".",
"lr",
"}",
")",
"train_data_loader",
",",
"val_data_loader",
",",
"test_data_loader",
"=",
"dataprocessor",
".",
"make_dataloader",
"(",
"data_train",
",",
"data_val",
",",
"data_test",
",",
"args",
")",
"best_valid_bleu",
"=",
"0.0",
"for",
"epoch_id",
"in",
"range",
"(",
"args",
".",
"epochs",
")",
":",
"log_avg_loss",
"=",
"0",
"log_avg_gnorm",
"=",
"0",
"log_wc",
"=",
"0",
"log_start_time",
"=",
"time",
".",
"time",
"(",
")",
"for",
"batch_id",
",",
"(",
"src_seq",
",",
"tgt_seq",
",",
"src_valid_length",
",",
"tgt_valid_length",
")",
"in",
"enumerate",
"(",
"train_data_loader",
")",
":",
"# logging.info(src_seq.context) Context suddenly becomes GPU.",
"src_seq",
"=",
"src_seq",
".",
"as_in_context",
"(",
"ctx",
")",
"tgt_seq",
"=",
"tgt_seq",
".",
"as_in_context",
"(",
"ctx",
")",
"src_valid_length",
"=",
"src_valid_length",
".",
"as_in_context",
"(",
"ctx",
")",
"tgt_valid_length",
"=",
"tgt_valid_length",
".",
"as_in_context",
"(",
"ctx",
")",
"with",
"mx",
".",
"autograd",
".",
"record",
"(",
")",
":",
"out",
",",
"_",
"=",
"model",
"(",
"src_seq",
",",
"tgt_seq",
"[",
":",
",",
":",
"-",
"1",
"]",
",",
"src_valid_length",
",",
"tgt_valid_length",
"-",
"1",
")",
"loss",
"=",
"loss_function",
"(",
"out",
",",
"tgt_seq",
"[",
":",
",",
"1",
":",
"]",
",",
"tgt_valid_length",
"-",
"1",
")",
".",
"mean",
"(",
")",
"loss",
"=",
"loss",
"*",
"(",
"tgt_seq",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
")",
"/",
"(",
"tgt_valid_length",
"-",
"1",
")",
".",
"mean",
"(",
")",
"loss",
".",
"backward",
"(",
")",
"grads",
"=",
"[",
"p",
".",
"grad",
"(",
"ctx",
")",
"for",
"p",
"in",
"model",
".",
"collect_params",
"(",
")",
".",
"values",
"(",
")",
"]",
"gnorm",
"=",
"gluon",
".",
"utils",
".",
"clip_global_norm",
"(",
"grads",
",",
"args",
".",
"clip",
")",
"trainer",
".",
"step",
"(",
"1",
")",
"src_wc",
"=",
"src_valid_length",
".",
"sum",
"(",
")",
".",
"asscalar",
"(",
")",
"tgt_wc",
"=",
"(",
"tgt_valid_length",
"-",
"1",
")",
".",
"sum",
"(",
")",
".",
"asscalar",
"(",
")",
"step_loss",
"=",
"loss",
".",
"asscalar",
"(",
")",
"log_avg_loss",
"+=",
"step_loss",
"log_avg_gnorm",
"+=",
"gnorm",
"log_wc",
"+=",
"src_wc",
"+",
"tgt_wc",
"if",
"(",
"batch_id",
"+",
"1",
")",
"%",
"args",
".",
"log_interval",
"==",
"0",
":",
"wps",
"=",
"log_wc",
"/",
"(",
"time",
".",
"time",
"(",
")",
"-",
"log_start_time",
")",
"logging",
".",
"info",
"(",
"'[Epoch {} Batch {}/{}] loss={:.4f}, ppl={:.4f}, gnorm={:.4f}, '",
"'throughput={:.2f}K wps, wc={:.2f}K'",
".",
"format",
"(",
"epoch_id",
",",
"batch_id",
"+",
"1",
",",
"len",
"(",
"train_data_loader",
")",
",",
"log_avg_loss",
"/",
"args",
".",
"log_interval",
",",
"np",
".",
"exp",
"(",
"log_avg_loss",
"/",
"args",
".",
"log_interval",
")",
",",
"log_avg_gnorm",
"/",
"args",
".",
"log_interval",
",",
"wps",
"/",
"1000",
",",
"log_wc",
"/",
"1000",
")",
")",
"log_start_time",
"=",
"time",
".",
"time",
"(",
")",
"log_avg_loss",
"=",
"0",
"log_avg_gnorm",
"=",
"0",
"log_wc",
"=",
"0",
"valid_loss",
",",
"valid_translation_out",
"=",
"evaluate",
"(",
"val_data_loader",
")",
"valid_bleu_score",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
"=",
"compute_bleu",
"(",
"[",
"val_tgt_sentences",
"]",
",",
"valid_translation_out",
")",
"logging",
".",
"info",
"(",
"'[Epoch {}] valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'",
".",
"format",
"(",
"epoch_id",
",",
"valid_loss",
",",
"np",
".",
"exp",
"(",
"valid_loss",
")",
",",
"valid_bleu_score",
"*",
"100",
")",
")",
"test_loss",
",",
"test_translation_out",
"=",
"evaluate",
"(",
"test_data_loader",
")",
"test_bleu_score",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
"=",
"compute_bleu",
"(",
"[",
"test_tgt_sentences",
"]",
",",
"test_translation_out",
")",
"logging",
".",
"info",
"(",
"'[Epoch {}] test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'",
".",
"format",
"(",
"epoch_id",
",",
"test_loss",
",",
"np",
".",
"exp",
"(",
"test_loss",
")",
",",
"test_bleu_score",
"*",
"100",
")",
")",
"dataprocessor",
".",
"write_sentences",
"(",
"valid_translation_out",
",",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"save_dir",
",",
"'epoch{:d}_valid_out.txt'",
")",
".",
"format",
"(",
"epoch_id",
")",
")",
"dataprocessor",
".",
"write_sentences",
"(",
"test_translation_out",
",",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"save_dir",
",",
"'epoch{:d}_test_out.txt'",
")",
".",
"format",
"(",
"epoch_id",
")",
")",
"if",
"valid_bleu_score",
">",
"best_valid_bleu",
":",
"best_valid_bleu",
"=",
"valid_bleu_score",
"save_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"save_dir",
",",
"'valid_best.params'",
")",
"logging",
".",
"info",
"(",
"'Save best parameters to {}'",
".",
"format",
"(",
"save_path",
")",
")",
"model",
".",
"save_parameters",
"(",
"save_path",
")",
"if",
"epoch_id",
"+",
"1",
">=",
"(",
"args",
".",
"epochs",
"*",
"2",
")",
"//",
"3",
":",
"new_lr",
"=",
"trainer",
".",
"learning_rate",
"*",
"args",
".",
"lr_update_factor",
"logging",
".",
"info",
"(",
"'Learning rate change to {}'",
".",
"format",
"(",
"new_lr",
")",
")",
"trainer",
".",
"set_learning_rate",
"(",
"new_lr",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"save_dir",
",",
"'valid_best.params'",
")",
")",
":",
"model",
".",
"load_parameters",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"save_dir",
",",
"'valid_best.params'",
")",
")",
"valid_loss",
",",
"valid_translation_out",
"=",
"evaluate",
"(",
"val_data_loader",
")",
"valid_bleu_score",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
"=",
"compute_bleu",
"(",
"[",
"val_tgt_sentences",
"]",
",",
"valid_translation_out",
")",
"logging",
".",
"info",
"(",
"'Best model valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'",
".",
"format",
"(",
"valid_loss",
",",
"np",
".",
"exp",
"(",
"valid_loss",
")",
",",
"valid_bleu_score",
"*",
"100",
")",
")",
"test_loss",
",",
"test_translation_out",
"=",
"evaluate",
"(",
"test_data_loader",
")",
"test_bleu_score",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
"=",
"compute_bleu",
"(",
"[",
"test_tgt_sentences",
"]",
",",
"test_translation_out",
")",
"logging",
".",
"info",
"(",
"'Best model test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'",
".",
"format",
"(",
"test_loss",
",",
"np",
".",
"exp",
"(",
"test_loss",
")",
",",
"test_bleu_score",
"*",
"100",
")",
")",
"dataprocessor",
".",
"write_sentences",
"(",
"valid_translation_out",
",",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"save_dir",
",",
"'best_valid_out.txt'",
")",
")",
"dataprocessor",
".",
"write_sentences",
"(",
"test_translation_out",
",",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"save_dir",
",",
"'best_test_out.txt'",
")",
")"
] | Training function. | [
"Training",
"function",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/train_gnmt.py#L193-L276 | train |
dmlc/gluon-nlp | src/gluonnlp/model/train/__init__.py | get_cache_model | def get_cache_model(name, dataset_name='wikitext-2', window=2000,
theta=0.6, lambdas=0.2, ctx=mx.cpu(), **kwargs):
r"""Returns a cache model using a pre-trained language model.
We implement the neural cache language model proposed in the following work::
@article{grave2016improving,
title={Improving neural language models with a continuous cache},
author={Grave, Edouard and Joulin, Armand and Usunier, Nicolas},
journal={ICLR},
year={2017}
}
Parameters
----------
name : str
Name of the cache language model.
dataset_name : str or None, default 'wikitext-2'.
The dataset name on which the pre-trained model is trained.
Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from
the training set of the dataset.
If None, then vocab is required, for specifying embedding weight size, and is directly
returned.
window : int
Size of cache window
theta : float
The scala controls the flatness of the cache distribution
that predict the next word as shown below:
.. math::
p_{cache} \propto \sum_{i=1}^{t-1} \mathbb{1}_{w=x_{i+1}} exp(\theta {h_t}^T h_i)
where :math:`p_{cache}` is the cache distribution, :math:`\mathbb{1}` is
the identity function, and :math:`h_i` is the output of timestep i.
lambdas : float
Linear scalar between only cache and vocab distribution, the formulation is as below:
.. math::
p = (1 - \lambda) p_{vocab} + \lambda p_{cache}
where :math:`p_{vocab}` is the vocabulary distribution and :math:`p_{cache}`
is the cache distribution.
vocab : gluonnlp.Vocab or None, default None
Vocabulary object to be used with the language model.
Required when dataset_name is not specified.
pretrained : bool, default False
Whether to load the pre-trained weights for model.
ctx : Context, default CPU
The context in which to load the pre-trained weights.
root : str, default '~/.mxnet/models'
Location for keeping the pre-trained model parameters.
Returns
-------
Block
The model.
"""
lm_model, vocab = nlp.model.\
get_model(name, dataset_name=dataset_name, pretrained=True, ctx=ctx, **kwargs)
cache_cell = CacheCell(lm_model, len(vocab), window, theta, lambdas)
return cache_cell | python | def get_cache_model(name, dataset_name='wikitext-2', window=2000,
theta=0.6, lambdas=0.2, ctx=mx.cpu(), **kwargs):
r"""Returns a cache model using a pre-trained language model.
We implement the neural cache language model proposed in the following work::
@article{grave2016improving,
title={Improving neural language models with a continuous cache},
author={Grave, Edouard and Joulin, Armand and Usunier, Nicolas},
journal={ICLR},
year={2017}
}
Parameters
----------
name : str
Name of the cache language model.
dataset_name : str or None, default 'wikitext-2'.
The dataset name on which the pre-trained model is trained.
Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from
the training set of the dataset.
If None, then vocab is required, for specifying embedding weight size, and is directly
returned.
window : int
Size of cache window
theta : float
The scala controls the flatness of the cache distribution
that predict the next word as shown below:
.. math::
p_{cache} \propto \sum_{i=1}^{t-1} \mathbb{1}_{w=x_{i+1}} exp(\theta {h_t}^T h_i)
where :math:`p_{cache}` is the cache distribution, :math:`\mathbb{1}` is
the identity function, and :math:`h_i` is the output of timestep i.
lambdas : float
Linear scalar between only cache and vocab distribution, the formulation is as below:
.. math::
p = (1 - \lambda) p_{vocab} + \lambda p_{cache}
where :math:`p_{vocab}` is the vocabulary distribution and :math:`p_{cache}`
is the cache distribution.
vocab : gluonnlp.Vocab or None, default None
Vocabulary object to be used with the language model.
Required when dataset_name is not specified.
pretrained : bool, default False
Whether to load the pre-trained weights for model.
ctx : Context, default CPU
The context in which to load the pre-trained weights.
root : str, default '~/.mxnet/models'
Location for keeping the pre-trained model parameters.
Returns
-------
Block
The model.
"""
lm_model, vocab = nlp.model.\
get_model(name, dataset_name=dataset_name, pretrained=True, ctx=ctx, **kwargs)
cache_cell = CacheCell(lm_model, len(vocab), window, theta, lambdas)
return cache_cell | [
"def",
"get_cache_model",
"(",
"name",
",",
"dataset_name",
"=",
"'wikitext-2'",
",",
"window",
"=",
"2000",
",",
"theta",
"=",
"0.6",
",",
"lambdas",
"=",
"0.2",
",",
"ctx",
"=",
"mx",
".",
"cpu",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"lm_model",
",",
"vocab",
"=",
"nlp",
".",
"model",
".",
"get_model",
"(",
"name",
",",
"dataset_name",
"=",
"dataset_name",
",",
"pretrained",
"=",
"True",
",",
"ctx",
"=",
"ctx",
",",
"*",
"*",
"kwargs",
")",
"cache_cell",
"=",
"CacheCell",
"(",
"lm_model",
",",
"len",
"(",
"vocab",
")",
",",
"window",
",",
"theta",
",",
"lambdas",
")",
"return",
"cache_cell"
] | r"""Returns a cache model using a pre-trained language model.
We implement the neural cache language model proposed in the following work::
@article{grave2016improving,
title={Improving neural language models with a continuous cache},
author={Grave, Edouard and Joulin, Armand and Usunier, Nicolas},
journal={ICLR},
year={2017}
}
Parameters
----------
name : str
Name of the cache language model.
dataset_name : str or None, default 'wikitext-2'.
The dataset name on which the pre-trained model is trained.
Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from
the training set of the dataset.
If None, then vocab is required, for specifying embedding weight size, and is directly
returned.
window : int
Size of cache window
theta : float
The scala controls the flatness of the cache distribution
that predict the next word as shown below:
.. math::
p_{cache} \propto \sum_{i=1}^{t-1} \mathbb{1}_{w=x_{i+1}} exp(\theta {h_t}^T h_i)
where :math:`p_{cache}` is the cache distribution, :math:`\mathbb{1}` is
the identity function, and :math:`h_i` is the output of timestep i.
lambdas : float
Linear scalar between only cache and vocab distribution, the formulation is as below:
.. math::
p = (1 - \lambda) p_{vocab} + \lambda p_{cache}
where :math:`p_{vocab}` is the vocabulary distribution and :math:`p_{cache}`
is the cache distribution.
vocab : gluonnlp.Vocab or None, default None
Vocabulary object to be used with the language model.
Required when dataset_name is not specified.
pretrained : bool, default False
Whether to load the pre-trained weights for model.
ctx : Context, default CPU
The context in which to load the pre-trained weights.
root : str, default '~/.mxnet/models'
Location for keeping the pre-trained model parameters.
Returns
-------
Block
The model. | [
"r",
"Returns",
"a",
"cache",
"model",
"using",
"a",
"pre",
"-",
"trained",
"language",
"model",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/train/__init__.py#L36-L98 | train |
dmlc/gluon-nlp | scripts/word_embeddings/train_sg_cbow.py | train | def train(args):
"""Training helper."""
if not args.model.lower() in ['cbow', 'skipgram']:
logging.error('Unsupported model %s.', args.model)
sys.exit(1)
if args.data.lower() == 'toy':
data = mx.gluon.data.SimpleDataset(nlp.data.Text8(segment='train')[:2])
data, vocab, idx_to_counts = preprocess_dataset(
data, max_vocab_size=args.max_vocab_size)
elif args.data.lower() == 'text8':
data = nlp.data.Text8(segment='train')
data, vocab, idx_to_counts = preprocess_dataset(
data, max_vocab_size=args.max_vocab_size)
elif args.data.lower() == 'fil9':
data = nlp.data.Fil9(max_sentence_length=10000)
data, vocab, idx_to_counts = preprocess_dataset(
data, max_vocab_size=args.max_vocab_size)
elif args.data.lower() == 'wiki':
data, vocab, idx_to_counts = wiki(args.wiki_root, args.wiki_date,
args.wiki_language,
args.max_vocab_size)
if args.ngram_buckets > 0:
data, batchify_fn, subword_function = transform_data_fasttext(
data, vocab, idx_to_counts, cbow=args.model.lower() == 'cbow',
ngram_buckets=args.ngram_buckets, ngrams=args.ngrams,
batch_size=args.batch_size, window_size=args.window,
frequent_token_subsampling=args.frequent_token_subsampling)
else:
subword_function = None
data, batchify_fn = transform_data_word2vec(
data, vocab, idx_to_counts, cbow=args.model.lower() == 'cbow',
batch_size=args.batch_size, window_size=args.window,
frequent_token_subsampling=args.frequent_token_subsampling)
num_tokens = float(sum(idx_to_counts))
model = CBOW if args.model.lower() == 'cbow' else SG
embedding = model(token_to_idx=vocab.token_to_idx, output_dim=args.emsize,
batch_size=args.batch_size, num_negatives=args.negative,
negatives_weights=mx.nd.array(idx_to_counts),
subword_function=subword_function)
context = get_context(args)
embedding.initialize(ctx=context)
if not args.no_hybridize:
embedding.hybridize(static_alloc=True, static_shape=True)
optimizer_kwargs = dict(learning_rate=args.lr)
try:
trainer = mx.gluon.Trainer(embedding.collect_params(), args.optimizer,
optimizer_kwargs)
except ValueError as e:
if args.optimizer == 'groupadagrad':
logging.warning('MXNet <= v1.3 does not contain '
'GroupAdaGrad support. Falling back to AdaGrad')
trainer = mx.gluon.Trainer(embedding.collect_params(), 'adagrad',
optimizer_kwargs)
else:
raise e
try:
if args.no_prefetch_batch:
data = data.transform(batchify_fn)
else:
from executors import LazyThreadPoolExecutor
num_cpu = len(os.sched_getaffinity(0))
ex = LazyThreadPoolExecutor(num_cpu)
except (ImportError, SyntaxError, AttributeError):
# Py2 - no async prefetching is supported
logging.warning(
'Asynchronous batch prefetching is not supported on Python 2. '
'Consider upgrading to Python 3 for improved performance.')
data = data.transform(batchify_fn)
num_update = 0
prefetched_iters = []
for _ in range(min(args.num_prefetch_epoch, args.epochs)):
prefetched_iters.append(iter(data))
for epoch in range(args.epochs):
if epoch + len(prefetched_iters) < args.epochs:
prefetched_iters.append(iter(data))
data_iter = prefetched_iters.pop(0)
try:
batches = ex.map(batchify_fn, data_iter)
except NameError: # Py 2 or batch prefetching disabled
batches = data_iter
# Logging variables
log_wc = 0
log_start_time = time.time()
log_avg_loss = 0
for i, batch in enumerate(batches):
ctx = context[i % len(context)]
batch = [array.as_in_context(ctx) for array in batch]
with mx.autograd.record():
loss = embedding(*batch)
loss.backward()
num_update += loss.shape[0]
if len(context) == 1 or (i + 1) % len(context) == 0:
trainer.step(batch_size=1)
# Logging
log_wc += loss.shape[0]
log_avg_loss += loss.mean().as_in_context(context[0])
if (i + 1) % args.log_interval == 0:
# Forces waiting for computation by computing loss value
log_avg_loss = log_avg_loss.asscalar() / args.log_interval
wps = log_wc / (time.time() - log_start_time)
# Due to subsampling, the overall number of batches is an upper
# bound
num_batches = num_tokens // args.batch_size
if args.model.lower() == 'skipgram':
num_batches = (num_tokens * args.window * 2) // args.batch_size
else:
num_batches = num_tokens // args.batch_size
logging.info('[Epoch {} Batch {}/{}] loss={:.4f}, '
'throughput={:.2f}K wps, wc={:.2f}K'.format(
epoch, i + 1, num_batches, log_avg_loss,
wps / 1000, log_wc / 1000))
log_start_time = time.time()
log_avg_loss = 0
log_wc = 0
if args.eval_interval and (i + 1) % args.eval_interval == 0:
with print_time('mx.nd.waitall()'):
mx.nd.waitall()
with print_time('evaluate'):
evaluate(args, embedding, vocab, num_update)
# Evaluate
with print_time('mx.nd.waitall()'):
mx.nd.waitall()
with print_time('evaluate'):
evaluate(args, embedding, vocab, num_update,
eval_analogy=not args.no_eval_analogy)
# Save params
with print_time('save parameters'):
embedding.save_parameters(os.path.join(args.logdir, 'embedding.params')) | python | def train(args):
"""Training helper."""
if not args.model.lower() in ['cbow', 'skipgram']:
logging.error('Unsupported model %s.', args.model)
sys.exit(1)
if args.data.lower() == 'toy':
data = mx.gluon.data.SimpleDataset(nlp.data.Text8(segment='train')[:2])
data, vocab, idx_to_counts = preprocess_dataset(
data, max_vocab_size=args.max_vocab_size)
elif args.data.lower() == 'text8':
data = nlp.data.Text8(segment='train')
data, vocab, idx_to_counts = preprocess_dataset(
data, max_vocab_size=args.max_vocab_size)
elif args.data.lower() == 'fil9':
data = nlp.data.Fil9(max_sentence_length=10000)
data, vocab, idx_to_counts = preprocess_dataset(
data, max_vocab_size=args.max_vocab_size)
elif args.data.lower() == 'wiki':
data, vocab, idx_to_counts = wiki(args.wiki_root, args.wiki_date,
args.wiki_language,
args.max_vocab_size)
if args.ngram_buckets > 0:
data, batchify_fn, subword_function = transform_data_fasttext(
data, vocab, idx_to_counts, cbow=args.model.lower() == 'cbow',
ngram_buckets=args.ngram_buckets, ngrams=args.ngrams,
batch_size=args.batch_size, window_size=args.window,
frequent_token_subsampling=args.frequent_token_subsampling)
else:
subword_function = None
data, batchify_fn = transform_data_word2vec(
data, vocab, idx_to_counts, cbow=args.model.lower() == 'cbow',
batch_size=args.batch_size, window_size=args.window,
frequent_token_subsampling=args.frequent_token_subsampling)
num_tokens = float(sum(idx_to_counts))
model = CBOW if args.model.lower() == 'cbow' else SG
embedding = model(token_to_idx=vocab.token_to_idx, output_dim=args.emsize,
batch_size=args.batch_size, num_negatives=args.negative,
negatives_weights=mx.nd.array(idx_to_counts),
subword_function=subword_function)
context = get_context(args)
embedding.initialize(ctx=context)
if not args.no_hybridize:
embedding.hybridize(static_alloc=True, static_shape=True)
optimizer_kwargs = dict(learning_rate=args.lr)
try:
trainer = mx.gluon.Trainer(embedding.collect_params(), args.optimizer,
optimizer_kwargs)
except ValueError as e:
if args.optimizer == 'groupadagrad':
logging.warning('MXNet <= v1.3 does not contain '
'GroupAdaGrad support. Falling back to AdaGrad')
trainer = mx.gluon.Trainer(embedding.collect_params(), 'adagrad',
optimizer_kwargs)
else:
raise e
try:
if args.no_prefetch_batch:
data = data.transform(batchify_fn)
else:
from executors import LazyThreadPoolExecutor
num_cpu = len(os.sched_getaffinity(0))
ex = LazyThreadPoolExecutor(num_cpu)
except (ImportError, SyntaxError, AttributeError):
# Py2 - no async prefetching is supported
logging.warning(
'Asynchronous batch prefetching is not supported on Python 2. '
'Consider upgrading to Python 3 for improved performance.')
data = data.transform(batchify_fn)
num_update = 0
prefetched_iters = []
for _ in range(min(args.num_prefetch_epoch, args.epochs)):
prefetched_iters.append(iter(data))
for epoch in range(args.epochs):
if epoch + len(prefetched_iters) < args.epochs:
prefetched_iters.append(iter(data))
data_iter = prefetched_iters.pop(0)
try:
batches = ex.map(batchify_fn, data_iter)
except NameError: # Py 2 or batch prefetching disabled
batches = data_iter
# Logging variables
log_wc = 0
log_start_time = time.time()
log_avg_loss = 0
for i, batch in enumerate(batches):
ctx = context[i % len(context)]
batch = [array.as_in_context(ctx) for array in batch]
with mx.autograd.record():
loss = embedding(*batch)
loss.backward()
num_update += loss.shape[0]
if len(context) == 1 or (i + 1) % len(context) == 0:
trainer.step(batch_size=1)
# Logging
log_wc += loss.shape[0]
log_avg_loss += loss.mean().as_in_context(context[0])
if (i + 1) % args.log_interval == 0:
# Forces waiting for computation by computing loss value
log_avg_loss = log_avg_loss.asscalar() / args.log_interval
wps = log_wc / (time.time() - log_start_time)
# Due to subsampling, the overall number of batches is an upper
# bound
num_batches = num_tokens // args.batch_size
if args.model.lower() == 'skipgram':
num_batches = (num_tokens * args.window * 2) // args.batch_size
else:
num_batches = num_tokens // args.batch_size
logging.info('[Epoch {} Batch {}/{}] loss={:.4f}, '
'throughput={:.2f}K wps, wc={:.2f}K'.format(
epoch, i + 1, num_batches, log_avg_loss,
wps / 1000, log_wc / 1000))
log_start_time = time.time()
log_avg_loss = 0
log_wc = 0
if args.eval_interval and (i + 1) % args.eval_interval == 0:
with print_time('mx.nd.waitall()'):
mx.nd.waitall()
with print_time('evaluate'):
evaluate(args, embedding, vocab, num_update)
# Evaluate
with print_time('mx.nd.waitall()'):
mx.nd.waitall()
with print_time('evaluate'):
evaluate(args, embedding, vocab, num_update,
eval_analogy=not args.no_eval_analogy)
# Save params
with print_time('save parameters'):
embedding.save_parameters(os.path.join(args.logdir, 'embedding.params')) | [
"def",
"train",
"(",
"args",
")",
":",
"if",
"not",
"args",
".",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'cbow'",
",",
"'skipgram'",
"]",
":",
"logging",
".",
"error",
"(",
"'Unsupported model %s.'",
",",
"args",
".",
"model",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"data",
".",
"lower",
"(",
")",
"==",
"'toy'",
":",
"data",
"=",
"mx",
".",
"gluon",
".",
"data",
".",
"SimpleDataset",
"(",
"nlp",
".",
"data",
".",
"Text8",
"(",
"segment",
"=",
"'train'",
")",
"[",
":",
"2",
"]",
")",
"data",
",",
"vocab",
",",
"idx_to_counts",
"=",
"preprocess_dataset",
"(",
"data",
",",
"max_vocab_size",
"=",
"args",
".",
"max_vocab_size",
")",
"elif",
"args",
".",
"data",
".",
"lower",
"(",
")",
"==",
"'text8'",
":",
"data",
"=",
"nlp",
".",
"data",
".",
"Text8",
"(",
"segment",
"=",
"'train'",
")",
"data",
",",
"vocab",
",",
"idx_to_counts",
"=",
"preprocess_dataset",
"(",
"data",
",",
"max_vocab_size",
"=",
"args",
".",
"max_vocab_size",
")",
"elif",
"args",
".",
"data",
".",
"lower",
"(",
")",
"==",
"'fil9'",
":",
"data",
"=",
"nlp",
".",
"data",
".",
"Fil9",
"(",
"max_sentence_length",
"=",
"10000",
")",
"data",
",",
"vocab",
",",
"idx_to_counts",
"=",
"preprocess_dataset",
"(",
"data",
",",
"max_vocab_size",
"=",
"args",
".",
"max_vocab_size",
")",
"elif",
"args",
".",
"data",
".",
"lower",
"(",
")",
"==",
"'wiki'",
":",
"data",
",",
"vocab",
",",
"idx_to_counts",
"=",
"wiki",
"(",
"args",
".",
"wiki_root",
",",
"args",
".",
"wiki_date",
",",
"args",
".",
"wiki_language",
",",
"args",
".",
"max_vocab_size",
")",
"if",
"args",
".",
"ngram_buckets",
">",
"0",
":",
"data",
",",
"batchify_fn",
",",
"subword_function",
"=",
"transform_data_fasttext",
"(",
"data",
",",
"vocab",
",",
"idx_to_counts",
",",
"cbow",
"=",
"args",
".",
"model",
".",
"lower",
"(",
")",
"==",
"'cbow'",
",",
"ngram_buckets",
"=",
"args",
".",
"ngram_buckets",
",",
"ngrams",
"=",
"args",
".",
"ngrams",
",",
"batch_size",
"=",
"args",
".",
"batch_size",
",",
"window_size",
"=",
"args",
".",
"window",
",",
"frequent_token_subsampling",
"=",
"args",
".",
"frequent_token_subsampling",
")",
"else",
":",
"subword_function",
"=",
"None",
"data",
",",
"batchify_fn",
"=",
"transform_data_word2vec",
"(",
"data",
",",
"vocab",
",",
"idx_to_counts",
",",
"cbow",
"=",
"args",
".",
"model",
".",
"lower",
"(",
")",
"==",
"'cbow'",
",",
"batch_size",
"=",
"args",
".",
"batch_size",
",",
"window_size",
"=",
"args",
".",
"window",
",",
"frequent_token_subsampling",
"=",
"args",
".",
"frequent_token_subsampling",
")",
"num_tokens",
"=",
"float",
"(",
"sum",
"(",
"idx_to_counts",
")",
")",
"model",
"=",
"CBOW",
"if",
"args",
".",
"model",
".",
"lower",
"(",
")",
"==",
"'cbow'",
"else",
"SG",
"embedding",
"=",
"model",
"(",
"token_to_idx",
"=",
"vocab",
".",
"token_to_idx",
",",
"output_dim",
"=",
"args",
".",
"emsize",
",",
"batch_size",
"=",
"args",
".",
"batch_size",
",",
"num_negatives",
"=",
"args",
".",
"negative",
",",
"negatives_weights",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"idx_to_counts",
")",
",",
"subword_function",
"=",
"subword_function",
")",
"context",
"=",
"get_context",
"(",
"args",
")",
"embedding",
".",
"initialize",
"(",
"ctx",
"=",
"context",
")",
"if",
"not",
"args",
".",
"no_hybridize",
":",
"embedding",
".",
"hybridize",
"(",
"static_alloc",
"=",
"True",
",",
"static_shape",
"=",
"True",
")",
"optimizer_kwargs",
"=",
"dict",
"(",
"learning_rate",
"=",
"args",
".",
"lr",
")",
"try",
":",
"trainer",
"=",
"mx",
".",
"gluon",
".",
"Trainer",
"(",
"embedding",
".",
"collect_params",
"(",
")",
",",
"args",
".",
"optimizer",
",",
"optimizer_kwargs",
")",
"except",
"ValueError",
"as",
"e",
":",
"if",
"args",
".",
"optimizer",
"==",
"'groupadagrad'",
":",
"logging",
".",
"warning",
"(",
"'MXNet <= v1.3 does not contain '",
"'GroupAdaGrad support. Falling back to AdaGrad'",
")",
"trainer",
"=",
"mx",
".",
"gluon",
".",
"Trainer",
"(",
"embedding",
".",
"collect_params",
"(",
")",
",",
"'adagrad'",
",",
"optimizer_kwargs",
")",
"else",
":",
"raise",
"e",
"try",
":",
"if",
"args",
".",
"no_prefetch_batch",
":",
"data",
"=",
"data",
".",
"transform",
"(",
"batchify_fn",
")",
"else",
":",
"from",
"executors",
"import",
"LazyThreadPoolExecutor",
"num_cpu",
"=",
"len",
"(",
"os",
".",
"sched_getaffinity",
"(",
"0",
")",
")",
"ex",
"=",
"LazyThreadPoolExecutor",
"(",
"num_cpu",
")",
"except",
"(",
"ImportError",
",",
"SyntaxError",
",",
"AttributeError",
")",
":",
"# Py2 - no async prefetching is supported",
"logging",
".",
"warning",
"(",
"'Asynchronous batch prefetching is not supported on Python 2. '",
"'Consider upgrading to Python 3 for improved performance.'",
")",
"data",
"=",
"data",
".",
"transform",
"(",
"batchify_fn",
")",
"num_update",
"=",
"0",
"prefetched_iters",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"min",
"(",
"args",
".",
"num_prefetch_epoch",
",",
"args",
".",
"epochs",
")",
")",
":",
"prefetched_iters",
".",
"append",
"(",
"iter",
"(",
"data",
")",
")",
"for",
"epoch",
"in",
"range",
"(",
"args",
".",
"epochs",
")",
":",
"if",
"epoch",
"+",
"len",
"(",
"prefetched_iters",
")",
"<",
"args",
".",
"epochs",
":",
"prefetched_iters",
".",
"append",
"(",
"iter",
"(",
"data",
")",
")",
"data_iter",
"=",
"prefetched_iters",
".",
"pop",
"(",
"0",
")",
"try",
":",
"batches",
"=",
"ex",
".",
"map",
"(",
"batchify_fn",
",",
"data_iter",
")",
"except",
"NameError",
":",
"# Py 2 or batch prefetching disabled",
"batches",
"=",
"data_iter",
"# Logging variables",
"log_wc",
"=",
"0",
"log_start_time",
"=",
"time",
".",
"time",
"(",
")",
"log_avg_loss",
"=",
"0",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"batches",
")",
":",
"ctx",
"=",
"context",
"[",
"i",
"%",
"len",
"(",
"context",
")",
"]",
"batch",
"=",
"[",
"array",
".",
"as_in_context",
"(",
"ctx",
")",
"for",
"array",
"in",
"batch",
"]",
"with",
"mx",
".",
"autograd",
".",
"record",
"(",
")",
":",
"loss",
"=",
"embedding",
"(",
"*",
"batch",
")",
"loss",
".",
"backward",
"(",
")",
"num_update",
"+=",
"loss",
".",
"shape",
"[",
"0",
"]",
"if",
"len",
"(",
"context",
")",
"==",
"1",
"or",
"(",
"i",
"+",
"1",
")",
"%",
"len",
"(",
"context",
")",
"==",
"0",
":",
"trainer",
".",
"step",
"(",
"batch_size",
"=",
"1",
")",
"# Logging",
"log_wc",
"+=",
"loss",
".",
"shape",
"[",
"0",
"]",
"log_avg_loss",
"+=",
"loss",
".",
"mean",
"(",
")",
".",
"as_in_context",
"(",
"context",
"[",
"0",
"]",
")",
"if",
"(",
"i",
"+",
"1",
")",
"%",
"args",
".",
"log_interval",
"==",
"0",
":",
"# Forces waiting for computation by computing loss value",
"log_avg_loss",
"=",
"log_avg_loss",
".",
"asscalar",
"(",
")",
"/",
"args",
".",
"log_interval",
"wps",
"=",
"log_wc",
"/",
"(",
"time",
".",
"time",
"(",
")",
"-",
"log_start_time",
")",
"# Due to subsampling, the overall number of batches is an upper",
"# bound",
"num_batches",
"=",
"num_tokens",
"//",
"args",
".",
"batch_size",
"if",
"args",
".",
"model",
".",
"lower",
"(",
")",
"==",
"'skipgram'",
":",
"num_batches",
"=",
"(",
"num_tokens",
"*",
"args",
".",
"window",
"*",
"2",
")",
"//",
"args",
".",
"batch_size",
"else",
":",
"num_batches",
"=",
"num_tokens",
"//",
"args",
".",
"batch_size",
"logging",
".",
"info",
"(",
"'[Epoch {} Batch {}/{}] loss={:.4f}, '",
"'throughput={:.2f}K wps, wc={:.2f}K'",
".",
"format",
"(",
"epoch",
",",
"i",
"+",
"1",
",",
"num_batches",
",",
"log_avg_loss",
",",
"wps",
"/",
"1000",
",",
"log_wc",
"/",
"1000",
")",
")",
"log_start_time",
"=",
"time",
".",
"time",
"(",
")",
"log_avg_loss",
"=",
"0",
"log_wc",
"=",
"0",
"if",
"args",
".",
"eval_interval",
"and",
"(",
"i",
"+",
"1",
")",
"%",
"args",
".",
"eval_interval",
"==",
"0",
":",
"with",
"print_time",
"(",
"'mx.nd.waitall()'",
")",
":",
"mx",
".",
"nd",
".",
"waitall",
"(",
")",
"with",
"print_time",
"(",
"'evaluate'",
")",
":",
"evaluate",
"(",
"args",
",",
"embedding",
",",
"vocab",
",",
"num_update",
")",
"# Evaluate",
"with",
"print_time",
"(",
"'mx.nd.waitall()'",
")",
":",
"mx",
".",
"nd",
".",
"waitall",
"(",
")",
"with",
"print_time",
"(",
"'evaluate'",
")",
":",
"evaluate",
"(",
"args",
",",
"embedding",
",",
"vocab",
",",
"num_update",
",",
"eval_analogy",
"=",
"not",
"args",
".",
"no_eval_analogy",
")",
"# Save params",
"with",
"print_time",
"(",
"'save parameters'",
")",
":",
"embedding",
".",
"save_parameters",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"logdir",
",",
"'embedding.params'",
")",
")"
] | Training helper. | [
"Training",
"helper",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/train_sg_cbow.py#L138-L279 | train |
dmlc/gluon-nlp | scripts/word_embeddings/train_sg_cbow.py | evaluate | def evaluate(args, embedding, vocab, global_step, eval_analogy=False):
"""Evaluation helper"""
if 'eval_tokens' not in globals():
global eval_tokens
eval_tokens_set = evaluation.get_tokens_in_evaluation_datasets(args)
if not args.no_eval_analogy:
eval_tokens_set.update(vocab.idx_to_token)
if not args.ngram_buckets:
# Word2Vec does not support computing vectors for OOV words
eval_tokens_set = filter(lambda t: t in vocab, eval_tokens_set)
eval_tokens = list(eval_tokens_set)
if not os.path.isdir(args.logdir):
os.makedirs(args.logdir)
# Compute their word vectors
context = get_context(args)
mx.nd.waitall()
token_embedding = nlp.embedding.TokenEmbedding(unknown_token=None,
allow_extend=True)
token_embedding[eval_tokens] = embedding[eval_tokens]
results = evaluation.evaluate_similarity(
args, token_embedding, context[0], logfile=os.path.join(
args.logdir, 'similarity.tsv'), global_step=global_step)
if eval_analogy:
assert not args.no_eval_analogy
results += evaluation.evaluate_analogy(
args, token_embedding, context[0], logfile=os.path.join(
args.logdir, 'analogy.tsv'))
return results | python | def evaluate(args, embedding, vocab, global_step, eval_analogy=False):
"""Evaluation helper"""
if 'eval_tokens' not in globals():
global eval_tokens
eval_tokens_set = evaluation.get_tokens_in_evaluation_datasets(args)
if not args.no_eval_analogy:
eval_tokens_set.update(vocab.idx_to_token)
if not args.ngram_buckets:
# Word2Vec does not support computing vectors for OOV words
eval_tokens_set = filter(lambda t: t in vocab, eval_tokens_set)
eval_tokens = list(eval_tokens_set)
if not os.path.isdir(args.logdir):
os.makedirs(args.logdir)
# Compute their word vectors
context = get_context(args)
mx.nd.waitall()
token_embedding = nlp.embedding.TokenEmbedding(unknown_token=None,
allow_extend=True)
token_embedding[eval_tokens] = embedding[eval_tokens]
results = evaluation.evaluate_similarity(
args, token_embedding, context[0], logfile=os.path.join(
args.logdir, 'similarity.tsv'), global_step=global_step)
if eval_analogy:
assert not args.no_eval_analogy
results += evaluation.evaluate_analogy(
args, token_embedding, context[0], logfile=os.path.join(
args.logdir, 'analogy.tsv'))
return results | [
"def",
"evaluate",
"(",
"args",
",",
"embedding",
",",
"vocab",
",",
"global_step",
",",
"eval_analogy",
"=",
"False",
")",
":",
"if",
"'eval_tokens'",
"not",
"in",
"globals",
"(",
")",
":",
"global",
"eval_tokens",
"eval_tokens_set",
"=",
"evaluation",
".",
"get_tokens_in_evaluation_datasets",
"(",
"args",
")",
"if",
"not",
"args",
".",
"no_eval_analogy",
":",
"eval_tokens_set",
".",
"update",
"(",
"vocab",
".",
"idx_to_token",
")",
"if",
"not",
"args",
".",
"ngram_buckets",
":",
"# Word2Vec does not support computing vectors for OOV words",
"eval_tokens_set",
"=",
"filter",
"(",
"lambda",
"t",
":",
"t",
"in",
"vocab",
",",
"eval_tokens_set",
")",
"eval_tokens",
"=",
"list",
"(",
"eval_tokens_set",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"args",
".",
"logdir",
")",
":",
"os",
".",
"makedirs",
"(",
"args",
".",
"logdir",
")",
"# Compute their word vectors",
"context",
"=",
"get_context",
"(",
"args",
")",
"mx",
".",
"nd",
".",
"waitall",
"(",
")",
"token_embedding",
"=",
"nlp",
".",
"embedding",
".",
"TokenEmbedding",
"(",
"unknown_token",
"=",
"None",
",",
"allow_extend",
"=",
"True",
")",
"token_embedding",
"[",
"eval_tokens",
"]",
"=",
"embedding",
"[",
"eval_tokens",
"]",
"results",
"=",
"evaluation",
".",
"evaluate_similarity",
"(",
"args",
",",
"token_embedding",
",",
"context",
"[",
"0",
"]",
",",
"logfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"logdir",
",",
"'similarity.tsv'",
")",
",",
"global_step",
"=",
"global_step",
")",
"if",
"eval_analogy",
":",
"assert",
"not",
"args",
".",
"no_eval_analogy",
"results",
"+=",
"evaluation",
".",
"evaluate_analogy",
"(",
"args",
",",
"token_embedding",
",",
"context",
"[",
"0",
"]",
",",
"logfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"logdir",
",",
"'analogy.tsv'",
")",
")",
"return",
"results"
] | Evaluation helper | [
"Evaluation",
"helper"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/train_sg_cbow.py#L282-L317 | train |
dmlc/gluon-nlp | src/gluonnlp/data/dataset.py | NumpyDataset.get_field | def get_field(self, field):
"""Return the dataset corresponds to the provided key.
Example::
a = np.ones((2,2))
b = np.zeros((2,2))
np.savez('data.npz', a=a, b=b)
dataset = NumpyDataset('data.npz')
data_a = dataset.get_field('a')
data_b = dataset.get_field('b')
Parameters
----------
field : str
The name of the field to retrieve.
"""
idx = self._keys.index(field)
return self._data[idx] | python | def get_field(self, field):
"""Return the dataset corresponds to the provided key.
Example::
a = np.ones((2,2))
b = np.zeros((2,2))
np.savez('data.npz', a=a, b=b)
dataset = NumpyDataset('data.npz')
data_a = dataset.get_field('a')
data_b = dataset.get_field('b')
Parameters
----------
field : str
The name of the field to retrieve.
"""
idx = self._keys.index(field)
return self._data[idx] | [
"def",
"get_field",
"(",
"self",
",",
"field",
")",
":",
"idx",
"=",
"self",
".",
"_keys",
".",
"index",
"(",
"field",
")",
"return",
"self",
".",
"_data",
"[",
"idx",
"]"
] | Return the dataset corresponds to the provided key.
Example::
a = np.ones((2,2))
b = np.zeros((2,2))
np.savez('data.npz', a=a, b=b)
dataset = NumpyDataset('data.npz')
data_a = dataset.get_field('a')
data_b = dataset.get_field('b')
Parameters
----------
field : str
The name of the field to retrieve. | [
"Return",
"the",
"dataset",
"corresponds",
"to",
"the",
"provided",
"key",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/dataset.py#L259-L276 | train |
dmlc/gluon-nlp | scripts/bert/bert_qa_evaluate.py | get_final_text | def get_final_text(pred_text, orig_text, tokenizer):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heruistic between
# `pred_text` and `orig_text` to get a character-to-charcter alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = OrderedDict()
for (i, c) in enumerate(text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tok_text = ' '.join(tokenizer(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text | python | def get_final_text(pred_text, orig_text, tokenizer):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heruistic between
# `pred_text` and `orig_text` to get a character-to-charcter alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = OrderedDict()
for (i, c) in enumerate(text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tok_text = ' '.join(tokenizer(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text | [
"def",
"get_final_text",
"(",
"pred_text",
",",
"orig_text",
",",
"tokenizer",
")",
":",
"# When we created the data, we kept track of the alignment between original",
"# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So",
"# now `orig_text` contains the span of our original text corresponding to the",
"# span that we predicted.",
"#",
"# However, `orig_text` may contain extra characters that we don't want in",
"# our prediction.",
"#",
"# For example, let's say:",
"# pred_text = steve smith",
"# orig_text = Steve Smith's",
"#",
"# We don't want to return `orig_text` because it contains the extra \"'s\".",
"#",
"# We don't want to return `pred_text` because it's already been normalized",
"# (the SQuAD eval script also does punctuation stripping/lower casing but",
"# our tokenizer does additional normalization like stripping accent",
"# characters).",
"#",
"# What we really want to return is \"Steve Smith\".",
"#",
"# Therefore, we have to apply a semi-complicated alignment heruistic between",
"# `pred_text` and `orig_text` to get a character-to-charcter alignment. This",
"# can fail in certain cases in which case we just return `orig_text`.",
"def",
"_strip_spaces",
"(",
"text",
")",
":",
"ns_chars",
"=",
"[",
"]",
"ns_to_s_map",
"=",
"OrderedDict",
"(",
")",
"for",
"(",
"i",
",",
"c",
")",
"in",
"enumerate",
"(",
"text",
")",
":",
"if",
"c",
"==",
"' '",
":",
"continue",
"ns_to_s_map",
"[",
"len",
"(",
"ns_chars",
")",
"]",
"=",
"i",
"ns_chars",
".",
"append",
"(",
"c",
")",
"ns_text",
"=",
"''",
".",
"join",
"(",
"ns_chars",
")",
"return",
"(",
"ns_text",
",",
"ns_to_s_map",
")",
"# We first tokenize `orig_text`, strip whitespace from the result",
"# and `pred_text`, and check if they are the same length. If they are",
"# NOT the same length, the heuristic has failed. If they are the same",
"# length, we assume the characters are one-to-one aligned.",
"tok_text",
"=",
"' '",
".",
"join",
"(",
"tokenizer",
"(",
"orig_text",
")",
")",
"start_position",
"=",
"tok_text",
".",
"find",
"(",
"pred_text",
")",
"if",
"start_position",
"==",
"-",
"1",
":",
"return",
"orig_text",
"end_position",
"=",
"start_position",
"+",
"len",
"(",
"pred_text",
")",
"-",
"1",
"(",
"orig_ns_text",
",",
"orig_ns_to_s_map",
")",
"=",
"_strip_spaces",
"(",
"orig_text",
")",
"(",
"tok_ns_text",
",",
"tok_ns_to_s_map",
")",
"=",
"_strip_spaces",
"(",
"tok_text",
")",
"if",
"len",
"(",
"orig_ns_text",
")",
"!=",
"len",
"(",
"tok_ns_text",
")",
":",
"return",
"orig_text",
"# We then project the characters in `pred_text` back to `orig_text` using",
"# the character-to-character alignment.",
"tok_s_to_ns_map",
"=",
"{",
"}",
"for",
"(",
"i",
",",
"tok_index",
")",
"in",
"six",
".",
"iteritems",
"(",
"tok_ns_to_s_map",
")",
":",
"tok_s_to_ns_map",
"[",
"tok_index",
"]",
"=",
"i",
"orig_start_position",
"=",
"None",
"if",
"start_position",
"in",
"tok_s_to_ns_map",
":",
"ns_start_position",
"=",
"tok_s_to_ns_map",
"[",
"start_position",
"]",
"if",
"ns_start_position",
"in",
"orig_ns_to_s_map",
":",
"orig_start_position",
"=",
"orig_ns_to_s_map",
"[",
"ns_start_position",
"]",
"if",
"orig_start_position",
"is",
"None",
":",
"return",
"orig_text",
"orig_end_position",
"=",
"None",
"if",
"end_position",
"in",
"tok_s_to_ns_map",
":",
"ns_end_position",
"=",
"tok_s_to_ns_map",
"[",
"end_position",
"]",
"if",
"ns_end_position",
"in",
"orig_ns_to_s_map",
":",
"orig_end_position",
"=",
"orig_ns_to_s_map",
"[",
"ns_end_position",
"]",
"if",
"orig_end_position",
"is",
"None",
":",
"return",
"orig_text",
"output_text",
"=",
"orig_text",
"[",
"orig_start_position",
":",
"(",
"orig_end_position",
"+",
"1",
")",
"]",
"return",
"output_text"
] | Project the tokenized prediction back to the original text. | [
"Project",
"the",
"tokenized",
"prediction",
"back",
"to",
"the",
"original",
"text",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/bert_qa_evaluate.py#L38-L120 | train |
dmlc/gluon-nlp | scripts/bert/bert_qa_evaluate.py | predictions | def predictions(dev_dataset,
all_results,
tokenizer,
max_answer_length=64,
null_score_diff_threshold=0.0,
n_best_size=10,
version_2=False):
"""Get prediction results
Parameters
----------
dev_dataset: dataset
Examples of transform.
all_results: dict
A dictionary containing model prediction results.
tokenizer: callable
Tokenizer function.
max_answer_length: int, default 64
Maximum length of the answer tokens.
null_score_diff_threshold: float, default 0.0
If null_score - best_non_null is greater than the threshold predict null.
n_best_size: int, default 10
The total number of n-best predictions.
version_2: bool, default False
If true, the SQuAD examples contain some that do not have an answer.
Returns
-------
all_predictions: dict
All final predictions.
all_nbest_json: dict
All n-best predictions.
scores_diff_json: dict
Record the difference between null score and the score of best non-null.
when version_2 is True.
"""
_PrelimPrediction = namedtuple('PrelimPrediction',
['feature_index', 'start_index', 'end_index',
'start_logit', 'end_logit'])
_NbestPrediction = namedtuple(
'NbestPrediction', ['text', 'start_logit', 'end_logit'])
all_predictions = OrderedDict()
all_nbest_json = OrderedDict()
scores_diff_json = OrderedDict()
for features in dev_dataset:
results = all_results[features[0].example_id]
example_qas_id = features[0].qas_id
prelim_predictions = []
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min mull score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for features_id, (result, feature) in enumerate(zip(results, features)):
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
if version_2:
feature_null_score = result.start_logits[0] + \
result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = features_id
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=features_id,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(
pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = feature.doc_tokens[orig_doc_start:(
orig_doc_end + 1)]
tok_text = ' '.join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(' ##', '')
tok_text = tok_text.replace('##', '')
# Clean whitespace
tok_text = tok_text.strip()
tok_text = ' '.join(tok_text.split())
orig_text = ' '.join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, tokenizer)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ''
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't inlude the empty option in the n-best, inlcude it
if version_2:
if '' not in seen_predictions:
nbest.append(
_NbestPrediction(
text='',
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = nd.softmax(nd.array(total_scores)).asnumpy()
nbest_json = []
for (i, entry) in enumerate(nbest):
output = OrderedDict()
output['text'] = entry.text
output['probability'] = float(probs[i])
output['start_logit'] = entry.start_logit
output['end_logit'] = entry.end_logit
nbest_json.append(output)
if not version_2:
all_predictions[example_qas_id] = nbest_json[0]['text']
else:
# predict '' iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - \
best_non_null_entry.end_logit
scores_diff_json[example_qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example_qas_id] = ''
else:
all_predictions[example_qas_id] = best_non_null_entry.text
all_nbest_json[example_qas_id] = nbest_json
return all_predictions, all_nbest_json, scores_diff_json | python | def predictions(dev_dataset,
all_results,
tokenizer,
max_answer_length=64,
null_score_diff_threshold=0.0,
n_best_size=10,
version_2=False):
"""Get prediction results
Parameters
----------
dev_dataset: dataset
Examples of transform.
all_results: dict
A dictionary containing model prediction results.
tokenizer: callable
Tokenizer function.
max_answer_length: int, default 64
Maximum length of the answer tokens.
null_score_diff_threshold: float, default 0.0
If null_score - best_non_null is greater than the threshold predict null.
n_best_size: int, default 10
The total number of n-best predictions.
version_2: bool, default False
If true, the SQuAD examples contain some that do not have an answer.
Returns
-------
all_predictions: dict
All final predictions.
all_nbest_json: dict
All n-best predictions.
scores_diff_json: dict
Record the difference between null score and the score of best non-null.
when version_2 is True.
"""
_PrelimPrediction = namedtuple('PrelimPrediction',
['feature_index', 'start_index', 'end_index',
'start_logit', 'end_logit'])
_NbestPrediction = namedtuple(
'NbestPrediction', ['text', 'start_logit', 'end_logit'])
all_predictions = OrderedDict()
all_nbest_json = OrderedDict()
scores_diff_json = OrderedDict()
for features in dev_dataset:
results = all_results[features[0].example_id]
example_qas_id = features[0].qas_id
prelim_predictions = []
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min mull score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for features_id, (result, feature) in enumerate(zip(results, features)):
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
if version_2:
feature_null_score = result.start_logits[0] + \
result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = features_id
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=features_id,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(
pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = feature.doc_tokens[orig_doc_start:(
orig_doc_end + 1)]
tok_text = ' '.join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(' ##', '')
tok_text = tok_text.replace('##', '')
# Clean whitespace
tok_text = tok_text.strip()
tok_text = ' '.join(tok_text.split())
orig_text = ' '.join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, tokenizer)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ''
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't inlude the empty option in the n-best, inlcude it
if version_2:
if '' not in seen_predictions:
nbest.append(
_NbestPrediction(
text='',
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = nd.softmax(nd.array(total_scores)).asnumpy()
nbest_json = []
for (i, entry) in enumerate(nbest):
output = OrderedDict()
output['text'] = entry.text
output['probability'] = float(probs[i])
output['start_logit'] = entry.start_logit
output['end_logit'] = entry.end_logit
nbest_json.append(output)
if not version_2:
all_predictions[example_qas_id] = nbest_json[0]['text']
else:
# predict '' iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - \
best_non_null_entry.end_logit
scores_diff_json[example_qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example_qas_id] = ''
else:
all_predictions[example_qas_id] = best_non_null_entry.text
all_nbest_json[example_qas_id] = nbest_json
return all_predictions, all_nbest_json, scores_diff_json | [
"def",
"predictions",
"(",
"dev_dataset",
",",
"all_results",
",",
"tokenizer",
",",
"max_answer_length",
"=",
"64",
",",
"null_score_diff_threshold",
"=",
"0.0",
",",
"n_best_size",
"=",
"10",
",",
"version_2",
"=",
"False",
")",
":",
"_PrelimPrediction",
"=",
"namedtuple",
"(",
"'PrelimPrediction'",
",",
"[",
"'feature_index'",
",",
"'start_index'",
",",
"'end_index'",
",",
"'start_logit'",
",",
"'end_logit'",
"]",
")",
"_NbestPrediction",
"=",
"namedtuple",
"(",
"'NbestPrediction'",
",",
"[",
"'text'",
",",
"'start_logit'",
",",
"'end_logit'",
"]",
")",
"all_predictions",
"=",
"OrderedDict",
"(",
")",
"all_nbest_json",
"=",
"OrderedDict",
"(",
")",
"scores_diff_json",
"=",
"OrderedDict",
"(",
")",
"for",
"features",
"in",
"dev_dataset",
":",
"results",
"=",
"all_results",
"[",
"features",
"[",
"0",
"]",
".",
"example_id",
"]",
"example_qas_id",
"=",
"features",
"[",
"0",
"]",
".",
"qas_id",
"prelim_predictions",
"=",
"[",
"]",
"score_null",
"=",
"1000000",
"# large and positive",
"min_null_feature_index",
"=",
"0",
"# the paragraph slice with min mull score",
"null_start_logit",
"=",
"0",
"# the start logit at the slice with min null score",
"null_end_logit",
"=",
"0",
"# the end logit at the slice with min null score",
"for",
"features_id",
",",
"(",
"result",
",",
"feature",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"results",
",",
"features",
")",
")",
":",
"start_indexes",
"=",
"_get_best_indexes",
"(",
"result",
".",
"start_logits",
",",
"n_best_size",
")",
"end_indexes",
"=",
"_get_best_indexes",
"(",
"result",
".",
"end_logits",
",",
"n_best_size",
")",
"if",
"version_2",
":",
"feature_null_score",
"=",
"result",
".",
"start_logits",
"[",
"0",
"]",
"+",
"result",
".",
"end_logits",
"[",
"0",
"]",
"if",
"feature_null_score",
"<",
"score_null",
":",
"score_null",
"=",
"feature_null_score",
"min_null_feature_index",
"=",
"features_id",
"null_start_logit",
"=",
"result",
".",
"start_logits",
"[",
"0",
"]",
"null_end_logit",
"=",
"result",
".",
"end_logits",
"[",
"0",
"]",
"for",
"start_index",
"in",
"start_indexes",
":",
"for",
"end_index",
"in",
"end_indexes",
":",
"# We could hypothetically create invalid predictions, e.g., predict",
"# that the start of the span is in the question. We throw out all",
"# invalid predictions.",
"if",
"start_index",
">=",
"len",
"(",
"feature",
".",
"tokens",
")",
":",
"continue",
"if",
"end_index",
">=",
"len",
"(",
"feature",
".",
"tokens",
")",
":",
"continue",
"if",
"start_index",
"not",
"in",
"feature",
".",
"token_to_orig_map",
":",
"continue",
"if",
"end_index",
"not",
"in",
"feature",
".",
"token_to_orig_map",
":",
"continue",
"if",
"not",
"feature",
".",
"token_is_max_context",
".",
"get",
"(",
"start_index",
",",
"False",
")",
":",
"continue",
"if",
"end_index",
"<",
"start_index",
":",
"continue",
"length",
"=",
"end_index",
"-",
"start_index",
"+",
"1",
"if",
"length",
">",
"max_answer_length",
":",
"continue",
"prelim_predictions",
".",
"append",
"(",
"_PrelimPrediction",
"(",
"feature_index",
"=",
"features_id",
",",
"start_index",
"=",
"start_index",
",",
"end_index",
"=",
"end_index",
",",
"start_logit",
"=",
"result",
".",
"start_logits",
"[",
"start_index",
"]",
",",
"end_logit",
"=",
"result",
".",
"end_logits",
"[",
"end_index",
"]",
")",
")",
"if",
"version_2",
":",
"prelim_predictions",
".",
"append",
"(",
"_PrelimPrediction",
"(",
"feature_index",
"=",
"min_null_feature_index",
",",
"start_index",
"=",
"0",
",",
"end_index",
"=",
"0",
",",
"start_logit",
"=",
"null_start_logit",
",",
"end_logit",
"=",
"null_end_logit",
")",
")",
"prelim_predictions",
"=",
"sorted",
"(",
"prelim_predictions",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
".",
"start_logit",
"+",
"x",
".",
"end_logit",
")",
",",
"reverse",
"=",
"True",
")",
"seen_predictions",
"=",
"{",
"}",
"nbest",
"=",
"[",
"]",
"for",
"pred",
"in",
"prelim_predictions",
":",
"if",
"len",
"(",
"nbest",
")",
">=",
"n_best_size",
":",
"break",
"feature",
"=",
"features",
"[",
"pred",
".",
"feature_index",
"]",
"if",
"pred",
".",
"start_index",
">",
"0",
":",
"# this is a non-null prediction",
"tok_tokens",
"=",
"feature",
".",
"tokens",
"[",
"pred",
".",
"start_index",
":",
"(",
"pred",
".",
"end_index",
"+",
"1",
")",
"]",
"orig_doc_start",
"=",
"feature",
".",
"token_to_orig_map",
"[",
"pred",
".",
"start_index",
"]",
"orig_doc_end",
"=",
"feature",
".",
"token_to_orig_map",
"[",
"pred",
".",
"end_index",
"]",
"orig_tokens",
"=",
"feature",
".",
"doc_tokens",
"[",
"orig_doc_start",
":",
"(",
"orig_doc_end",
"+",
"1",
")",
"]",
"tok_text",
"=",
"' '",
".",
"join",
"(",
"tok_tokens",
")",
"# De-tokenize WordPieces that have been split off.",
"tok_text",
"=",
"tok_text",
".",
"replace",
"(",
"' ##'",
",",
"''",
")",
"tok_text",
"=",
"tok_text",
".",
"replace",
"(",
"'##'",
",",
"''",
")",
"# Clean whitespace",
"tok_text",
"=",
"tok_text",
".",
"strip",
"(",
")",
"tok_text",
"=",
"' '",
".",
"join",
"(",
"tok_text",
".",
"split",
"(",
")",
")",
"orig_text",
"=",
"' '",
".",
"join",
"(",
"orig_tokens",
")",
"final_text",
"=",
"get_final_text",
"(",
"tok_text",
",",
"orig_text",
",",
"tokenizer",
")",
"if",
"final_text",
"in",
"seen_predictions",
":",
"continue",
"seen_predictions",
"[",
"final_text",
"]",
"=",
"True",
"else",
":",
"final_text",
"=",
"''",
"seen_predictions",
"[",
"final_text",
"]",
"=",
"True",
"nbest",
".",
"append",
"(",
"_NbestPrediction",
"(",
"text",
"=",
"final_text",
",",
"start_logit",
"=",
"pred",
".",
"start_logit",
",",
"end_logit",
"=",
"pred",
".",
"end_logit",
")",
")",
"# if we didn't inlude the empty option in the n-best, inlcude it",
"if",
"version_2",
":",
"if",
"''",
"not",
"in",
"seen_predictions",
":",
"nbest",
".",
"append",
"(",
"_NbestPrediction",
"(",
"text",
"=",
"''",
",",
"start_logit",
"=",
"null_start_logit",
",",
"end_logit",
"=",
"null_end_logit",
")",
")",
"# In very rare edge cases we could have no valid predictions. So we",
"# just create a nonce prediction in this case to avoid failure.",
"if",
"not",
"nbest",
":",
"nbest",
".",
"append",
"(",
"_NbestPrediction",
"(",
"text",
"=",
"'empty'",
",",
"start_logit",
"=",
"0.0",
",",
"end_logit",
"=",
"0.0",
")",
")",
"assert",
"len",
"(",
"nbest",
")",
">=",
"1",
"total_scores",
"=",
"[",
"]",
"best_non_null_entry",
"=",
"None",
"for",
"entry",
"in",
"nbest",
":",
"total_scores",
".",
"append",
"(",
"entry",
".",
"start_logit",
"+",
"entry",
".",
"end_logit",
")",
"if",
"not",
"best_non_null_entry",
":",
"if",
"entry",
".",
"text",
":",
"best_non_null_entry",
"=",
"entry",
"probs",
"=",
"nd",
".",
"softmax",
"(",
"nd",
".",
"array",
"(",
"total_scores",
")",
")",
".",
"asnumpy",
"(",
")",
"nbest_json",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"entry",
")",
"in",
"enumerate",
"(",
"nbest",
")",
":",
"output",
"=",
"OrderedDict",
"(",
")",
"output",
"[",
"'text'",
"]",
"=",
"entry",
".",
"text",
"output",
"[",
"'probability'",
"]",
"=",
"float",
"(",
"probs",
"[",
"i",
"]",
")",
"output",
"[",
"'start_logit'",
"]",
"=",
"entry",
".",
"start_logit",
"output",
"[",
"'end_logit'",
"]",
"=",
"entry",
".",
"end_logit",
"nbest_json",
".",
"append",
"(",
"output",
")",
"if",
"not",
"version_2",
":",
"all_predictions",
"[",
"example_qas_id",
"]",
"=",
"nbest_json",
"[",
"0",
"]",
"[",
"'text'",
"]",
"else",
":",
"# predict '' iff the null score - the score of best non-null > threshold",
"score_diff",
"=",
"score_null",
"-",
"best_non_null_entry",
".",
"start_logit",
"-",
"best_non_null_entry",
".",
"end_logit",
"scores_diff_json",
"[",
"example_qas_id",
"]",
"=",
"score_diff",
"if",
"score_diff",
">",
"null_score_diff_threshold",
":",
"all_predictions",
"[",
"example_qas_id",
"]",
"=",
"''",
"else",
":",
"all_predictions",
"[",
"example_qas_id",
"]",
"=",
"best_non_null_entry",
".",
"text",
"all_nbest_json",
"[",
"example_qas_id",
"]",
"=",
"nbest_json",
"return",
"all_predictions",
",",
"all_nbest_json",
",",
"scores_diff_json"
] | Get prediction results
Parameters
----------
dev_dataset: dataset
Examples of transform.
all_results: dict
A dictionary containing model prediction results.
tokenizer: callable
Tokenizer function.
max_answer_length: int, default 64
Maximum length of the answer tokens.
null_score_diff_threshold: float, default 0.0
If null_score - best_non_null is greater than the threshold predict null.
n_best_size: int, default 10
The total number of n-best predictions.
version_2: bool, default False
If true, the SQuAD examples contain some that do not have an answer.
Returns
-------
all_predictions: dict
All final predictions.
all_nbest_json: dict
All n-best predictions.
scores_diff_json: dict
Record the difference between null score and the score of best non-null.
when version_2 is True. | [
"Get",
"prediction",
"results"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/bert_qa_evaluate.py#L123-L323 | train |
dmlc/gluon-nlp | scripts/bert/bert_qa_evaluate.py | get_F1_EM | def get_F1_EM(dataset, predict_data):
"""Calculate the F1 and EM scores of the predicted results.
Use only with the SQuAD1.1 dataset.
Parameters
----------
dataset_file: string
Path to the data file.
predict_data: dict
All final predictions.
Returns
-------
scores: dict
F1 and EM scores.
"""
f1 = exact_match = total = 0
for record in dataset:
total += 1
if record[1] not in predict_data:
message = 'Unanswered question ' + record[1] + \
' will receive score 0.'
print(message)
continue
ground_truths = record[4]
prediction = predict_data[record[1]]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction,
ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
scores = {'exact_match': exact_match, 'f1': f1}
return scores | python | def get_F1_EM(dataset, predict_data):
"""Calculate the F1 and EM scores of the predicted results.
Use only with the SQuAD1.1 dataset.
Parameters
----------
dataset_file: string
Path to the data file.
predict_data: dict
All final predictions.
Returns
-------
scores: dict
F1 and EM scores.
"""
f1 = exact_match = total = 0
for record in dataset:
total += 1
if record[1] not in predict_data:
message = 'Unanswered question ' + record[1] + \
' will receive score 0.'
print(message)
continue
ground_truths = record[4]
prediction = predict_data[record[1]]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction,
ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
scores = {'exact_match': exact_match, 'f1': f1}
return scores | [
"def",
"get_F1_EM",
"(",
"dataset",
",",
"predict_data",
")",
":",
"f1",
"=",
"exact_match",
"=",
"total",
"=",
"0",
"for",
"record",
"in",
"dataset",
":",
"total",
"+=",
"1",
"if",
"record",
"[",
"1",
"]",
"not",
"in",
"predict_data",
":",
"message",
"=",
"'Unanswered question '",
"+",
"record",
"[",
"1",
"]",
"+",
"' will receive score 0.'",
"print",
"(",
"message",
")",
"continue",
"ground_truths",
"=",
"record",
"[",
"4",
"]",
"prediction",
"=",
"predict_data",
"[",
"record",
"[",
"1",
"]",
"]",
"exact_match",
"+=",
"metric_max_over_ground_truths",
"(",
"exact_match_score",
",",
"prediction",
",",
"ground_truths",
")",
"f1",
"+=",
"metric_max_over_ground_truths",
"(",
"f1_score",
",",
"prediction",
",",
"ground_truths",
")",
"exact_match",
"=",
"100.0",
"*",
"exact_match",
"/",
"total",
"f1",
"=",
"100.0",
"*",
"f1",
"/",
"total",
"scores",
"=",
"{",
"'exact_match'",
":",
"exact_match",
",",
"'f1'",
":",
"f1",
"}",
"return",
"scores"
] | Calculate the F1 and EM scores of the predicted results.
Use only with the SQuAD1.1 dataset.
Parameters
----------
dataset_file: string
Path to the data file.
predict_data: dict
All final predictions.
Returns
-------
scores: dict
F1 and EM scores. | [
"Calculate",
"the",
"F1",
"and",
"EM",
"scores",
"of",
"the",
"predicted",
"results",
".",
"Use",
"only",
"with",
"the",
"SQuAD1",
".",
"1",
"dataset",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/bert_qa_evaluate.py#L374-L409 | train |
dmlc/gluon-nlp | scripts/bert/finetune_classifier.py | preprocess_data | def preprocess_data(tokenizer, task, batch_size, dev_batch_size, max_len, pad=False):
"""Data preparation function."""
# transformation
trans = BERTDatasetTransform(
tokenizer,
max_len,
labels=task.get_labels(),
pad=pad,
pair=task.is_pair,
label_dtype='float32' if not task.get_labels() else 'int32')
data_train = task('train').transform(trans, lazy=False)
data_train_len = data_train.transform(
lambda input_id, length, segment_id, label_id: length)
num_samples_train = len(data_train)
# bucket sampler
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0),
nlp.data.batchify.Stack(
'float32' if not task.get_labels() else 'int32'))
batch_sampler = nlp.data.sampler.FixedBucketSampler(
data_train_len,
batch_size=batch_size,
num_buckets=10,
ratio=0,
shuffle=True)
# data loaders
dataloader_train = gluon.data.DataLoader(
dataset=data_train,
num_workers=1,
batch_sampler=batch_sampler,
batchify_fn=batchify_fn)
if task.task_name == 'MNLI':
data_dev_matched = task('dev_matched').transform(trans, lazy=False)
data_dev_mismatched = task('dev_mismatched').transform(trans, lazy=False)
dataloader_dev_matched = mx.gluon.data.DataLoader(
data_dev_matched, batch_size=dev_batch_size,
num_workers=1, shuffle=False, batchify_fn=batchify_fn)
dataloader_dev_mismatched = mx.gluon.data.DataLoader(
data_dev_mismatched, batch_size=dev_batch_size,
num_workers=1, shuffle=False, batchify_fn=batchify_fn)
return dataloader_train, dataloader_dev_matched, \
dataloader_dev_mismatched, num_samples_train
else:
data_dev = task('dev').transform(trans, lazy=False)
dataloader_dev = mx.gluon.data.DataLoader(
data_dev,
batch_size=dev_batch_size,
num_workers=1,
shuffle=False,
batchify_fn=batchify_fn)
return dataloader_train, dataloader_dev, num_samples_train | python | def preprocess_data(tokenizer, task, batch_size, dev_batch_size, max_len, pad=False):
"""Data preparation function."""
# transformation
trans = BERTDatasetTransform(
tokenizer,
max_len,
labels=task.get_labels(),
pad=pad,
pair=task.is_pair,
label_dtype='float32' if not task.get_labels() else 'int32')
data_train = task('train').transform(trans, lazy=False)
data_train_len = data_train.transform(
lambda input_id, length, segment_id, label_id: length)
num_samples_train = len(data_train)
# bucket sampler
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0),
nlp.data.batchify.Stack(
'float32' if not task.get_labels() else 'int32'))
batch_sampler = nlp.data.sampler.FixedBucketSampler(
data_train_len,
batch_size=batch_size,
num_buckets=10,
ratio=0,
shuffle=True)
# data loaders
dataloader_train = gluon.data.DataLoader(
dataset=data_train,
num_workers=1,
batch_sampler=batch_sampler,
batchify_fn=batchify_fn)
if task.task_name == 'MNLI':
data_dev_matched = task('dev_matched').transform(trans, lazy=False)
data_dev_mismatched = task('dev_mismatched').transform(trans, lazy=False)
dataloader_dev_matched = mx.gluon.data.DataLoader(
data_dev_matched, batch_size=dev_batch_size,
num_workers=1, shuffle=False, batchify_fn=batchify_fn)
dataloader_dev_mismatched = mx.gluon.data.DataLoader(
data_dev_mismatched, batch_size=dev_batch_size,
num_workers=1, shuffle=False, batchify_fn=batchify_fn)
return dataloader_train, dataloader_dev_matched, \
dataloader_dev_mismatched, num_samples_train
else:
data_dev = task('dev').transform(trans, lazy=False)
dataloader_dev = mx.gluon.data.DataLoader(
data_dev,
batch_size=dev_batch_size,
num_workers=1,
shuffle=False,
batchify_fn=batchify_fn)
return dataloader_train, dataloader_dev, num_samples_train | [
"def",
"preprocess_data",
"(",
"tokenizer",
",",
"task",
",",
"batch_size",
",",
"dev_batch_size",
",",
"max_len",
",",
"pad",
"=",
"False",
")",
":",
"# transformation",
"trans",
"=",
"BERTDatasetTransform",
"(",
"tokenizer",
",",
"max_len",
",",
"labels",
"=",
"task",
".",
"get_labels",
"(",
")",
",",
"pad",
"=",
"pad",
",",
"pair",
"=",
"task",
".",
"is_pair",
",",
"label_dtype",
"=",
"'float32'",
"if",
"not",
"task",
".",
"get_labels",
"(",
")",
"else",
"'int32'",
")",
"data_train",
"=",
"task",
"(",
"'train'",
")",
".",
"transform",
"(",
"trans",
",",
"lazy",
"=",
"False",
")",
"data_train_len",
"=",
"data_train",
".",
"transform",
"(",
"lambda",
"input_id",
",",
"length",
",",
"segment_id",
",",
"label_id",
":",
"length",
")",
"num_samples_train",
"=",
"len",
"(",
"data_train",
")",
"# bucket sampler",
"batchify_fn",
"=",
"nlp",
".",
"data",
".",
"batchify",
".",
"Tuple",
"(",
"nlp",
".",
"data",
".",
"batchify",
".",
"Pad",
"(",
"axis",
"=",
"0",
")",
",",
"nlp",
".",
"data",
".",
"batchify",
".",
"Stack",
"(",
")",
",",
"nlp",
".",
"data",
".",
"batchify",
".",
"Pad",
"(",
"axis",
"=",
"0",
")",
",",
"nlp",
".",
"data",
".",
"batchify",
".",
"Stack",
"(",
"'float32'",
"if",
"not",
"task",
".",
"get_labels",
"(",
")",
"else",
"'int32'",
")",
")",
"batch_sampler",
"=",
"nlp",
".",
"data",
".",
"sampler",
".",
"FixedBucketSampler",
"(",
"data_train_len",
",",
"batch_size",
"=",
"batch_size",
",",
"num_buckets",
"=",
"10",
",",
"ratio",
"=",
"0",
",",
"shuffle",
"=",
"True",
")",
"# data loaders",
"dataloader_train",
"=",
"gluon",
".",
"data",
".",
"DataLoader",
"(",
"dataset",
"=",
"data_train",
",",
"num_workers",
"=",
"1",
",",
"batch_sampler",
"=",
"batch_sampler",
",",
"batchify_fn",
"=",
"batchify_fn",
")",
"if",
"task",
".",
"task_name",
"==",
"'MNLI'",
":",
"data_dev_matched",
"=",
"task",
"(",
"'dev_matched'",
")",
".",
"transform",
"(",
"trans",
",",
"lazy",
"=",
"False",
")",
"data_dev_mismatched",
"=",
"task",
"(",
"'dev_mismatched'",
")",
".",
"transform",
"(",
"trans",
",",
"lazy",
"=",
"False",
")",
"dataloader_dev_matched",
"=",
"mx",
".",
"gluon",
".",
"data",
".",
"DataLoader",
"(",
"data_dev_matched",
",",
"batch_size",
"=",
"dev_batch_size",
",",
"num_workers",
"=",
"1",
",",
"shuffle",
"=",
"False",
",",
"batchify_fn",
"=",
"batchify_fn",
")",
"dataloader_dev_mismatched",
"=",
"mx",
".",
"gluon",
".",
"data",
".",
"DataLoader",
"(",
"data_dev_mismatched",
",",
"batch_size",
"=",
"dev_batch_size",
",",
"num_workers",
"=",
"1",
",",
"shuffle",
"=",
"False",
",",
"batchify_fn",
"=",
"batchify_fn",
")",
"return",
"dataloader_train",
",",
"dataloader_dev_matched",
",",
"dataloader_dev_mismatched",
",",
"num_samples_train",
"else",
":",
"data_dev",
"=",
"task",
"(",
"'dev'",
")",
".",
"transform",
"(",
"trans",
",",
"lazy",
"=",
"False",
")",
"dataloader_dev",
"=",
"mx",
".",
"gluon",
".",
"data",
".",
"DataLoader",
"(",
"data_dev",
",",
"batch_size",
"=",
"dev_batch_size",
",",
"num_workers",
"=",
"1",
",",
"shuffle",
"=",
"False",
",",
"batchify_fn",
"=",
"batchify_fn",
")",
"return",
"dataloader_train",
",",
"dataloader_dev",
",",
"num_samples_train"
] | Data preparation function. | [
"Data",
"preparation",
"function",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/finetune_classifier.py#L247-L301 | train |
dmlc/gluon-nlp | scripts/bert/finetune_classifier.py | evaluate | def evaluate(dataloader_eval, metric):
"""Evaluate the model on validation dataset.
"""
metric.reset()
for _, seqs in enumerate(dataloader_eval):
input_ids, valid_len, type_ids, label = seqs
out = model(
input_ids.as_in_context(ctx), type_ids.as_in_context(ctx),
valid_len.astype('float32').as_in_context(ctx))
metric.update([label], [out])
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
metric_val = [metric_val]
metric_str = 'validation metrics:' + ','.join(
[i + ':%.4f' for i in metric_nm])
logging.info(metric_str, *metric_val) | python | def evaluate(dataloader_eval, metric):
"""Evaluate the model on validation dataset.
"""
metric.reset()
for _, seqs in enumerate(dataloader_eval):
input_ids, valid_len, type_ids, label = seqs
out = model(
input_ids.as_in_context(ctx), type_ids.as_in_context(ctx),
valid_len.astype('float32').as_in_context(ctx))
metric.update([label], [out])
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
metric_val = [metric_val]
metric_str = 'validation metrics:' + ','.join(
[i + ':%.4f' for i in metric_nm])
logging.info(metric_str, *metric_val) | [
"def",
"evaluate",
"(",
"dataloader_eval",
",",
"metric",
")",
":",
"metric",
".",
"reset",
"(",
")",
"for",
"_",
",",
"seqs",
"in",
"enumerate",
"(",
"dataloader_eval",
")",
":",
"input_ids",
",",
"valid_len",
",",
"type_ids",
",",
"label",
"=",
"seqs",
"out",
"=",
"model",
"(",
"input_ids",
".",
"as_in_context",
"(",
"ctx",
")",
",",
"type_ids",
".",
"as_in_context",
"(",
"ctx",
")",
",",
"valid_len",
".",
"astype",
"(",
"'float32'",
")",
".",
"as_in_context",
"(",
"ctx",
")",
")",
"metric",
".",
"update",
"(",
"[",
"label",
"]",
",",
"[",
"out",
"]",
")",
"metric_nm",
",",
"metric_val",
"=",
"metric",
".",
"get",
"(",
")",
"if",
"not",
"isinstance",
"(",
"metric_nm",
",",
"list",
")",
":",
"metric_nm",
"=",
"[",
"metric_nm",
"]",
"metric_val",
"=",
"[",
"metric_val",
"]",
"metric_str",
"=",
"'validation metrics:'",
"+",
"','",
".",
"join",
"(",
"[",
"i",
"+",
"':%.4f'",
"for",
"i",
"in",
"metric_nm",
"]",
")",
"logging",
".",
"info",
"(",
"metric_str",
",",
"*",
"metric_val",
")"
] | Evaluate the model on validation dataset. | [
"Evaluate",
"the",
"model",
"on",
"validation",
"dataset",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/finetune_classifier.py#L314-L330 | train |
dmlc/gluon-nlp | scripts/bert/finetune_classifier.py | log_train | def log_train(batch_id, batch_num, metric, step_loss, log_interval, epoch_id, learning_rate):
"""Generate and print out the log message for training.
"""
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
metric_val = [metric_val]
train_str = '[Epoch %d Batch %d/%d] loss=%.4f, lr=%.7f, metrics:' + \
','.join([i + ':%.4f' for i in metric_nm])
logging.info(train_str, epoch_id + 1, batch_id + 1, batch_num, \
step_loss / log_interval, \
learning_rate, \
*metric_val) | python | def log_train(batch_id, batch_num, metric, step_loss, log_interval, epoch_id, learning_rate):
"""Generate and print out the log message for training.
"""
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
metric_val = [metric_val]
train_str = '[Epoch %d Batch %d/%d] loss=%.4f, lr=%.7f, metrics:' + \
','.join([i + ':%.4f' for i in metric_nm])
logging.info(train_str, epoch_id + 1, batch_id + 1, batch_num, \
step_loss / log_interval, \
learning_rate, \
*metric_val) | [
"def",
"log_train",
"(",
"batch_id",
",",
"batch_num",
",",
"metric",
",",
"step_loss",
",",
"log_interval",
",",
"epoch_id",
",",
"learning_rate",
")",
":",
"metric_nm",
",",
"metric_val",
"=",
"metric",
".",
"get",
"(",
")",
"if",
"not",
"isinstance",
"(",
"metric_nm",
",",
"list",
")",
":",
"metric_nm",
"=",
"[",
"metric_nm",
"]",
"metric_val",
"=",
"[",
"metric_val",
"]",
"train_str",
"=",
"'[Epoch %d Batch %d/%d] loss=%.4f, lr=%.7f, metrics:'",
"+",
"','",
".",
"join",
"(",
"[",
"i",
"+",
"':%.4f'",
"for",
"i",
"in",
"metric_nm",
"]",
")",
"logging",
".",
"info",
"(",
"train_str",
",",
"epoch_id",
"+",
"1",
",",
"batch_id",
"+",
"1",
",",
"batch_num",
",",
"step_loss",
"/",
"log_interval",
",",
"learning_rate",
",",
"*",
"metric_val",
")"
] | Generate and print out the log message for training. | [
"Generate",
"and",
"print",
"out",
"the",
"log",
"message",
"for",
"training",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/finetune_classifier.py#L333-L346 | train |
dmlc/gluon-nlp | scripts/bert/finetune_classifier.py | log_inference | def log_inference(batch_id, batch_num, metric, step_loss, log_interval):
"""Generate and print out the log message for inference.
"""
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
metric_val = [metric_val]
eval_str = '[Batch %d/%d] loss=%.4f, metrics:' + \
','.join([i + ':%.4f' for i in metric_nm])
logging.info(eval_str, batch_id + 1, batch_num, \
step_loss / log_interval, \
*metric_val) | python | def log_inference(batch_id, batch_num, metric, step_loss, log_interval):
"""Generate and print out the log message for inference.
"""
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
metric_val = [metric_val]
eval_str = '[Batch %d/%d] loss=%.4f, metrics:' + \
','.join([i + ':%.4f' for i in metric_nm])
logging.info(eval_str, batch_id + 1, batch_num, \
step_loss / log_interval, \
*metric_val) | [
"def",
"log_inference",
"(",
"batch_id",
",",
"batch_num",
",",
"metric",
",",
"step_loss",
",",
"log_interval",
")",
":",
"metric_nm",
",",
"metric_val",
"=",
"metric",
".",
"get",
"(",
")",
"if",
"not",
"isinstance",
"(",
"metric_nm",
",",
"list",
")",
":",
"metric_nm",
"=",
"[",
"metric_nm",
"]",
"metric_val",
"=",
"[",
"metric_val",
"]",
"eval_str",
"=",
"'[Batch %d/%d] loss=%.4f, metrics:'",
"+",
"','",
".",
"join",
"(",
"[",
"i",
"+",
"':%.4f'",
"for",
"i",
"in",
"metric_nm",
"]",
")",
"logging",
".",
"info",
"(",
"eval_str",
",",
"batch_id",
"+",
"1",
",",
"batch_num",
",",
"step_loss",
"/",
"log_interval",
",",
"*",
"metric_val",
")"
] | Generate and print out the log message for inference. | [
"Generate",
"and",
"print",
"out",
"the",
"log",
"message",
"for",
"inference",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/finetune_classifier.py#L349-L361 | train |
dmlc/gluon-nlp | scripts/bert/finetune_classifier.py | train | def train(metric):
"""Training function."""
logging.info('Now we are doing BERT classification training on %s!', ctx)
optimizer_params = {'learning_rate': lr, 'epsilon': epsilon, 'wd': 0.01}
try:
trainer = gluon.Trainer(
model.collect_params(),
args.optimizer,
optimizer_params,
update_on_kvstore=False)
except ValueError as e:
print(e)
warnings.warn(
'AdamW optimizer is not found. Please consider upgrading to '
'mxnet>=1.5.0. Now the original Adam optimizer is used instead.')
trainer = gluon.Trainer(
model.collect_params(),
'adam',
optimizer_params,
update_on_kvstore=False)
step_size = batch_size * accumulate if accumulate else batch_size
num_train_steps = int(num_train_examples / step_size * args.epochs)
warmup_ratio = args.warmup_ratio
num_warmup_steps = int(num_train_steps * warmup_ratio)
step_num = 0
# Do not apply weight decay on LayerNorm and bias terms
for _, v in model.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
# Collect differentiable parameters
params = [
p for p in model.collect_params().values() if p.grad_req != 'null'
]
# Set grad_req if gradient accumulation is required
if accumulate:
for p in params:
p.grad_req = 'add'
for epoch_id in range(args.epochs):
metric.reset()
step_loss = 0
tic = time.time()
for batch_id, seqs in enumerate(train_data):
# set grad to zero for gradient accumulation
if accumulate:
if batch_id % accumulate == 0:
model.collect_params().zero_grad()
step_num += 1
else:
step_num += 1
# learning rate schedule
if step_num < num_warmup_steps:
new_lr = lr * step_num / num_warmup_steps
else:
offset = (step_num - num_warmup_steps) * lr / (
num_train_steps - num_warmup_steps)
new_lr = lr - offset
trainer.set_learning_rate(new_lr)
# forward and backward
with mx.autograd.record():
input_ids, valid_length, type_ids, label = seqs
out = model(
input_ids.as_in_context(ctx), type_ids.as_in_context(ctx),
valid_length.astype('float32').as_in_context(ctx))
ls = loss_function(out, label.as_in_context(ctx)).mean()
ls.backward()
# update
if not accumulate or (batch_id + 1) % accumulate == 0:
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(params, 1)
trainer.update(accumulate if accumulate else 1)
step_loss += ls.asscalar()
metric.update([label], [out])
if (batch_id + 1) % (args.log_interval) == 0:
log_train(batch_id, len(train_data), metric, step_loss, args.log_interval,
epoch_id, trainer.learning_rate)
step_loss = 0
mx.nd.waitall()
if task.task_name == 'MNLI':
logging.info('On MNLI Matched: ')
evaluate(dev_data_matched, metric)
logging.info('On MNLI Mismatched: ')
evaluate(dev_data_mismatched, metric)
else:
evaluate(dev_data, metric)
# save params
params_saved = os.path.join(output_dir,
'model_bert_{0}_{1}.params'.format(task.task_name, epoch_id))
model.save_parameters(params_saved)
logging.info('params saved in : %s', params_saved)
toc = time.time()
logging.info('Time cost=%.2fs', toc - tic)
tic = toc | python | def train(metric):
"""Training function."""
logging.info('Now we are doing BERT classification training on %s!', ctx)
optimizer_params = {'learning_rate': lr, 'epsilon': epsilon, 'wd': 0.01}
try:
trainer = gluon.Trainer(
model.collect_params(),
args.optimizer,
optimizer_params,
update_on_kvstore=False)
except ValueError as e:
print(e)
warnings.warn(
'AdamW optimizer is not found. Please consider upgrading to '
'mxnet>=1.5.0. Now the original Adam optimizer is used instead.')
trainer = gluon.Trainer(
model.collect_params(),
'adam',
optimizer_params,
update_on_kvstore=False)
step_size = batch_size * accumulate if accumulate else batch_size
num_train_steps = int(num_train_examples / step_size * args.epochs)
warmup_ratio = args.warmup_ratio
num_warmup_steps = int(num_train_steps * warmup_ratio)
step_num = 0
# Do not apply weight decay on LayerNorm and bias terms
for _, v in model.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
# Collect differentiable parameters
params = [
p for p in model.collect_params().values() if p.grad_req != 'null'
]
# Set grad_req if gradient accumulation is required
if accumulate:
for p in params:
p.grad_req = 'add'
for epoch_id in range(args.epochs):
metric.reset()
step_loss = 0
tic = time.time()
for batch_id, seqs in enumerate(train_data):
# set grad to zero for gradient accumulation
if accumulate:
if batch_id % accumulate == 0:
model.collect_params().zero_grad()
step_num += 1
else:
step_num += 1
# learning rate schedule
if step_num < num_warmup_steps:
new_lr = lr * step_num / num_warmup_steps
else:
offset = (step_num - num_warmup_steps) * lr / (
num_train_steps - num_warmup_steps)
new_lr = lr - offset
trainer.set_learning_rate(new_lr)
# forward and backward
with mx.autograd.record():
input_ids, valid_length, type_ids, label = seqs
out = model(
input_ids.as_in_context(ctx), type_ids.as_in_context(ctx),
valid_length.astype('float32').as_in_context(ctx))
ls = loss_function(out, label.as_in_context(ctx)).mean()
ls.backward()
# update
if not accumulate or (batch_id + 1) % accumulate == 0:
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(params, 1)
trainer.update(accumulate if accumulate else 1)
step_loss += ls.asscalar()
metric.update([label], [out])
if (batch_id + 1) % (args.log_interval) == 0:
log_train(batch_id, len(train_data), metric, step_loss, args.log_interval,
epoch_id, trainer.learning_rate)
step_loss = 0
mx.nd.waitall()
if task.task_name == 'MNLI':
logging.info('On MNLI Matched: ')
evaluate(dev_data_matched, metric)
logging.info('On MNLI Mismatched: ')
evaluate(dev_data_mismatched, metric)
else:
evaluate(dev_data, metric)
# save params
params_saved = os.path.join(output_dir,
'model_bert_{0}_{1}.params'.format(task.task_name, epoch_id))
model.save_parameters(params_saved)
logging.info('params saved in : %s', params_saved)
toc = time.time()
logging.info('Time cost=%.2fs', toc - tic)
tic = toc | [
"def",
"train",
"(",
"metric",
")",
":",
"logging",
".",
"info",
"(",
"'Now we are doing BERT classification training on %s!'",
",",
"ctx",
")",
"optimizer_params",
"=",
"{",
"'learning_rate'",
":",
"lr",
",",
"'epsilon'",
":",
"epsilon",
",",
"'wd'",
":",
"0.01",
"}",
"try",
":",
"trainer",
"=",
"gluon",
".",
"Trainer",
"(",
"model",
".",
"collect_params",
"(",
")",
",",
"args",
".",
"optimizer",
",",
"optimizer_params",
",",
"update_on_kvstore",
"=",
"False",
")",
"except",
"ValueError",
"as",
"e",
":",
"print",
"(",
"e",
")",
"warnings",
".",
"warn",
"(",
"'AdamW optimizer is not found. Please consider upgrading to '",
"'mxnet>=1.5.0. Now the original Adam optimizer is used instead.'",
")",
"trainer",
"=",
"gluon",
".",
"Trainer",
"(",
"model",
".",
"collect_params",
"(",
")",
",",
"'adam'",
",",
"optimizer_params",
",",
"update_on_kvstore",
"=",
"False",
")",
"step_size",
"=",
"batch_size",
"*",
"accumulate",
"if",
"accumulate",
"else",
"batch_size",
"num_train_steps",
"=",
"int",
"(",
"num_train_examples",
"/",
"step_size",
"*",
"args",
".",
"epochs",
")",
"warmup_ratio",
"=",
"args",
".",
"warmup_ratio",
"num_warmup_steps",
"=",
"int",
"(",
"num_train_steps",
"*",
"warmup_ratio",
")",
"step_num",
"=",
"0",
"# Do not apply weight decay on LayerNorm and bias terms",
"for",
"_",
",",
"v",
"in",
"model",
".",
"collect_params",
"(",
"'.*beta|.*gamma|.*bias'",
")",
".",
"items",
"(",
")",
":",
"v",
".",
"wd_mult",
"=",
"0.0",
"# Collect differentiable parameters",
"params",
"=",
"[",
"p",
"for",
"p",
"in",
"model",
".",
"collect_params",
"(",
")",
".",
"values",
"(",
")",
"if",
"p",
".",
"grad_req",
"!=",
"'null'",
"]",
"# Set grad_req if gradient accumulation is required",
"if",
"accumulate",
":",
"for",
"p",
"in",
"params",
":",
"p",
".",
"grad_req",
"=",
"'add'",
"for",
"epoch_id",
"in",
"range",
"(",
"args",
".",
"epochs",
")",
":",
"metric",
".",
"reset",
"(",
")",
"step_loss",
"=",
"0",
"tic",
"=",
"time",
".",
"time",
"(",
")",
"for",
"batch_id",
",",
"seqs",
"in",
"enumerate",
"(",
"train_data",
")",
":",
"# set grad to zero for gradient accumulation",
"if",
"accumulate",
":",
"if",
"batch_id",
"%",
"accumulate",
"==",
"0",
":",
"model",
".",
"collect_params",
"(",
")",
".",
"zero_grad",
"(",
")",
"step_num",
"+=",
"1",
"else",
":",
"step_num",
"+=",
"1",
"# learning rate schedule",
"if",
"step_num",
"<",
"num_warmup_steps",
":",
"new_lr",
"=",
"lr",
"*",
"step_num",
"/",
"num_warmup_steps",
"else",
":",
"offset",
"=",
"(",
"step_num",
"-",
"num_warmup_steps",
")",
"*",
"lr",
"/",
"(",
"num_train_steps",
"-",
"num_warmup_steps",
")",
"new_lr",
"=",
"lr",
"-",
"offset",
"trainer",
".",
"set_learning_rate",
"(",
"new_lr",
")",
"# forward and backward",
"with",
"mx",
".",
"autograd",
".",
"record",
"(",
")",
":",
"input_ids",
",",
"valid_length",
",",
"type_ids",
",",
"label",
"=",
"seqs",
"out",
"=",
"model",
"(",
"input_ids",
".",
"as_in_context",
"(",
"ctx",
")",
",",
"type_ids",
".",
"as_in_context",
"(",
"ctx",
")",
",",
"valid_length",
".",
"astype",
"(",
"'float32'",
")",
".",
"as_in_context",
"(",
"ctx",
")",
")",
"ls",
"=",
"loss_function",
"(",
"out",
",",
"label",
".",
"as_in_context",
"(",
"ctx",
")",
")",
".",
"mean",
"(",
")",
"ls",
".",
"backward",
"(",
")",
"# update",
"if",
"not",
"accumulate",
"or",
"(",
"batch_id",
"+",
"1",
")",
"%",
"accumulate",
"==",
"0",
":",
"trainer",
".",
"allreduce_grads",
"(",
")",
"nlp",
".",
"utils",
".",
"clip_grad_global_norm",
"(",
"params",
",",
"1",
")",
"trainer",
".",
"update",
"(",
"accumulate",
"if",
"accumulate",
"else",
"1",
")",
"step_loss",
"+=",
"ls",
".",
"asscalar",
"(",
")",
"metric",
".",
"update",
"(",
"[",
"label",
"]",
",",
"[",
"out",
"]",
")",
"if",
"(",
"batch_id",
"+",
"1",
")",
"%",
"(",
"args",
".",
"log_interval",
")",
"==",
"0",
":",
"log_train",
"(",
"batch_id",
",",
"len",
"(",
"train_data",
")",
",",
"metric",
",",
"step_loss",
",",
"args",
".",
"log_interval",
",",
"epoch_id",
",",
"trainer",
".",
"learning_rate",
")",
"step_loss",
"=",
"0",
"mx",
".",
"nd",
".",
"waitall",
"(",
")",
"if",
"task",
".",
"task_name",
"==",
"'MNLI'",
":",
"logging",
".",
"info",
"(",
"'On MNLI Matched: '",
")",
"evaluate",
"(",
"dev_data_matched",
",",
"metric",
")",
"logging",
".",
"info",
"(",
"'On MNLI Mismatched: '",
")",
"evaluate",
"(",
"dev_data_mismatched",
",",
"metric",
")",
"else",
":",
"evaluate",
"(",
"dev_data",
",",
"metric",
")",
"# save params",
"params_saved",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'model_bert_{0}_{1}.params'",
".",
"format",
"(",
"task",
".",
"task_name",
",",
"epoch_id",
")",
")",
"model",
".",
"save_parameters",
"(",
"params_saved",
")",
"logging",
".",
"info",
"(",
"'params saved in : %s'",
",",
"params_saved",
")",
"toc",
"=",
"time",
".",
"time",
"(",
")",
"logging",
".",
"info",
"(",
"'Time cost=%.2fs'",
",",
"toc",
"-",
"tic",
")",
"tic",
"=",
"toc"
] | Training function. | [
"Training",
"function",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/finetune_classifier.py#L364-L459 | train |
dmlc/gluon-nlp | scripts/bert/finetune_classifier.py | inference | def inference(metric):
"""Inference function."""
logging.info('Now we are doing BERT classification inference on %s!', ctx)
model = BERTClassifier(bert, dropout=0.1, num_classes=len(task.get_labels()))
model.hybridize(static_alloc=True)
model.load_parameters(model_parameters, ctx=ctx)
metric.reset()
step_loss = 0
tic = time.time()
for batch_id, seqs in enumerate(dev_data):
input_ids, valid_length, type_ids, label = seqs
out = model(input_ids.as_in_context(ctx),
type_ids.as_in_context(ctx),
valid_length.astype('float32').as_in_context(ctx))
ls = loss_function(out, label.as_in_context(ctx)).mean()
step_loss += ls.asscalar()
metric.update([label], [out])
if (batch_id + 1) % (args.log_interval) == 0:
log_inference(batch_id, len(dev_data), metric, step_loss, args.log_interval)
step_loss = 0
mx.nd.waitall()
toc = time.time()
total_num = dev_batch_size * len(dev_data)
logging.info('Time cost=%.2fs, throughput=%.2fsamples/s', toc - tic, \
total_num / (toc - tic)) | python | def inference(metric):
"""Inference function."""
logging.info('Now we are doing BERT classification inference on %s!', ctx)
model = BERTClassifier(bert, dropout=0.1, num_classes=len(task.get_labels()))
model.hybridize(static_alloc=True)
model.load_parameters(model_parameters, ctx=ctx)
metric.reset()
step_loss = 0
tic = time.time()
for batch_id, seqs in enumerate(dev_data):
input_ids, valid_length, type_ids, label = seqs
out = model(input_ids.as_in_context(ctx),
type_ids.as_in_context(ctx),
valid_length.astype('float32').as_in_context(ctx))
ls = loss_function(out, label.as_in_context(ctx)).mean()
step_loss += ls.asscalar()
metric.update([label], [out])
if (batch_id + 1) % (args.log_interval) == 0:
log_inference(batch_id, len(dev_data), metric, step_loss, args.log_interval)
step_loss = 0
mx.nd.waitall()
toc = time.time()
total_num = dev_batch_size * len(dev_data)
logging.info('Time cost=%.2fs, throughput=%.2fsamples/s', toc - tic, \
total_num / (toc - tic)) | [
"def",
"inference",
"(",
"metric",
")",
":",
"logging",
".",
"info",
"(",
"'Now we are doing BERT classification inference on %s!'",
",",
"ctx",
")",
"model",
"=",
"BERTClassifier",
"(",
"bert",
",",
"dropout",
"=",
"0.1",
",",
"num_classes",
"=",
"len",
"(",
"task",
".",
"get_labels",
"(",
")",
")",
")",
"model",
".",
"hybridize",
"(",
"static_alloc",
"=",
"True",
")",
"model",
".",
"load_parameters",
"(",
"model_parameters",
",",
"ctx",
"=",
"ctx",
")",
"metric",
".",
"reset",
"(",
")",
"step_loss",
"=",
"0",
"tic",
"=",
"time",
".",
"time",
"(",
")",
"for",
"batch_id",
",",
"seqs",
"in",
"enumerate",
"(",
"dev_data",
")",
":",
"input_ids",
",",
"valid_length",
",",
"type_ids",
",",
"label",
"=",
"seqs",
"out",
"=",
"model",
"(",
"input_ids",
".",
"as_in_context",
"(",
"ctx",
")",
",",
"type_ids",
".",
"as_in_context",
"(",
"ctx",
")",
",",
"valid_length",
".",
"astype",
"(",
"'float32'",
")",
".",
"as_in_context",
"(",
"ctx",
")",
")",
"ls",
"=",
"loss_function",
"(",
"out",
",",
"label",
".",
"as_in_context",
"(",
"ctx",
")",
")",
".",
"mean",
"(",
")",
"step_loss",
"+=",
"ls",
".",
"asscalar",
"(",
")",
"metric",
".",
"update",
"(",
"[",
"label",
"]",
",",
"[",
"out",
"]",
")",
"if",
"(",
"batch_id",
"+",
"1",
")",
"%",
"(",
"args",
".",
"log_interval",
")",
"==",
"0",
":",
"log_inference",
"(",
"batch_id",
",",
"len",
"(",
"dev_data",
")",
",",
"metric",
",",
"step_loss",
",",
"args",
".",
"log_interval",
")",
"step_loss",
"=",
"0",
"mx",
".",
"nd",
".",
"waitall",
"(",
")",
"toc",
"=",
"time",
".",
"time",
"(",
")",
"total_num",
"=",
"dev_batch_size",
"*",
"len",
"(",
"dev_data",
")",
"logging",
".",
"info",
"(",
"'Time cost=%.2fs, throughput=%.2fsamples/s'",
",",
"toc",
"-",
"tic",
",",
"total_num",
"/",
"(",
"toc",
"-",
"tic",
")",
")"
] | Inference function. | [
"Inference",
"function",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/finetune_classifier.py#L462-L492 | train |
dmlc/gluon-nlp | scripts/question_answering/data_processing.py | preprocess_dataset | def preprocess_dataset(dataset, question_max_length, context_max_length):
"""Process SQuAD dataset by creating NDArray version of data
:param Dataset dataset: SQuAD dataset
:param int question_max_length: Maximum length of question (padded or trimmed to that size)
:param int context_max_length: Maximum length of context (padded or trimmed to that size)
Returns
-------
SimpleDataset
Dataset of preprocessed records
"""
vocab_provider = VocabProvider(dataset)
transformer = SQuADTransform(
vocab_provider, question_max_length, context_max_length)
processed_dataset = SimpleDataset(
dataset.transform(transformer, lazy=False))
return processed_dataset | python | def preprocess_dataset(dataset, question_max_length, context_max_length):
"""Process SQuAD dataset by creating NDArray version of data
:param Dataset dataset: SQuAD dataset
:param int question_max_length: Maximum length of question (padded or trimmed to that size)
:param int context_max_length: Maximum length of context (padded or trimmed to that size)
Returns
-------
SimpleDataset
Dataset of preprocessed records
"""
vocab_provider = VocabProvider(dataset)
transformer = SQuADTransform(
vocab_provider, question_max_length, context_max_length)
processed_dataset = SimpleDataset(
dataset.transform(transformer, lazy=False))
return processed_dataset | [
"def",
"preprocess_dataset",
"(",
"dataset",
",",
"question_max_length",
",",
"context_max_length",
")",
":",
"vocab_provider",
"=",
"VocabProvider",
"(",
"dataset",
")",
"transformer",
"=",
"SQuADTransform",
"(",
"vocab_provider",
",",
"question_max_length",
",",
"context_max_length",
")",
"processed_dataset",
"=",
"SimpleDataset",
"(",
"dataset",
".",
"transform",
"(",
"transformer",
",",
"lazy",
"=",
"False",
")",
")",
"return",
"processed_dataset"
] | Process SQuAD dataset by creating NDArray version of data
:param Dataset dataset: SQuAD dataset
:param int question_max_length: Maximum length of question (padded or trimmed to that size)
:param int context_max_length: Maximum length of context (padded or trimmed to that size)
Returns
-------
SimpleDataset
Dataset of preprocessed records | [
"Process",
"SQuAD",
"dataset",
"by",
"creating",
"NDArray",
"version",
"of",
"data"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/question_answering/data_processing.py#L34-L51 | train |
dmlc/gluon-nlp | scripts/question_answering/data_processing.py | SQuADTransform._get_answer_spans | def _get_answer_spans(answer_list, answer_start_list):
"""Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_end_index) per question
"""
return [(answer_start_list[i], answer_start_list[i] + len(answer))
for i, answer in enumerate(answer_list)] | python | def _get_answer_spans(answer_list, answer_start_list):
"""Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_end_index) per question
"""
return [(answer_start_list[i], answer_start_list[i] + len(answer))
for i, answer in enumerate(answer_list)] | [
"def",
"_get_answer_spans",
"(",
"answer_list",
",",
"answer_start_list",
")",
":",
"return",
"[",
"(",
"answer_start_list",
"[",
"i",
"]",
",",
"answer_start_list",
"[",
"i",
"]",
"+",
"len",
"(",
"answer",
")",
")",
"for",
"i",
",",
"answer",
"in",
"enumerate",
"(",
"answer_list",
")",
"]"
] | Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_end_index) per question | [
"Find",
"all",
"answer",
"spans",
"from",
"the",
"context",
"returning",
"start_index",
"and",
"end_index"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/question_answering/data_processing.py#L98-L110 | train |
dmlc/gluon-nlp | scripts/question_answering/data_processing.py | VocabProvider.get_word_level_vocab | def get_word_level_vocab(self):
"""Provides word level vocabulary
Returns
-------
Vocab
Word level vocabulary
"""
def simple_tokenize(source_str, token_delim=' ', seq_delim='\n'):
return list(filter(None, re.split(token_delim + '|' + seq_delim, source_str)))
return VocabProvider._create_squad_vocab(simple_tokenize, self._dataset) | python | def get_word_level_vocab(self):
"""Provides word level vocabulary
Returns
-------
Vocab
Word level vocabulary
"""
def simple_tokenize(source_str, token_delim=' ', seq_delim='\n'):
return list(filter(None, re.split(token_delim + '|' + seq_delim, source_str)))
return VocabProvider._create_squad_vocab(simple_tokenize, self._dataset) | [
"def",
"get_word_level_vocab",
"(",
"self",
")",
":",
"def",
"simple_tokenize",
"(",
"source_str",
",",
"token_delim",
"=",
"' '",
",",
"seq_delim",
"=",
"'\\n'",
")",
":",
"return",
"list",
"(",
"filter",
"(",
"None",
",",
"re",
".",
"split",
"(",
"token_delim",
"+",
"'|'",
"+",
"seq_delim",
",",
"source_str",
")",
")",
")",
"return",
"VocabProvider",
".",
"_create_squad_vocab",
"(",
"simple_tokenize",
",",
"self",
".",
"_dataset",
")"
] | Provides word level vocabulary
Returns
-------
Vocab
Word level vocabulary | [
"Provides",
"word",
"level",
"vocabulary"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/question_answering/data_processing.py#L130-L142 | train |
dmlc/gluon-nlp | src/gluonnlp/loss/activation_regularizer.py | TemporalActivationRegularizationLoss.hybrid_forward | def hybrid_forward(self, F, *states): # pylint: disable=arguments-differ
"""
Parameters
----------
states : list
the stack outputs from RNN, which consists of output from each time step (TNC).
Returns
--------
loss : NDArray
loss tensor with shape (batch_size,). Dimensions other than batch_axis are averaged out.
"""
# pylint: disable=unused-argument
if self._beta != 0:
if states:
means = [self._beta * (state[1:] - state[:-1]).__pow__(2).mean()
for state in states[-1:]]
return F.add_n(*means)
else:
return F.zeros(1)
return F.zeros(1) | python | def hybrid_forward(self, F, *states): # pylint: disable=arguments-differ
"""
Parameters
----------
states : list
the stack outputs from RNN, which consists of output from each time step (TNC).
Returns
--------
loss : NDArray
loss tensor with shape (batch_size,). Dimensions other than batch_axis are averaged out.
"""
# pylint: disable=unused-argument
if self._beta != 0:
if states:
means = [self._beta * (state[1:] - state[:-1]).__pow__(2).mean()
for state in states[-1:]]
return F.add_n(*means)
else:
return F.zeros(1)
return F.zeros(1) | [
"def",
"hybrid_forward",
"(",
"self",
",",
"F",
",",
"*",
"states",
")",
":",
"# pylint: disable=arguments-differ",
"# pylint: disable=unused-argument",
"if",
"self",
".",
"_beta",
"!=",
"0",
":",
"if",
"states",
":",
"means",
"=",
"[",
"self",
".",
"_beta",
"*",
"(",
"state",
"[",
"1",
":",
"]",
"-",
"state",
"[",
":",
"-",
"1",
"]",
")",
".",
"__pow__",
"(",
"2",
")",
".",
"mean",
"(",
")",
"for",
"state",
"in",
"states",
"[",
"-",
"1",
":",
"]",
"]",
"return",
"F",
".",
"add_n",
"(",
"*",
"means",
")",
"else",
":",
"return",
"F",
".",
"zeros",
"(",
"1",
")",
"return",
"F",
".",
"zeros",
"(",
"1",
")"
] | Parameters
----------
states : list
the stack outputs from RNN, which consists of output from each time step (TNC).
Returns
--------
loss : NDArray
loss tensor with shape (batch_size,). Dimensions other than batch_axis are averaged out. | [
"Parameters",
"----------",
"states",
":",
"list",
"the",
"stack",
"outputs",
"from",
"RNN",
"which",
"consists",
"of",
"output",
"from",
"each",
"time",
"step",
"(",
"TNC",
")",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/loss/activation_regularizer.py#L128-L148 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTBasicTokenizer._tokenize | def _tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = self._whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.lower:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = self._whitespace_tokenize(' '.join(split_tokens))
return output_tokens | python | def _tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = self._whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.lower:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = self._whitespace_tokenize(' '.join(split_tokens))
return output_tokens | [
"def",
"_tokenize",
"(",
"self",
",",
"text",
")",
":",
"text",
"=",
"self",
".",
"_clean_text",
"(",
"text",
")",
"# This was added on November 1st, 2018 for the multilingual and Chinese",
"# models. This is also applied to the English models now, but it doesn't",
"# matter since the English models were not trained on any Chinese data",
"# and generally don't have any Chinese data in them (there are Chinese",
"# characters in the vocabulary because Wikipedia does have some Chinese",
"# words in the English Wikipedia.).",
"text",
"=",
"self",
".",
"_tokenize_chinese_chars",
"(",
"text",
")",
"orig_tokens",
"=",
"self",
".",
"_whitespace_tokenize",
"(",
"text",
")",
"split_tokens",
"=",
"[",
"]",
"for",
"token",
"in",
"orig_tokens",
":",
"if",
"self",
".",
"lower",
":",
"token",
"=",
"token",
".",
"lower",
"(",
")",
"token",
"=",
"self",
".",
"_run_strip_accents",
"(",
"token",
")",
"split_tokens",
".",
"extend",
"(",
"self",
".",
"_run_split_on_punc",
"(",
"token",
")",
")",
"output_tokens",
"=",
"self",
".",
"_whitespace_tokenize",
"(",
"' '",
".",
"join",
"(",
"split_tokens",
")",
")",
"return",
"output_tokens"
] | Tokenizes a piece of text. | [
"Tokenizes",
"a",
"piece",
"of",
"text",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L758-L778 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTBasicTokenizer._clean_text | def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp in (0, 0xfffd) or self._is_control(char):
continue
if self._is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output) | python | def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp in (0, 0xfffd) or self._is_control(char):
continue
if self._is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output) | [
"def",
"_clean_text",
"(",
"self",
",",
"text",
")",
":",
"output",
"=",
"[",
"]",
"for",
"char",
"in",
"text",
":",
"cp",
"=",
"ord",
"(",
"char",
")",
"if",
"cp",
"in",
"(",
"0",
",",
"0xfffd",
")",
"or",
"self",
".",
"_is_control",
"(",
"char",
")",
":",
"continue",
"if",
"self",
".",
"_is_whitespace",
"(",
"char",
")",
":",
"output",
".",
"append",
"(",
"' '",
")",
"else",
":",
"output",
".",
"append",
"(",
"char",
")",
"return",
"''",
".",
"join",
"(",
"output",
")"
] | Performs invalid character removal and whitespace cleanup on text. | [
"Performs",
"invalid",
"character",
"removal",
"and",
"whitespace",
"cleanup",
"on",
"text",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L780-L791 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTBasicTokenizer._is_control | def _is_control(self, char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char in ['\t', '\n', '\r']:
return False
cat = unicodedata.category(char)
if cat.startswith('C'):
return True
return False | python | def _is_control(self, char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char in ['\t', '\n', '\r']:
return False
cat = unicodedata.category(char)
if cat.startswith('C'):
return True
return False | [
"def",
"_is_control",
"(",
"self",
",",
"char",
")",
":",
"# These are technically control characters but we count them as whitespace",
"# characters.",
"if",
"char",
"in",
"[",
"'\\t'",
",",
"'\\n'",
",",
"'\\r'",
"]",
":",
"return",
"False",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
".",
"startswith",
"(",
"'C'",
")",
":",
"return",
"True",
"return",
"False"
] | Checks whether `chars` is a control character. | [
"Checks",
"whether",
"chars",
"is",
"a",
"control",
"character",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L793-L802 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTBasicTokenizer._run_split_on_punc | def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if self._is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return [''.join(x) for x in output] | python | def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if self._is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return [''.join(x) for x in output] | [
"def",
"_run_split_on_punc",
"(",
"self",
",",
"text",
")",
":",
"chars",
"=",
"list",
"(",
"text",
")",
"i",
"=",
"0",
"start_new_word",
"=",
"True",
"output",
"=",
"[",
"]",
"while",
"i",
"<",
"len",
"(",
"chars",
")",
":",
"char",
"=",
"chars",
"[",
"i",
"]",
"if",
"self",
".",
"_is_punctuation",
"(",
"char",
")",
":",
"output",
".",
"append",
"(",
"[",
"char",
"]",
")",
"start_new_word",
"=",
"True",
"else",
":",
"if",
"start_new_word",
":",
"output",
".",
"append",
"(",
"[",
"]",
")",
"start_new_word",
"=",
"False",
"output",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"char",
")",
"i",
"+=",
"1",
"return",
"[",
"''",
".",
"join",
"(",
"x",
")",
"for",
"x",
"in",
"output",
"]"
] | Splits punctuation on a piece of text. | [
"Splits",
"punctuation",
"on",
"a",
"piece",
"of",
"text",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L850-L868 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTBasicTokenizer._is_punctuation | def _is_punctuation(self, char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
group0 = cp >= 33 and cp <= 47
group1 = cp >= 58 and cp <= 64
group2 = cp >= 91 and cp <= 96
group3 = cp >= 123 and cp <= 126
if (group0 or group1 or group2 or group3):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False | python | def _is_punctuation(self, char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
group0 = cp >= 33 and cp <= 47
group1 = cp >= 58 and cp <= 64
group2 = cp >= 91 and cp <= 96
group3 = cp >= 123 and cp <= 126
if (group0 or group1 or group2 or group3):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False | [
"def",
"_is_punctuation",
"(",
"self",
",",
"char",
")",
":",
"cp",
"=",
"ord",
"(",
"char",
")",
"# We treat all non-letter/number ASCII as punctuation.",
"# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode",
"# Punctuation class but we treat them as punctuation anyways, for",
"# consistency.",
"group0",
"=",
"cp",
">=",
"33",
"and",
"cp",
"<=",
"47",
"group1",
"=",
"cp",
">=",
"58",
"and",
"cp",
"<=",
"64",
"group2",
"=",
"cp",
">=",
"91",
"and",
"cp",
"<=",
"96",
"group3",
"=",
"cp",
">=",
"123",
"and",
"cp",
"<=",
"126",
"if",
"(",
"group0",
"or",
"group1",
"or",
"group2",
"or",
"group3",
")",
":",
"return",
"True",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
".",
"startswith",
"(",
"'P'",
")",
":",
"return",
"True",
"return",
"False"
] | Checks whether `chars` is a punctuation character. | [
"Checks",
"whether",
"chars",
"is",
"a",
"punctuation",
"character",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L870-L886 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTBasicTokenizer._is_whitespace | def _is_whitespace(self, char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char in [' ', '\t', '\n', '\r']:
return True
cat = unicodedata.category(char)
if cat == 'Zs':
return True
return False | python | def _is_whitespace(self, char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char in [' ', '\t', '\n', '\r']:
return True
cat = unicodedata.category(char)
if cat == 'Zs':
return True
return False | [
"def",
"_is_whitespace",
"(",
"self",
",",
"char",
")",
":",
"# \\t, \\n, and \\r are technically contorl characters but we treat them",
"# as whitespace since they are generally considered as such.",
"if",
"char",
"in",
"[",
"' '",
",",
"'\\t'",
",",
"'\\n'",
",",
"'\\r'",
"]",
":",
"return",
"True",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
"==",
"'Zs'",
":",
"return",
"True",
"return",
"False"
] | Checks whether `chars` is a whitespace character. | [
"Checks",
"whether",
"chars",
"is",
"a",
"whitespace",
"character",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L888-L897 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTBasicTokenizer._whitespace_tokenize | def _whitespace_tokenize(self, text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
tokens = text.split()
return tokens | python | def _whitespace_tokenize(self, text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
tokens = text.split()
return tokens | [
"def",
"_whitespace_tokenize",
"(",
"self",
",",
"text",
")",
":",
"text",
"=",
"text",
".",
"strip",
"(",
")",
"tokens",
"=",
"text",
".",
"split",
"(",
")",
"return",
"tokens"
] | Runs basic whitespace cleaning and splitting on a piece of text. | [
"Runs",
"basic",
"whitespace",
"cleaning",
"and",
"splitting",
"on",
"a",
"piece",
"of",
"text",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L899-L903 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTTokenizer._tokenize_wordpiece | def _tokenize_wordpiece(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BERTBasicTokenizer.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in self.basic_tokenizer._whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.vocab.unknown_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = ''.join(chars[start:end])
if start > 0:
substr = '##' + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.vocab.unknown_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens | python | def _tokenize_wordpiece(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BERTBasicTokenizer.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in self.basic_tokenizer._whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.vocab.unknown_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = ''.join(chars[start:end])
if start > 0:
substr = '##' + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.vocab.unknown_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens | [
"def",
"_tokenize_wordpiece",
"(",
"self",
",",
"text",
")",
":",
"output_tokens",
"=",
"[",
"]",
"for",
"token",
"in",
"self",
".",
"basic_tokenizer",
".",
"_whitespace_tokenize",
"(",
"text",
")",
":",
"chars",
"=",
"list",
"(",
"token",
")",
"if",
"len",
"(",
"chars",
")",
">",
"self",
".",
"max_input_chars_per_word",
":",
"output_tokens",
".",
"append",
"(",
"self",
".",
"vocab",
".",
"unknown_token",
")",
"continue",
"is_bad",
"=",
"False",
"start",
"=",
"0",
"sub_tokens",
"=",
"[",
"]",
"while",
"start",
"<",
"len",
"(",
"chars",
")",
":",
"end",
"=",
"len",
"(",
"chars",
")",
"cur_substr",
"=",
"None",
"while",
"start",
"<",
"end",
":",
"substr",
"=",
"''",
".",
"join",
"(",
"chars",
"[",
"start",
":",
"end",
"]",
")",
"if",
"start",
">",
"0",
":",
"substr",
"=",
"'##'",
"+",
"substr",
"if",
"substr",
"in",
"self",
".",
"vocab",
":",
"cur_substr",
"=",
"substr",
"break",
"end",
"-=",
"1",
"if",
"cur_substr",
"is",
"None",
":",
"is_bad",
"=",
"True",
"break",
"sub_tokens",
".",
"append",
"(",
"cur_substr",
")",
"start",
"=",
"end",
"if",
"is_bad",
":",
"output_tokens",
".",
"append",
"(",
"self",
".",
"vocab",
".",
"unknown_token",
")",
"else",
":",
"output_tokens",
".",
"extend",
"(",
"sub_tokens",
")",
"return",
"output_tokens"
] | Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BERTBasicTokenizer.
Returns:
A list of wordpiece tokens. | [
"Tokenizes",
"a",
"piece",
"of",
"text",
"into",
"its",
"word",
"pieces",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L960-L1007 | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | BERTSentenceTransform._truncate_seq_pair | def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop() | python | def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop() | [
"def",
"_truncate_seq_pair",
"(",
"self",
",",
"tokens_a",
",",
"tokens_b",
",",
"max_length",
")",
":",
"# This is a simple heuristic which will always truncate the longer sequence",
"# one token at a time. This makes more sense than truncating an equal percent",
"# of tokens from each, since if one sequence is very short then each token",
"# that's truncated likely contains more information than a longer sequence.",
"while",
"True",
":",
"total_length",
"=",
"len",
"(",
"tokens_a",
")",
"+",
"len",
"(",
"tokens_b",
")",
"if",
"total_length",
"<=",
"max_length",
":",
"break",
"if",
"len",
"(",
"tokens_a",
")",
">",
"len",
"(",
"tokens_b",
")",
":",
"tokens_a",
".",
"pop",
"(",
")",
"else",
":",
"tokens_b",
".",
"pop",
"(",
")"
] | Truncates a sequence pair in place to the maximum length. | [
"Truncates",
"a",
"sequence",
"pair",
"in",
"place",
"to",
"the",
"maximum",
"length",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L1144-L1157 | train |
def get_args():
    """Construct the argument parser.

    Parses the command line, then runs both the local and the
    evaluation-module validation on the result.

    Returns
    -------
    argparse.Namespace
        The parsed and validated command line arguments.
    """
    parser = argparse.ArgumentParser(
        description='Word embedding evaluation with Gluon.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Embeddings arguments
    group = parser.add_argument_group('Embedding arguments')
    group.add_argument('--embedding-path', type=str,
                       help='Path to a .vec in Word2Vec text format or '
                       '.bin binary fastText model file. ')
    group.add_argument('--embedding-name', type=str,
                       help=('Name of embedding type to load. '
                             'Valid entries: {}'.format(
                                 ', '.join(
                                     nlp.embedding.list_sources().keys()))))
    # NOTE: fixed missing spaces at literal-concatenation boundaries so the
    # --help output reads correctly.
    group.add_argument('--embedding-source', type=str,
                       help=('Source from which to initialize the embedding. '
                             'Pass --list-embedding-sources to get a list of '
                             'valid sources for a given --embedding-name.'))
    group.add_argument(
        '--fasttext-load-ngrams',
        action='store_true',
        help=('Specify load_ngrams=True '
              'when loading pretrained fastText embedding.'))
    group.add_argument(
        '--analogy-max-vocab-size', type=int, default=None,
        help=('Only retain the X first tokens from the pre-trained embedding. '
              'The tokens are ordered by decreasing frequency. '
              'As the analogy task takes the whole vocabulary into account, '
              'removing very infrequent words improves performance.'))
    group.add_argument('--list-embedding-sources', action='store_true')

    # Computation options
    group = parser.add_argument_group('Computation arguments')
    group.add_argument('--batch-size', type=int, default=1024,
                       help='Batch size to use on analogy task. '
                       'Decrease batch size if evaluation crashes.')
    group.add_argument('--gpu', type=int,
                       help=('Number (index) of GPU to run on, e.g. 0. '
                             'If not specified, uses CPU.'))
    group.add_argument('--no-hybridize', action='store_true',
                       help='Disable hybridization of gluon HybridBlocks.')

    # Logging
    group = parser.add_argument_group('Logging arguments')
    group.add_argument('--logdir', type=str, default='logs',
                       help='Directory to store logs.')

    # Evaluation options
    evaluation.add_parameters(parser)

    args = parser.parse_args()
    validate_args(args)
    evaluation.validate_args(args)
    return args
"def",
"get_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Word embedding evaluation with Gluon.'",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"# Embeddings arguments",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Embedding arguments'",
")",
"group",
".",
"add_argument",
"(",
"'--embedding-path'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Path to a .vec in Word2Vec text foramt or '",
"'.bin binary fastText model file. '",
")",
"group",
".",
"add_argument",
"(",
"'--embedding-name'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"(",
"'Name of embedding type to load. '",
"'Valid entries: {}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"nlp",
".",
"embedding",
".",
"list_sources",
"(",
")",
".",
"keys",
"(",
")",
")",
")",
")",
")",
"group",
".",
"add_argument",
"(",
"'--embedding-source'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"(",
"'Source from which to initialize the embedding.'",
"'Pass --list-embedding-sources to get a list of '",
"'valid sources for a given --embedding-name.'",
")",
")",
"group",
".",
"add_argument",
"(",
"'--fasttext-load-ngrams'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"(",
"'Specify load_ngrams=True '",
"'when loading pretrained fastText embedding.'",
")",
")",
"group",
".",
"add_argument",
"(",
"'--analogy-max-vocab-size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"None",
",",
"help",
"=",
"(",
"'Only retain the X first tokens from the pre-trained embedding. '",
"'The tokens are ordered by decreasing frequency.'",
"'As the analogy task takes the whole vocabulary into account, '",
"'removing very infrequent words improves performance.'",
")",
")",
"group",
".",
"add_argument",
"(",
"'--list-embedding-sources'",
",",
"action",
"=",
"'store_true'",
")",
"# Computation options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Computation arguments'",
")",
"group",
".",
"add_argument",
"(",
"'--batch-size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1024",
",",
"help",
"=",
"'Batch size to use on analogy task. '",
"'Decrease batch size if evaluation crashes.'",
")",
"group",
".",
"add_argument",
"(",
"'--gpu'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"(",
"'Number (index) of GPU to run on, e.g. 0. '",
"'If not specified, uses CPU.'",
")",
")",
"group",
".",
"add_argument",
"(",
"'--no-hybridize'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Disable hybridization of gluon HybridBlocks.'",
")",
"# Logging",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Logging arguments'",
")",
"group",
".",
"add_argument",
"(",
"'--logdir'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'logs'",
",",
"help",
"=",
"'Directory to store logs.'",
")",
"# Evaluation options",
"evaluation",
".",
"add_parameters",
"(",
"parser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"validate_args",
"(",
"args",
")",
"evaluation",
".",
"validate_args",
"(",
"args",
")",
"return",
"args"
] | Construct the argument parser. | [
"Construct",
"the",
"argument",
"parser",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/evaluate_pretrained.py#L40-L97 | train |
def validate_args(args):
    """Validate provided arguments and act on --list-embedding-sources.

    Exits with status 0 after printing the available sources when
    --list-embedding-sources was given, and with status 1 on invalid
    argument combinations. Otherwise prints the parsed arguments and
    returns None.
    """
    if args.list_embedding_sources:
        print('Listing all sources for {} embeddings.'.format(
            args.embedding_name))
        print('Specify --embedding-name if you wish to '
              'list sources of other embeddings')
        print('')
        if args.embedding_name not in nlp.embedding.list_sources().keys():
            print('Invalid embedding name.')
            print('Only {} are supported.'.format(', '.join(
                nlp.embedding.list_sources().keys())))
            sys.exit(1)
        print(' '.join(nlp.embedding.list_sources()[args.embedding_name]))
        sys.exit(0)

    # At least one way of obtaining an embedding must be specified.
    if not (args.embedding_path or args.embedding_name):
        print('You must specify either --embedding-path or --embedding-name ')
        print('Use --embedding-path to load and evaluate '
              'word embeddings from a Word2Vec text format '
              'or fastText binary format file')
        # NOTE: fixed grammar of this message ("or to download" -> "to download").
        print('Use --embedding-name to download one of '
              'the pre-trained embedding files included in GluonNLP.')
        sys.exit(1)

    # A named pre-trained embedding is ambiguous without a source version.
    if args.embedding_name and not args.embedding_source:
        print('Please also specify --embedding-source'
              ' to select the version of the pre-trained embedding. '
              'Use --list-embedding-sources to see all available sources')
        sys.exit(1)

    print(args)
"def",
"validate_args",
"(",
"args",
")",
":",
"if",
"args",
".",
"list_embedding_sources",
":",
"print",
"(",
"'Listing all sources for {} embeddings.'",
".",
"format",
"(",
"args",
".",
"embedding_name",
")",
")",
"print",
"(",
"'Specify --embedding-name if you wish to '",
"'list sources of other embeddings'",
")",
"print",
"(",
"''",
")",
"if",
"args",
".",
"embedding_name",
"not",
"in",
"nlp",
".",
"embedding",
".",
"list_sources",
"(",
")",
".",
"keys",
"(",
")",
":",
"print",
"(",
"'Invalid embedding name.'",
")",
"print",
"(",
"'Only {} are supported.'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"nlp",
".",
"embedding",
".",
"list_sources",
"(",
")",
".",
"keys",
"(",
")",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"print",
"(",
"' '",
".",
"join",
"(",
"nlp",
".",
"embedding",
".",
"list_sources",
"(",
")",
"[",
"args",
".",
"embedding_name",
"]",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"if",
"not",
"(",
"args",
".",
"embedding_path",
"or",
"args",
".",
"embedding_name",
")",
":",
"print",
"(",
"'You must specify either --embedding-path or --embedding-name '",
")",
"print",
"(",
"'Use --embedding-path to load and evaluate '",
"'word embeddings from a Word2Vec text format '",
"'or fastText binary format file'",
")",
"print",
"(",
"'Use --embedding-name or to download one of '",
"'the pre-trained embedding files included in GluonNLP.'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"embedding_name",
"and",
"not",
"args",
".",
"embedding_source",
":",
"print",
"(",
"'Please also specify --embedding-source'",
"' to select the version of the pre-trained embedding. '",
"'Use --list-embedding-sources to see all available sources'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"print",
"(",
"args",
")"
] | Validate provided arguments and act on --help. | [
"Validate",
"provided",
"arguments",
"and",
"act",
"on",
"--",
"help",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/evaluate_pretrained.py#L100-L131 | train |
def load_embedding_from_path(args):
    """Load a TokenEmbedding from a file on disk.

    Two formats are supported: a fastText binary model (path ending in
    ``.bin``), which also provides subword-based vectors for
    out-of-vocabulary words, and any text format accepted by
    ``nlp.embedding.TokenEmbedding.from_file`` (e.g. Word2Vec .vec).
    """
    if args.embedding_path.endswith('.bin'):
        with utils.print_time('load fastText model.'):
            model = \
                nlp.model.train.FasttextEmbeddingModel.load_fasttext_format(
                    args.embedding_path)
        # Tokens sorted by their fastText index. NOTE(review): fastText
        # presumably assigns indices by decreasing frequency, so a prefix
        # slice below keeps the most frequent words — confirm.
        idx_to_token = sorted(model._token_to_idx, key=model._token_to_idx.get)
        # unknown_lookup=model lets the embedding fall back to the fastText
        # subword model for any word without a precomputed vector.
        embedding = nlp.embedding.TokenEmbedding(
            unknown_token=None, unknown_lookup=model, allow_extend=True)
        # Analogy task is open-vocabulary, so must keep all known words.
        # But if not evaluating analogy, no need to precompute now as all
        # words for closed vocabulary task can be obtained via the unknown
        # lookup
        if not args.analogy_datasets:
            idx_to_token = []
        elif args.analogy_datasets and args.analogy_max_vocab_size:
            # Keep only the first --analogy-max-vocab-size tokens.
            idx_to_token = idx_to_token[:args.analogy_max_vocab_size]
        # Register a zero vector for '<unk>' before filling in the known
        # words. NOTE(review): presumably gives evaluation a well-defined
        # row for unknown words — verify against the evaluation code.
        embedding['<unk>'] = mx.nd.zeros(model.weight.shape[1])
        if idx_to_token:
            with utils.print_time('compute vectors for {} known '
                                  'words.'.format(len(idx_to_token))):
                embedding[idx_to_token] = model[idx_to_token]
    else:
        embedding = nlp.embedding.TokenEmbedding.from_file(args.embedding_path)
    return embedding
"def",
"load_embedding_from_path",
"(",
"args",
")",
":",
"if",
"args",
".",
"embedding_path",
".",
"endswith",
"(",
"'.bin'",
")",
":",
"with",
"utils",
".",
"print_time",
"(",
"'load fastText model.'",
")",
":",
"model",
"=",
"nlp",
".",
"model",
".",
"train",
".",
"FasttextEmbeddingModel",
".",
"load_fasttext_format",
"(",
"args",
".",
"embedding_path",
")",
"idx_to_token",
"=",
"sorted",
"(",
"model",
".",
"_token_to_idx",
",",
"key",
"=",
"model",
".",
"_token_to_idx",
".",
"get",
")",
"embedding",
"=",
"nlp",
".",
"embedding",
".",
"TokenEmbedding",
"(",
"unknown_token",
"=",
"None",
",",
"unknown_lookup",
"=",
"model",
",",
"allow_extend",
"=",
"True",
")",
"# Analogy task is open-vocabulary, so must keep all known words.",
"# But if not evaluating analogy, no need to precompute now as all",
"# words for closed vocabulary task can be obtained via the unknown",
"# lookup",
"if",
"not",
"args",
".",
"analogy_datasets",
":",
"idx_to_token",
"=",
"[",
"]",
"elif",
"args",
".",
"analogy_datasets",
"and",
"args",
".",
"analogy_max_vocab_size",
":",
"idx_to_token",
"=",
"idx_to_token",
"[",
":",
"args",
".",
"analogy_max_vocab_size",
"]",
"embedding",
"[",
"'<unk>'",
"]",
"=",
"mx",
".",
"nd",
".",
"zeros",
"(",
"model",
".",
"weight",
".",
"shape",
"[",
"1",
"]",
")",
"if",
"idx_to_token",
":",
"with",
"utils",
".",
"print_time",
"(",
"'compute vectors for {} known '",
"'words.'",
".",
"format",
"(",
"len",
"(",
"idx_to_token",
")",
")",
")",
":",
"embedding",
"[",
"idx_to_token",
"]",
"=",
"model",
"[",
"idx_to_token",
"]",
"else",
":",
"embedding",
"=",
"nlp",
".",
"embedding",
".",
"TokenEmbedding",
".",
"from_file",
"(",
"args",
".",
"embedding_path",
")",
"return",
"embedding"
] | Load a TokenEmbedding. | [
"Load",
"a",
"TokenEmbedding",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/evaluate_pretrained.py#L134-L163 | train |
dmlc/gluon-nlp | scripts/bert/fp16_utils.py | grad_global_norm | def grad_global_norm(parameters, max_norm):
"""Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
norm, ratio = grad_global_norm(net.collect_params().values(), max_norm)
trainer.update(batch_size * ratio)
...
Parameters
----------
parameters : list of Parameters
Returns
-------
NDArray
Total norm. Shape is (1,)
NDArray
Ratio for rescaling gradients based on max_norm s.t. grad = grad / ratio.
If total norm is NaN, ratio will be NaN, too. Shape is (1,)
NDArray
Whether the total norm is finite. Shape is (1,)
"""
# collect gradient arrays
arrays = []
idx = 0
for p in parameters:
if p.grad_req != 'null':
p_grads = p.list_grad()
arrays.append(p_grads[idx % len(p_grads)])
idx += 1
assert len(arrays) > 0, 'No parameter found available for gradient norm.'
# compute gradient norms
def _norm(array):
# TODO(haibin) norm operator does not support fp16 safe reduction.
# Issue is tracked at: https://github.com/apache/incubator-mxnet/issues/14126
x = array.reshape((-1,)).astype('float32', copy=False)
return nd.dot(x, x)
norm_arrays = [_norm(arr) for arr in arrays]
# group norm arrays by ctx
def group_by_ctx(arr_list):
groups = collections.defaultdict(list)
for arr in arr_list:
ctx = arr.context
groups[ctx].append(arr)
return groups
norm_groups = group_by_ctx(norm_arrays)
# reduce
ctx, dtype = arrays[0].context, 'float32'
norms = [nd.add_n(*g).as_in_context(ctx) for g in norm_groups.values()]
total_norm = nd.add_n(*norms).sqrt()
scale = total_norm / max_norm
# is_finite = 0 if NaN or Inf, 1 otherwise.
is_finite = nd.contrib.isfinite(scale)
# if scale is finite, nd.maximum selects the max between scale and 1. That is,
# 1 is returned if total_norm does not exceed max_norm.
# if scale = NaN or Inf, the result of nd.minimum is undefined. Therefore, we use
# choices.take to return NaN or Inf.
scale_or_one = nd.maximum(nd.ones((1,), dtype=dtype, ctx=ctx), scale)
choices = nd.concat(scale, scale_or_one, dim=0)
chosen_scale = choices.take(is_finite)
return total_norm, chosen_scale, is_finite | python | def grad_global_norm(parameters, max_norm):
"""Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
norm, ratio = grad_global_norm(net.collect_params().values(), max_norm)
trainer.update(batch_size * ratio)
...
Parameters
----------
parameters : list of Parameters
Returns
-------
NDArray
Total norm. Shape is (1,)
NDArray
Ratio for rescaling gradients based on max_norm s.t. grad = grad / ratio.
If total norm is NaN, ratio will be NaN, too. Shape is (1,)
NDArray
Whether the total norm is finite. Shape is (1,)
"""
# collect gradient arrays
arrays = []
idx = 0
for p in parameters:
if p.grad_req != 'null':
p_grads = p.list_grad()
arrays.append(p_grads[idx % len(p_grads)])
idx += 1
assert len(arrays) > 0, 'No parameter found available for gradient norm.'
# compute gradient norms
def _norm(array):
# TODO(haibin) norm operator does not support fp16 safe reduction.
# Issue is tracked at: https://github.com/apache/incubator-mxnet/issues/14126
x = array.reshape((-1,)).astype('float32', copy=False)
return nd.dot(x, x)
norm_arrays = [_norm(arr) for arr in arrays]
# group norm arrays by ctx
def group_by_ctx(arr_list):
groups = collections.defaultdict(list)
for arr in arr_list:
ctx = arr.context
groups[ctx].append(arr)
return groups
norm_groups = group_by_ctx(norm_arrays)
# reduce
ctx, dtype = arrays[0].context, 'float32'
norms = [nd.add_n(*g).as_in_context(ctx) for g in norm_groups.values()]
total_norm = nd.add_n(*norms).sqrt()
scale = total_norm / max_norm
# is_finite = 0 if NaN or Inf, 1 otherwise.
is_finite = nd.contrib.isfinite(scale)
# if scale is finite, nd.maximum selects the max between scale and 1. That is,
# 1 is returned if total_norm does not exceed max_norm.
# if scale = NaN or Inf, the result of nd.minimum is undefined. Therefore, we use
# choices.take to return NaN or Inf.
scale_or_one = nd.maximum(nd.ones((1,), dtype=dtype, ctx=ctx), scale)
choices = nd.concat(scale, scale_or_one, dim=0)
chosen_scale = choices.take(is_finite)
return total_norm, chosen_scale, is_finite | [
"def",
"grad_global_norm",
"(",
"parameters",
",",
"max_norm",
")",
":",
"# collect gradient arrays",
"arrays",
"=",
"[",
"]",
"idx",
"=",
"0",
"for",
"p",
"in",
"parameters",
":",
"if",
"p",
".",
"grad_req",
"!=",
"'null'",
":",
"p_grads",
"=",
"p",
".",
"list_grad",
"(",
")",
"arrays",
".",
"append",
"(",
"p_grads",
"[",
"idx",
"%",
"len",
"(",
"p_grads",
")",
"]",
")",
"idx",
"+=",
"1",
"assert",
"len",
"(",
"arrays",
")",
">",
"0",
",",
"'No parameter found available for gradient norm.'",
"# compute gradient norms",
"def",
"_norm",
"(",
"array",
")",
":",
"# TODO(haibin) norm operator does not support fp16 safe reduction.",
"# Issue is tracked at: https://github.com/apache/incubator-mxnet/issues/14126",
"x",
"=",
"array",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
".",
"astype",
"(",
"'float32'",
",",
"copy",
"=",
"False",
")",
"return",
"nd",
".",
"dot",
"(",
"x",
",",
"x",
")",
"norm_arrays",
"=",
"[",
"_norm",
"(",
"arr",
")",
"for",
"arr",
"in",
"arrays",
"]",
"# group norm arrays by ctx",
"def",
"group_by_ctx",
"(",
"arr_list",
")",
":",
"groups",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"arr",
"in",
"arr_list",
":",
"ctx",
"=",
"arr",
".",
"context",
"groups",
"[",
"ctx",
"]",
".",
"append",
"(",
"arr",
")",
"return",
"groups",
"norm_groups",
"=",
"group_by_ctx",
"(",
"norm_arrays",
")",
"# reduce",
"ctx",
",",
"dtype",
"=",
"arrays",
"[",
"0",
"]",
".",
"context",
",",
"'float32'",
"norms",
"=",
"[",
"nd",
".",
"add_n",
"(",
"*",
"g",
")",
".",
"as_in_context",
"(",
"ctx",
")",
"for",
"g",
"in",
"norm_groups",
".",
"values",
"(",
")",
"]",
"total_norm",
"=",
"nd",
".",
"add_n",
"(",
"*",
"norms",
")",
".",
"sqrt",
"(",
")",
"scale",
"=",
"total_norm",
"/",
"max_norm",
"# is_finite = 0 if NaN or Inf, 1 otherwise.",
"is_finite",
"=",
"nd",
".",
"contrib",
".",
"isfinite",
"(",
"scale",
")",
"# if scale is finite, nd.maximum selects the max between scale and 1. That is,",
"# 1 is returned if total_norm does not exceed max_norm.",
"# if scale = NaN or Inf, the result of nd.minimum is undefined. Therefore, we use",
"# choices.take to return NaN or Inf.",
"scale_or_one",
"=",
"nd",
".",
"maximum",
"(",
"nd",
".",
"ones",
"(",
"(",
"1",
",",
")",
",",
"dtype",
"=",
"dtype",
",",
"ctx",
"=",
"ctx",
")",
",",
"scale",
")",
"choices",
"=",
"nd",
".",
"concat",
"(",
"scale",
",",
"scale_or_one",
",",
"dim",
"=",
"0",
")",
"chosen_scale",
"=",
"choices",
".",
"take",
"(",
"is_finite",
")",
"return",
"total_norm",
",",
"chosen_scale",
",",
"is_finite"
] | Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
norm, ratio = grad_global_norm(net.collect_params().values(), max_norm)
trainer.update(batch_size * ratio)
...
Parameters
----------
parameters : list of Parameters
Returns
-------
NDArray
Total norm. Shape is (1,)
NDArray
Ratio for rescaling gradients based on max_norm s.t. grad = grad / ratio.
If total norm is NaN, ratio will be NaN, too. Shape is (1,)
NDArray
Whether the total norm is finite. Shape is (1,) | [
"Calculate",
"the",
"2",
"-",
"norm",
"of",
"gradients",
"of",
"parameters",
"and",
"how",
"much",
"they",
"should",
"be",
"scaled",
"down",
"such",
"that",
"their",
"2",
"-",
"norm",
"does",
"not",
"exceed",
"max_norm",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/fp16_utils.py#L26-L107 | train |
dmlc/gluon-nlp | scripts/bert/fp16_utils.py | FP16Trainer.backward | def backward(self, loss):
"""backward propagation with loss"""
with mx.autograd.record():
if isinstance(loss, (tuple, list)):
ls = [l * self._scaler.loss_scale for l in loss]
else:
ls = loss * self._scaler.loss_scale
mx.autograd.backward(ls) | python | def backward(self, loss):
"""backward propagation with loss"""
with mx.autograd.record():
if isinstance(loss, (tuple, list)):
ls = [l * self._scaler.loss_scale for l in loss]
else:
ls = loss * self._scaler.loss_scale
mx.autograd.backward(ls) | [
"def",
"backward",
"(",
"self",
",",
"loss",
")",
":",
"with",
"mx",
".",
"autograd",
".",
"record",
"(",
")",
":",
"if",
"isinstance",
"(",
"loss",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"ls",
"=",
"[",
"l",
"*",
"self",
".",
"_scaler",
".",
"loss_scale",
"for",
"l",
"in",
"loss",
"]",
"else",
":",
"ls",
"=",
"loss",
"*",
"self",
".",
"_scaler",
".",
"loss_scale",
"mx",
".",
"autograd",
".",
"backward",
"(",
"ls",
")"
] | backward propagation with loss | [
"backward",
"propagation",
"with",
"loss"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/fp16_utils.py#L138-L145 | train |
dmlc/gluon-nlp | scripts/bert/fp16_utils.py | FP16Trainer.step | def step(self, batch_size, max_norm=None):
"""Makes one step of parameter update. Should be called after
`fp16_optimizer.backward()`, and outside of `record()` scope.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
max_norm : NDArray, optional, default is None
max value for global 2-norm of gradients.
"""
self.fp32_trainer.allreduce_grads()
step_size = batch_size * self._scaler.loss_scale
if max_norm:
norm, ratio, is_finite = grad_global_norm(self.fp32_trainer._params,
max_norm * self._scaler.loss_scale)
step_size = ratio * step_size
if self._support_nan_check:
self.fp32_trainer.update(step_size)
overflow = is_finite.asscalar() < 1
else:
overflow = not np.isfinite(norm.asscalar())
if not overflow:
self.fp32_trainer.update(step_size)
else:
# TODO(haibin) optimize the performance when max_norm is not present
# sequentially adding isnan/isinf results may be slow
if self._support_nan_check:
self.fp32_trainer.update(step_size)
overflow = self._scaler.has_overflow(self.fp32_trainer._params)
else:
overflow = self._scaler.has_overflow(self.fp32_trainer._params)
if not overflow:
self.fp32_trainer.update(step_size)
# update scale based on overflow information
self._scaler.update_scale(overflow) | python | def step(self, batch_size, max_norm=None):
"""Makes one step of parameter update. Should be called after
`fp16_optimizer.backward()`, and outside of `record()` scope.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
max_norm : NDArray, optional, default is None
max value for global 2-norm of gradients.
"""
self.fp32_trainer.allreduce_grads()
step_size = batch_size * self._scaler.loss_scale
if max_norm:
norm, ratio, is_finite = grad_global_norm(self.fp32_trainer._params,
max_norm * self._scaler.loss_scale)
step_size = ratio * step_size
if self._support_nan_check:
self.fp32_trainer.update(step_size)
overflow = is_finite.asscalar() < 1
else:
overflow = not np.isfinite(norm.asscalar())
if not overflow:
self.fp32_trainer.update(step_size)
else:
# TODO(haibin) optimize the performance when max_norm is not present
# sequentially adding isnan/isinf results may be slow
if self._support_nan_check:
self.fp32_trainer.update(step_size)
overflow = self._scaler.has_overflow(self.fp32_trainer._params)
else:
overflow = self._scaler.has_overflow(self.fp32_trainer._params)
if not overflow:
self.fp32_trainer.update(step_size)
# update scale based on overflow information
self._scaler.update_scale(overflow) | [
"def",
"step",
"(",
"self",
",",
"batch_size",
",",
"max_norm",
"=",
"None",
")",
":",
"self",
".",
"fp32_trainer",
".",
"allreduce_grads",
"(",
")",
"step_size",
"=",
"batch_size",
"*",
"self",
".",
"_scaler",
".",
"loss_scale",
"if",
"max_norm",
":",
"norm",
",",
"ratio",
",",
"is_finite",
"=",
"grad_global_norm",
"(",
"self",
".",
"fp32_trainer",
".",
"_params",
",",
"max_norm",
"*",
"self",
".",
"_scaler",
".",
"loss_scale",
")",
"step_size",
"=",
"ratio",
"*",
"step_size",
"if",
"self",
".",
"_support_nan_check",
":",
"self",
".",
"fp32_trainer",
".",
"update",
"(",
"step_size",
")",
"overflow",
"=",
"is_finite",
".",
"asscalar",
"(",
")",
"<",
"1",
"else",
":",
"overflow",
"=",
"not",
"np",
".",
"isfinite",
"(",
"norm",
".",
"asscalar",
"(",
")",
")",
"if",
"not",
"overflow",
":",
"self",
".",
"fp32_trainer",
".",
"update",
"(",
"step_size",
")",
"else",
":",
"# TODO(haibin) optimize the performance when max_norm is not present",
"# sequentially adding isnan/isinf results may be slow",
"if",
"self",
".",
"_support_nan_check",
":",
"self",
".",
"fp32_trainer",
".",
"update",
"(",
"step_size",
")",
"overflow",
"=",
"self",
".",
"_scaler",
".",
"has_overflow",
"(",
"self",
".",
"fp32_trainer",
".",
"_params",
")",
"else",
":",
"overflow",
"=",
"self",
".",
"_scaler",
".",
"has_overflow",
"(",
"self",
".",
"fp32_trainer",
".",
"_params",
")",
"if",
"not",
"overflow",
":",
"self",
".",
"fp32_trainer",
".",
"update",
"(",
"step_size",
")",
"# update scale based on overflow information",
"self",
".",
"_scaler",
".",
"update_scale",
"(",
"overflow",
")"
] | Makes one step of parameter update. Should be called after
`fp16_optimizer.backward()`, and outside of `record()` scope.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
max_norm : NDArray, optional, default is None
max value for global 2-norm of gradients. | [
"Makes",
"one",
"step",
"of",
"parameter",
"update",
".",
"Should",
"be",
"called",
"after",
"fp16_optimizer",
".",
"backward",
"()",
"and",
"outside",
"of",
"record",
"()",
"scope",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/fp16_utils.py#L147-L183 | train |
dmlc/gluon-nlp | scripts/bert/fp16_utils.py | LossScaler.has_overflow | def has_overflow(self, params):
""" detect inf and nan """
is_not_finite = 0
for param in params:
if param.grad_req != 'null':
grad = param.list_grad()[0]
is_not_finite += mx.nd.contrib.isnan(grad).sum()
is_not_finite += mx.nd.contrib.isinf(grad).sum()
# NDArray is implicitly converted to bool
if is_not_finite == 0:
return False
else:
return True | python | def has_overflow(self, params):
""" detect inf and nan """
is_not_finite = 0
for param in params:
if param.grad_req != 'null':
grad = param.list_grad()[0]
is_not_finite += mx.nd.contrib.isnan(grad).sum()
is_not_finite += mx.nd.contrib.isinf(grad).sum()
# NDArray is implicitly converted to bool
if is_not_finite == 0:
return False
else:
return True | [
"def",
"has_overflow",
"(",
"self",
",",
"params",
")",
":",
"is_not_finite",
"=",
"0",
"for",
"param",
"in",
"params",
":",
"if",
"param",
".",
"grad_req",
"!=",
"'null'",
":",
"grad",
"=",
"param",
".",
"list_grad",
"(",
")",
"[",
"0",
"]",
"is_not_finite",
"+=",
"mx",
".",
"nd",
".",
"contrib",
".",
"isnan",
"(",
"grad",
")",
".",
"sum",
"(",
")",
"is_not_finite",
"+=",
"mx",
".",
"nd",
".",
"contrib",
".",
"isinf",
"(",
"grad",
")",
".",
"sum",
"(",
")",
"# NDArray is implicitly converted to bool",
"if",
"is_not_finite",
"==",
"0",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | detect inf and nan | [
"detect",
"inf",
"and",
"nan"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/fp16_utils.py#L187-L199 | train |
dmlc/gluon-nlp | scripts/bert/fp16_utils.py | DynamicLossScaler.update_scale | def update_scale(self, overflow):
"""dynamically update loss scale"""
iter_since_rescale = self._num_steps - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._num_steps
self._overflows_since_rescale += 1
percentage = self._overflows_since_rescale / float(iter_since_rescale)
# we tolerate a certrain amount of NaNs before actually scaling it down
if percentage >= self.tolerance:
self.loss_scale /= self.scale_factor
self._last_rescale_iter = self._num_steps
self._overflows_since_rescale = 0
logging.info('DynamicLossScaler: overflow detected. set loss_scale = %s',
self.loss_scale)
elif (self._num_steps - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._num_steps
self._num_steps += 1 | python | def update_scale(self, overflow):
"""dynamically update loss scale"""
iter_since_rescale = self._num_steps - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._num_steps
self._overflows_since_rescale += 1
percentage = self._overflows_since_rescale / float(iter_since_rescale)
# we tolerate a certrain amount of NaNs before actually scaling it down
if percentage >= self.tolerance:
self.loss_scale /= self.scale_factor
self._last_rescale_iter = self._num_steps
self._overflows_since_rescale = 0
logging.info('DynamicLossScaler: overflow detected. set loss_scale = %s',
self.loss_scale)
elif (self._num_steps - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._num_steps
self._num_steps += 1 | [
"def",
"update_scale",
"(",
"self",
",",
"overflow",
")",
":",
"iter_since_rescale",
"=",
"self",
".",
"_num_steps",
"-",
"self",
".",
"_last_rescale_iter",
"if",
"overflow",
":",
"self",
".",
"_last_overflow_iter",
"=",
"self",
".",
"_num_steps",
"self",
".",
"_overflows_since_rescale",
"+=",
"1",
"percentage",
"=",
"self",
".",
"_overflows_since_rescale",
"/",
"float",
"(",
"iter_since_rescale",
")",
"# we tolerate a certrain amount of NaNs before actually scaling it down",
"if",
"percentage",
">=",
"self",
".",
"tolerance",
":",
"self",
".",
"loss_scale",
"/=",
"self",
".",
"scale_factor",
"self",
".",
"_last_rescale_iter",
"=",
"self",
".",
"_num_steps",
"self",
".",
"_overflows_since_rescale",
"=",
"0",
"logging",
".",
"info",
"(",
"'DynamicLossScaler: overflow detected. set loss_scale = %s'",
",",
"self",
".",
"loss_scale",
")",
"elif",
"(",
"self",
".",
"_num_steps",
"-",
"self",
".",
"_last_overflow_iter",
")",
"%",
"self",
".",
"scale_window",
"==",
"0",
":",
"self",
".",
"loss_scale",
"*=",
"self",
".",
"scale_factor",
"self",
".",
"_last_rescale_iter",
"=",
"self",
".",
"_num_steps",
"self",
".",
"_num_steps",
"+=",
"1"
] | dynamically update loss scale | [
"dynamically",
"update",
"loss",
"scale"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/fp16_utils.py#L236-L253 | train |
dmlc/gluon-nlp | src/gluonnlp/data/sampler.py | FixedBucketSampler.stats | def stats(self):
"""Return a string representing the statistics of the bucketing sampler.
Returns
-------
ret : str
String representing the statistics of the buckets.
"""
ret = '{name}:\n' \
' sample_num={sample_num}, batch_num={batch_num}\n' \
' key={bucket_keys}\n' \
' cnt={bucket_counts}\n' \
' batch_size={bucket_batch_sizes}'\
.format(name=self.__class__.__name__,
sample_num=len(self._lengths),
batch_num=len(self._batch_infos),
bucket_keys=self._bucket_keys,
bucket_counts=[len(sample_ids) for sample_ids in self._bucket_sample_ids],
bucket_batch_sizes=self._bucket_batch_sizes)
return ret | python | def stats(self):
"""Return a string representing the statistics of the bucketing sampler.
Returns
-------
ret : str
String representing the statistics of the buckets.
"""
ret = '{name}:\n' \
' sample_num={sample_num}, batch_num={batch_num}\n' \
' key={bucket_keys}\n' \
' cnt={bucket_counts}\n' \
' batch_size={bucket_batch_sizes}'\
.format(name=self.__class__.__name__,
sample_num=len(self._lengths),
batch_num=len(self._batch_infos),
bucket_keys=self._bucket_keys,
bucket_counts=[len(sample_ids) for sample_ids in self._bucket_sample_ids],
bucket_batch_sizes=self._bucket_batch_sizes)
return ret | [
"def",
"stats",
"(",
"self",
")",
":",
"ret",
"=",
"'{name}:\\n'",
"' sample_num={sample_num}, batch_num={batch_num}\\n'",
"' key={bucket_keys}\\n'",
"' cnt={bucket_counts}\\n'",
"' batch_size={bucket_batch_sizes}'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"__class__",
".",
"__name__",
",",
"sample_num",
"=",
"len",
"(",
"self",
".",
"_lengths",
")",
",",
"batch_num",
"=",
"len",
"(",
"self",
".",
"_batch_infos",
")",
",",
"bucket_keys",
"=",
"self",
".",
"_bucket_keys",
",",
"bucket_counts",
"=",
"[",
"len",
"(",
"sample_ids",
")",
"for",
"sample_ids",
"in",
"self",
".",
"_bucket_sample_ids",
"]",
",",
"bucket_batch_sizes",
"=",
"self",
".",
"_bucket_batch_sizes",
")",
"return",
"ret"
] | Return a string representing the statistics of the bucketing sampler.
Returns
-------
ret : str
String representing the statistics of the buckets. | [
"Return",
"a",
"string",
"representing",
"the",
"statistics",
"of",
"the",
"bucketing",
"sampler",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/sampler.py#L420-L439 | train |
dmlc/gluon-nlp | scripts/language_model/large_word_language_model.py | train | def train():
"""Training loop for language model.
"""
print(model)
from_epoch = 0
model.initialize(mx.init.Xavier(factor_type='out'), ctx=context)
trainer_params = {'learning_rate': args.lr, 'wd': 0, 'eps': args.eps}
trainer = gluon.Trainer(model.collect_params(), 'adagrad', trainer_params)
if args.from_epoch:
from_epoch = args.from_epoch
checkpoint_name = '%s.%s'%(args.save, format(from_epoch - 1, '02d'))
model.load_parameters(checkpoint_name)
trainer.load_states('%s.state'%args.save)
print('Loaded parameters from checkpoint %s'%(checkpoint_name))
model.hybridize(static_alloc=True, static_shape=True)
encoder_params = model.encoder.collect_params().values()
embedding_params = list(model.embedding.collect_params().values())
parallel_model = ParallelBigRNN(model, loss)
parallel = Parallel(len(context), parallel_model)
for epoch in range(from_epoch, args.epochs):
sys.stdout.flush()
total_L = 0.0
start_epoch_time = time.time()
start_log_interval_time = time.time()
hiddens = [model.begin_state(batch_size=args.batch_size,
func=mx.nd.zeros, ctx=ctx) for ctx in context]
nbatch = 0
has_next = True
train_data_iter = iter(train_data)
data, target, mask, sample = next(train_data_iter)
while has_next:
nbatch += 1
hiddens = detach(hiddens)
Ls = []
for _, batch in enumerate(zip(data, target, mask, sample, hiddens)):
parallel.put(batch)
for _ in range(len(data)):
hidden, ls = parallel.get()
# hidden states are ordered by context id
index = context.index(hidden[0].context)
hiddens[index] = hidden
Ls.append(ls)
# prefetch the next batch of data
try:
data, target, mask, sample = next(train_data_iter)
except StopIteration:
has_next = False
# rescale embedding grad
for ctx in context:
x = embedding_params[0].grad(ctx)
x[:] *= args.batch_size
encoder_grad = [p.grad(ctx) for p in encoder_params]
# perform gradient clipping per ctx
gluon.utils.clip_global_norm(encoder_grad, args.clip)
trainer.step(len(context))
total_L += sum([mx.nd.sum(L).asscalar() / args.bptt for L in Ls])
if nbatch % args.log_interval == 0:
cur_L = total_L / args.log_interval / len(context)
ppl = math.exp(cur_L) if cur_L < 100 else float('inf')
print('[Epoch %d Batch %d] loss %.2f, ppl %.2f, '
'throughput %.2f samples/s'
%(epoch, nbatch, cur_L, ppl,
train_batch_size*args.log_interval/(time.time()-start_log_interval_time)))
total_L = 0.0
start_log_interval_time = time.time()
sys.stdout.flush()
end_epoch_time = time.time()
print('Epoch %d took %.2f seconds.'%(epoch, end_epoch_time - start_epoch_time))
mx.nd.waitall()
checkpoint_name = '%s.%s'%(args.save, format(epoch, '02d'))
model.save_parameters(checkpoint_name)
trainer.save_states('%s.state'%args.save) | python | def train():
"""Training loop for language model.
"""
print(model)
from_epoch = 0
model.initialize(mx.init.Xavier(factor_type='out'), ctx=context)
trainer_params = {'learning_rate': args.lr, 'wd': 0, 'eps': args.eps}
trainer = gluon.Trainer(model.collect_params(), 'adagrad', trainer_params)
if args.from_epoch:
from_epoch = args.from_epoch
checkpoint_name = '%s.%s'%(args.save, format(from_epoch - 1, '02d'))
model.load_parameters(checkpoint_name)
trainer.load_states('%s.state'%args.save)
print('Loaded parameters from checkpoint %s'%(checkpoint_name))
model.hybridize(static_alloc=True, static_shape=True)
encoder_params = model.encoder.collect_params().values()
embedding_params = list(model.embedding.collect_params().values())
parallel_model = ParallelBigRNN(model, loss)
parallel = Parallel(len(context), parallel_model)
for epoch in range(from_epoch, args.epochs):
sys.stdout.flush()
total_L = 0.0
start_epoch_time = time.time()
start_log_interval_time = time.time()
hiddens = [model.begin_state(batch_size=args.batch_size,
func=mx.nd.zeros, ctx=ctx) for ctx in context]
nbatch = 0
has_next = True
train_data_iter = iter(train_data)
data, target, mask, sample = next(train_data_iter)
while has_next:
nbatch += 1
hiddens = detach(hiddens)
Ls = []
for _, batch in enumerate(zip(data, target, mask, sample, hiddens)):
parallel.put(batch)
for _ in range(len(data)):
hidden, ls = parallel.get()
# hidden states are ordered by context id
index = context.index(hidden[0].context)
hiddens[index] = hidden
Ls.append(ls)
# prefetch the next batch of data
try:
data, target, mask, sample = next(train_data_iter)
except StopIteration:
has_next = False
# rescale embedding grad
for ctx in context:
x = embedding_params[0].grad(ctx)
x[:] *= args.batch_size
encoder_grad = [p.grad(ctx) for p in encoder_params]
# perform gradient clipping per ctx
gluon.utils.clip_global_norm(encoder_grad, args.clip)
trainer.step(len(context))
total_L += sum([mx.nd.sum(L).asscalar() / args.bptt for L in Ls])
if nbatch % args.log_interval == 0:
cur_L = total_L / args.log_interval / len(context)
ppl = math.exp(cur_L) if cur_L < 100 else float('inf')
print('[Epoch %d Batch %d] loss %.2f, ppl %.2f, '
'throughput %.2f samples/s'
%(epoch, nbatch, cur_L, ppl,
train_batch_size*args.log_interval/(time.time()-start_log_interval_time)))
total_L = 0.0
start_log_interval_time = time.time()
sys.stdout.flush()
end_epoch_time = time.time()
print('Epoch %d took %.2f seconds.'%(epoch, end_epoch_time - start_epoch_time))
mx.nd.waitall()
checkpoint_name = '%s.%s'%(args.save, format(epoch, '02d'))
model.save_parameters(checkpoint_name)
trainer.save_states('%s.state'%args.save) | [
"def",
"train",
"(",
")",
":",
"print",
"(",
"model",
")",
"from_epoch",
"=",
"0",
"model",
".",
"initialize",
"(",
"mx",
".",
"init",
".",
"Xavier",
"(",
"factor_type",
"=",
"'out'",
")",
",",
"ctx",
"=",
"context",
")",
"trainer_params",
"=",
"{",
"'learning_rate'",
":",
"args",
".",
"lr",
",",
"'wd'",
":",
"0",
",",
"'eps'",
":",
"args",
".",
"eps",
"}",
"trainer",
"=",
"gluon",
".",
"Trainer",
"(",
"model",
".",
"collect_params",
"(",
")",
",",
"'adagrad'",
",",
"trainer_params",
")",
"if",
"args",
".",
"from_epoch",
":",
"from_epoch",
"=",
"args",
".",
"from_epoch",
"checkpoint_name",
"=",
"'%s.%s'",
"%",
"(",
"args",
".",
"save",
",",
"format",
"(",
"from_epoch",
"-",
"1",
",",
"'02d'",
")",
")",
"model",
".",
"load_parameters",
"(",
"checkpoint_name",
")",
"trainer",
".",
"load_states",
"(",
"'%s.state'",
"%",
"args",
".",
"save",
")",
"print",
"(",
"'Loaded parameters from checkpoint %s'",
"%",
"(",
"checkpoint_name",
")",
")",
"model",
".",
"hybridize",
"(",
"static_alloc",
"=",
"True",
",",
"static_shape",
"=",
"True",
")",
"encoder_params",
"=",
"model",
".",
"encoder",
".",
"collect_params",
"(",
")",
".",
"values",
"(",
")",
"embedding_params",
"=",
"list",
"(",
"model",
".",
"embedding",
".",
"collect_params",
"(",
")",
".",
"values",
"(",
")",
")",
"parallel_model",
"=",
"ParallelBigRNN",
"(",
"model",
",",
"loss",
")",
"parallel",
"=",
"Parallel",
"(",
"len",
"(",
"context",
")",
",",
"parallel_model",
")",
"for",
"epoch",
"in",
"range",
"(",
"from_epoch",
",",
"args",
".",
"epochs",
")",
":",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"total_L",
"=",
"0.0",
"start_epoch_time",
"=",
"time",
".",
"time",
"(",
")",
"start_log_interval_time",
"=",
"time",
".",
"time",
"(",
")",
"hiddens",
"=",
"[",
"model",
".",
"begin_state",
"(",
"batch_size",
"=",
"args",
".",
"batch_size",
",",
"func",
"=",
"mx",
".",
"nd",
".",
"zeros",
",",
"ctx",
"=",
"ctx",
")",
"for",
"ctx",
"in",
"context",
"]",
"nbatch",
"=",
"0",
"has_next",
"=",
"True",
"train_data_iter",
"=",
"iter",
"(",
"train_data",
")",
"data",
",",
"target",
",",
"mask",
",",
"sample",
"=",
"next",
"(",
"train_data_iter",
")",
"while",
"has_next",
":",
"nbatch",
"+=",
"1",
"hiddens",
"=",
"detach",
"(",
"hiddens",
")",
"Ls",
"=",
"[",
"]",
"for",
"_",
",",
"batch",
"in",
"enumerate",
"(",
"zip",
"(",
"data",
",",
"target",
",",
"mask",
",",
"sample",
",",
"hiddens",
")",
")",
":",
"parallel",
".",
"put",
"(",
"batch",
")",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"hidden",
",",
"ls",
"=",
"parallel",
".",
"get",
"(",
")",
"# hidden states are ordered by context id",
"index",
"=",
"context",
".",
"index",
"(",
"hidden",
"[",
"0",
"]",
".",
"context",
")",
"hiddens",
"[",
"index",
"]",
"=",
"hidden",
"Ls",
".",
"append",
"(",
"ls",
")",
"# prefetch the next batch of data",
"try",
":",
"data",
",",
"target",
",",
"mask",
",",
"sample",
"=",
"next",
"(",
"train_data_iter",
")",
"except",
"StopIteration",
":",
"has_next",
"=",
"False",
"# rescale embedding grad",
"for",
"ctx",
"in",
"context",
":",
"x",
"=",
"embedding_params",
"[",
"0",
"]",
".",
"grad",
"(",
"ctx",
")",
"x",
"[",
":",
"]",
"*=",
"args",
".",
"batch_size",
"encoder_grad",
"=",
"[",
"p",
".",
"grad",
"(",
"ctx",
")",
"for",
"p",
"in",
"encoder_params",
"]",
"# perform gradient clipping per ctx",
"gluon",
".",
"utils",
".",
"clip_global_norm",
"(",
"encoder_grad",
",",
"args",
".",
"clip",
")",
"trainer",
".",
"step",
"(",
"len",
"(",
"context",
")",
")",
"total_L",
"+=",
"sum",
"(",
"[",
"mx",
".",
"nd",
".",
"sum",
"(",
"L",
")",
".",
"asscalar",
"(",
")",
"/",
"args",
".",
"bptt",
"for",
"L",
"in",
"Ls",
"]",
")",
"if",
"nbatch",
"%",
"args",
".",
"log_interval",
"==",
"0",
":",
"cur_L",
"=",
"total_L",
"/",
"args",
".",
"log_interval",
"/",
"len",
"(",
"context",
")",
"ppl",
"=",
"math",
".",
"exp",
"(",
"cur_L",
")",
"if",
"cur_L",
"<",
"100",
"else",
"float",
"(",
"'inf'",
")",
"print",
"(",
"'[Epoch %d Batch %d] loss %.2f, ppl %.2f, '",
"'throughput %.2f samples/s'",
"%",
"(",
"epoch",
",",
"nbatch",
",",
"cur_L",
",",
"ppl",
",",
"train_batch_size",
"*",
"args",
".",
"log_interval",
"/",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_log_interval_time",
")",
")",
")",
"total_L",
"=",
"0.0",
"start_log_interval_time",
"=",
"time",
".",
"time",
"(",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"end_epoch_time",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'Epoch %d took %.2f seconds.'",
"%",
"(",
"epoch",
",",
"end_epoch_time",
"-",
"start_epoch_time",
")",
")",
"mx",
".",
"nd",
".",
"waitall",
"(",
")",
"checkpoint_name",
"=",
"'%s.%s'",
"%",
"(",
"args",
".",
"save",
",",
"format",
"(",
"epoch",
",",
"'02d'",
")",
")",
"model",
".",
"save_parameters",
"(",
"checkpoint_name",
")",
"trainer",
".",
"save_states",
"(",
"'%s.state'",
"%",
"args",
".",
"save",
")"
] | Training loop for language model. | [
"Training",
"loop",
"for",
"language",
"model",
"."
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/language_model/large_word_language_model.py#L210-L290 | train |
dmlc/gluon-nlp | scripts/language_model/large_word_language_model.py | evaluate | def evaluate():
""" Evaluate loop for the trained model """
print(eval_model)
eval_model.initialize(mx.init.Xavier(), ctx=context[0])
eval_model.hybridize(static_alloc=True, static_shape=True)
epoch = args.from_epoch if args.from_epoch else 0
while epoch < args.epochs:
checkpoint_name = '%s.%s'%(args.save, format(epoch, '02d'))
if not os.path.exists(checkpoint_name):
print('Wait for a new checkpoint...')
# check again after 600 seconds
time.sleep(600)
continue
eval_model.load_parameters(checkpoint_name)
print('Loaded parameters from checkpoint %s'%(checkpoint_name))
start_epoch_time = time.time()
final_test_L = test(test_data, test_batch_size, ctx=context[0])
end_epoch_time = time.time()
print('[Epoch %d] test loss %.2f, test ppl %.2f'%
(epoch, final_test_L, math.exp(final_test_L)))
print('Epoch %d took %.2f seconds.'%(epoch, end_epoch_time - start_epoch_time))
sys.stdout.flush()
epoch += 1 | python | def evaluate():
""" Evaluate loop for the trained model """
print(eval_model)
eval_model.initialize(mx.init.Xavier(), ctx=context[0])
eval_model.hybridize(static_alloc=True, static_shape=True)
epoch = args.from_epoch if args.from_epoch else 0
while epoch < args.epochs:
checkpoint_name = '%s.%s'%(args.save, format(epoch, '02d'))
if not os.path.exists(checkpoint_name):
print('Wait for a new checkpoint...')
# check again after 600 seconds
time.sleep(600)
continue
eval_model.load_parameters(checkpoint_name)
print('Loaded parameters from checkpoint %s'%(checkpoint_name))
start_epoch_time = time.time()
final_test_L = test(test_data, test_batch_size, ctx=context[0])
end_epoch_time = time.time()
print('[Epoch %d] test loss %.2f, test ppl %.2f'%
(epoch, final_test_L, math.exp(final_test_L)))
print('Epoch %d took %.2f seconds.'%(epoch, end_epoch_time - start_epoch_time))
sys.stdout.flush()
epoch += 1 | [
"def",
"evaluate",
"(",
")",
":",
"print",
"(",
"eval_model",
")",
"eval_model",
".",
"initialize",
"(",
"mx",
".",
"init",
".",
"Xavier",
"(",
")",
",",
"ctx",
"=",
"context",
"[",
"0",
"]",
")",
"eval_model",
".",
"hybridize",
"(",
"static_alloc",
"=",
"True",
",",
"static_shape",
"=",
"True",
")",
"epoch",
"=",
"args",
".",
"from_epoch",
"if",
"args",
".",
"from_epoch",
"else",
"0",
"while",
"epoch",
"<",
"args",
".",
"epochs",
":",
"checkpoint_name",
"=",
"'%s.%s'",
"%",
"(",
"args",
".",
"save",
",",
"format",
"(",
"epoch",
",",
"'02d'",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"checkpoint_name",
")",
":",
"print",
"(",
"'Wait for a new checkpoint...'",
")",
"# check again after 600 seconds",
"time",
".",
"sleep",
"(",
"600",
")",
"continue",
"eval_model",
".",
"load_parameters",
"(",
"checkpoint_name",
")",
"print",
"(",
"'Loaded parameters from checkpoint %s'",
"%",
"(",
"checkpoint_name",
")",
")",
"start_epoch_time",
"=",
"time",
".",
"time",
"(",
")",
"final_test_L",
"=",
"test",
"(",
"test_data",
",",
"test_batch_size",
",",
"ctx",
"=",
"context",
"[",
"0",
"]",
")",
"end_epoch_time",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'[Epoch %d] test loss %.2f, test ppl %.2f'",
"%",
"(",
"epoch",
",",
"final_test_L",
",",
"math",
".",
"exp",
"(",
"final_test_L",
")",
")",
")",
"print",
"(",
"'Epoch %d took %.2f seconds.'",
"%",
"(",
"epoch",
",",
"end_epoch_time",
"-",
"start_epoch_time",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"epoch",
"+=",
"1"
] | Evaluate loop for the trained model | [
"Evaluate",
"loop",
"for",
"the",
"trained",
"model"
] | 4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/language_model/large_word_language_model.py#L345-L367 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.