repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
amuse
amuse-main/doc/tutorial/nearestneighbor/plummer3.py
from interface import NearestNeighbor from amuse.lab import * from amuse.io import text if __name__ == '__main__': number_of_particles = 1000 particles = new_plummer_sphere(1000) code = NearestNeighbor() code.set_maximum_number_of_particles(5000) code.commit_parameters code.particles.add_particles(particles) code.run() local_particles = code.particles.copy() delta = local_particles.neighbor1.as_set().position - local_particles.position local_particles.dx = delta[...,0] local_particles.dy = delta[...,1] local_particles.dz = delta[...,2] output = text.TableFormattedText("output.txt", set = local_particles) output.attribute_names = ['x','y','z', 'dx', 'dy','dz'] output.store()
753
26.925926
82
py
amuse
amuse-main/doc/sphinxext/io_directive.py
from docutils import nodes from docutils.parsers.rst import directives from docutils.parsers.rst import Directive from amuse import io import textwrap from sphinx import addnodes class IoOptions(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = False option_spec = {} has_content = False def run(self): options = io.get_options_for_format(self.arguments[0]) field_list_node = nodes.definition_list() for name, description, value in options: item = nodes.definition_list_item() item.append(nodes.term(name + ' ',name+ ' ')) item.append(nodes.definition('', nodes.paragraph('', description))) field_list_node.append(item) return [field_list_node] def setup(app): directives.register_directive("iooptions", IoOptions)
874
27.225806
79
py
amuse
amuse-main/doc/sphinxext/gen_rst.py
""" generate the rst files for the examples by iterating over the pylab examples """ import os, glob import os import re import sys fileList = [] def out_of_date(original, derived): """ Returns True if derivative is out-of-date wrt original, both of which are full file paths. TODO: this check isn't adequate in some cases. Eg, if we discover a bug when building the examples, the original and derived will be unchanged but we still want to force a rebuild. """ return (not os.path.exists(derived) or os.stat(derived).st_mtime < os.stat(original).st_mtime) noplot_regex = re.compile(r"#\s*-\*-\s*noplot\s*-\*-") def generate_example_rst(app): rootdir = os.path.join(app.builder.srcdir, 'amuse_examples') exampledir = os.path.join(app.builder.srcdir, 'examples') if not os.path.exists(exampledir): os.makedirs(exampledir) datad = {} for root, subFolders, files in os.walk(rootdir): for fname in files: if ( fname.startswith('.') or fname.startswith('#') or fname.startswith('_') or fname.find('.svn')>=0 or not fname.endswith('.py') ): continue fullpath = os.path.join(root,fname) contents = file(fullpath).read() # indent relpath = os.path.split(root)[-1] datad.setdefault(relpath, []).append((fullpath, fname, contents)) subdirs = list(datad.keys()) subdirs.sort() fhindex = file(os.path.join(exampledir, 'index.txt'), 'w') fhindex.write("""\ .. _examples-index: #################### AMUSE Examples #################### .. htmlonly:: :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 """) for subdir in subdirs: rstdir = os.path.join(exampledir, subdir) if not os.path.exists(rstdir): os.makedirs(rstdir) outputdir = os.path.join(app.builder.outdir, 'examples') if not os.path.exists(outputdir): os.makedirs(outputdir) outputdir = os.path.join(outputdir, subdir) if not os.path.exists(outputdir): os.makedirs(outputdir) subdirIndexFile = os.path.join(rstdir, 'index.txt') fhsubdirIndex = file(subdirIndexFile, 'w') fhindex.write(' %s/index.txt\n\n'%subdir) fhsubdirIndex.write("""\ .. 
_%s-examples-index: ############################################## %s Examples ############################################## .. htmlonly:: :Release: |version| :Date: |today| .. toctree:: :maxdepth: 1 """%(subdir, subdir)) sys.stdout.write(subdir + ", ") sys.stdout.flush() data = datad[subdir] data.sort() for fullpath, fname, contents in data: basename, ext = os.path.splitext(fname) outputfile = os.path.join(outputdir, fname) #thumbfile = os.path.join(thumb_dir, '%s.png'%basename) #print ' static_dir=%s, basename=%s, fullpath=%s, fname=%s, thumb_dir=%s, thumbfile=%s'%(static_dir, basename, fullpath, fname, thumb_dir, thumbfile) rstfile = '%s.txt'%basename outrstfile = os.path.join(rstdir, rstfile) fhsubdirIndex.write(' %s\n'%rstfile) if not out_of_date(fullpath, outrstfile): continue fh = file(outrstfile, 'w') fh.write('.. _%s-%s:\n\n'%(subdir, basename)) title = '%s example code: %s'%(subdir, fname) #title = '<img src=%s> %s example code: %s'%(thumbfile, subdir, fname) fh.write(title + '\n') fh.write('='*len(title) + '\n\n') do_plot = ( subdir in ( 'simple', ) and not noplot_regex.search(contents) ) if do_plot: fh.write("\n\n.. plot:: %s\n\n.. code-block:: python\n\n" % fullpath) else: fh.write("[`source code <%s>`_]\n\n.. code-block:: python\n\n" % fname) fhstatic = file(outputfile, 'w') fhstatic.write(contents) fhstatic.close() # indent the contents contents = '\n'.join([' %s'%row.rstrip() for row in contents.split('\n')]) fh.write(contents) fh.write('\n\nKeywords: python, amuse, astrophysics, matplotlib, pylab, example, codex (see :ref:`how-to-search-examples`)') fh.close() fhsubdirIndex.close() fhindex.close() print() def setup(app): app.connect('builder-inited', generate_example_rst)
4,615
28.21519
164
py
amuse
amuse-main/doc/sphinxext/autodoc_parameters.py
from docutils import nodes from docutils.parsers.rst import directives from docutils.parsers.rst import Directive import textwrap import sys from sphinx import addnodes from amuse.rfi.core import is_mpd_running from sphinx.ext.autodoc import AttributeDocumenter, ModuleLevelDocumenter from sphinx.util.docstrings import prepare_docstring # Taken from gh#sphinx-doc/sphinx#9326 def force_decode(string: str, encoding: str) -> str: """Forcibly get a unicode string out of a bytestring.""" #~ warnings.warn('force_decode() is deprecated.', #~ RemovedInSphinx50Warning, stacklevel=2) if isinstance(string, bytes): try: if encoding: string = string.decode(encoding) else: # try decoding with utf-8, should only work for real UTF-8 string = string.decode() except UnicodeError: # last resort -- can't fail string = string.decode('latin1') return string class ParametersAttributeDocumenter(AttributeDocumenter): """ Specialized Documenter subclass for parameters attribute of interfaces """ objtype = 'parametersattribute' directivetype = 'attribute' member_order = 60 # must be higher than AttributeDocumenter priority = 11 @classmethod def can_document_member(cls, member, membername, isattr, parent): return False def add_content(self, more_content, no_docstring=False): if not is_mpd_running(): return try: cls = self.object instance = cls(must_start_worker = False, must_handle_state = False) try: #instance.initialize_code() parameter_documentation = self.get_sphinx_doc_for_parameters(instance.parameters) finally: instance.stop() except Exception as ex: print(ex) return if self.analyzer: # prevent encoding errors when the file name is non-ASCII filename = str(self.analyzer.srcname) sourcename = '%s:docstring of %s' % (filename, self.fullname) else: sourcename = 'docstring of %s' % self.fullname encoding = self.analyzer # and self.analyzer.encoding lines = prepare_docstring(force_decode(parameter_documentation, encoding)) for i, line in enumerate(self.process_doc([lines,])): 
self.add_line(line, sourcename, i) def get_sphinx_doc_for_parameters(self, parameters): lines = [] for parameter_definition in parameters._definitions: lines.append('.. py:attribute:: '+ self.objpath[-1] +'.' + parameter_definition.name) lines.append('') dedented = textwrap.dedent(parameter_definition.description) for x in dedented.splitlines(): lines.append(' ' + x) try: lines.append(' ' + "(default value:" + str(parameters.get_default_value_for(parameter_definition.name)) + ")") except Exception as ex: lines.append(' ' + "(no default value)") lines.append('') lines.append('') return '\n'.join(lines) def import_object(self): """ Import the object given by *self.modname* and *self.objpath* and sets it as *self.object*. Returns True if successful, False if an error occurred. """ #~ self._datadescriptor = False try: __import__(self.modname) parent = None obj = self.module = sys.modules[self.modname] for part in self.objpath[:-1]: parent = obj obj = self.get_attr(obj, part) self.object_name = part self.parent = parent self.object = obj return True # this used to only catch SyntaxError, ImportError and AttributeError, # but importing modules with side effects can raise all kinds of errors except Exception as err: if self.env.app and not self.env.app.quiet: self.env.app.info(traceback.format_exc().rstrip()) self.directive.warn( 'autodoc can\'t import/find %s %r, it reported error: ' '"%s", please check your spelling and sys.path' % (self.objtype, str(self.fullname), err)) self.env.note_reread() return False def setup(app): app.add_autodocumenter(ParametersAttributeDocumenter)
4,706
34.390977
129
py
amuse
amuse-main/doc/sphinxext/gen_gallery.py
# generate a thumbnail gallery of examples template = """\ {%% extends "layout.html" %%} {%% set title = "Thumbnail gallery" %%} {%% block body %%} <h3>Click on any image to see full size image and source code</h3> <br/> %s {%% endblock %%} """ import os, glob, re, sys, warnings import matplotlib.image as image multiimage = re.compile('(.*)_\d\d') def make_thumbnail(args): image.thumbnail(args[0], args[1], 0.3) def out_of_date(original, derived): return (not os.path.exists(derived) or os.stat(derived).st_mtime < os.stat(original).st_mtime) def gen_gallery(app, doctree): if app.builder.name != 'html': return outdir = app.builder.outdir rootdir = 'plot_directive/amuse_examples' # images we want to skip for the gallery because they are an unusual # size that doesn't layout well in a table, or because they may be # redundant with other images or uninteresting skips = set([ ]) data = [] thumbnails = {} for subdir in ('simple', ): origdir = os.path.join('build', rootdir, subdir) thumbdir = os.path.join(outdir, rootdir, subdir, 'thumbnails') if not os.path.exists(thumbdir): os.makedirs(thumbdir) for filename in sorted(glob.glob(os.path.join(origdir, '*.png'))): if filename.endswith("hires.png"): continue path, filename = os.path.split(filename) basename, ext = os.path.splitext(filename) if basename in skips: continue # Create thumbnails based on images in tmpdir, and place # them within the build tree orig_path = str(os.path.join(origdir, filename)) thumb_path = str(os.path.join(thumbdir, filename)) if out_of_date(orig_path, thumb_path) or True: thumbnails[orig_path] = thumb_path m = multiimage.match(basename) if m is None: pyfile = '%s.py'%basename else: basename = m.group(1) pyfile = '%s.py'%basename data.append((subdir, basename, os.path.join(rootdir, subdir, 'thumbnails', filename))) link_template = """\ <a href="%s"><img src="%s" border="0" alt="%s"/></a> """ if len(data) == 0: warnings.warn("No thumbnails were found") rows = [] for (subdir, basename, thumbfile) in 
data: if thumbfile is not None: link = 'examples/%s/%s.html'%(subdir, basename) rows.append(link_template%(link, thumbfile, basename)) # Only write out the file if the contents have actually changed. # Otherwise, this triggers a full rebuild of the docs content = template%'\n'.join(rows) gallery_path = os.path.join(app.builder.srcdir, '_templates', 'gallery.html') if os.path.exists(gallery_path): fh = file(gallery_path, 'r') regenerate = fh.read() != content fh.close() else: regenerate = True if regenerate: fh = file(gallery_path, 'w') fh.write(content) fh.close() if len(data) > 0: try: import multiprocessing app.builder.info("generating thumbnails... ", nonl=True) print(list(thumbnails.items())) pool = multiprocessing.Pool() pool.map(make_thumbnail, iter(thumbnails.items())) pool.close() pool.join() app.builder.info("done") except ImportError: for key in app.builder.status_iterator( iter(thumbnails.keys()), "generating thumbnails... ", length=len(thumbnails)): image.thumbnail(key, thumbnails[key], 0.3) def setup(app): app.connect('env-updated', gen_gallery)
3,844
30.008065
81
py
amuse
amuse-main/doc/interactive_tutorial/create_title.py
import sys import os.path def create_title(name): filename, ext = os.path.splitext(name) filename = os.path.basename(filename) filename = filename.replace('-',' - ') filename = filename.replace('_',' ') lines = [] lines.append('='*len(filename)) lines.append(filename) lines.append('='*len(filename)) lines.append('') return '\n'.join(lines) if __name__ == '__main__': print(create_title(sys.argv[1]))
449
24
42
py
Paddle
Paddle-master/tools/timeline.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import sys import unittest import google.protobuf.text_format as text_format import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2 parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--profile_path', type=str, default='', help='Input profile file name. If there are multiple file, the format ' 'should be trainer1=file1,trainer2=file2,ps=file3') parser.add_argument( '--timeline_path', type=str, default='', help='Output timeline file name.') args = parser.parse_args() class _ChromeTraceFormatter(object): def __init__(self): self._events = [] self._metadata = [] def _create_event(self, ph, category, name, pid, tid, timestamp): """Creates a new Chrome Trace event. For details of the file format, see: https://github.com/catapult-project/catapult/blob/master/tracing/README.md Args: ph: The type of event - usually a single character. category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. timestamp: The timestamp of this event as a long integer. Returns: A JSON compatible event object. 
""" event = {} event['ph'] = ph event['cat'] = category event['name'] = name event['pid'] = pid event['tid'] = tid event['ts'] = timestamp return event def emit_pid(self, name, pid): """Adds a process metadata event to the trace. Args: name: The process name as a string. pid: Identifier of the process as an integer. """ event = {} event['name'] = 'process_name' event['ph'] = 'M' event['pid'] = pid event['args'] = {'name': name} self._metadata.append(event) def emit_region(self, timestamp, duration, pid, tid, category, name, args): """Adds a region event to the trace. Args: timestamp: The start timestamp of this region as a long integer. duration: The duration of this region as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. category: The event category as a string. name: The event name as a string. args: A JSON-compatible dictionary of event arguments. """ event = self._create_event('X', category, name, pid, tid, timestamp) event['dur'] = duration event['args'] = args self._events.append(event) def format_to_string(self, pretty=False): """Formats the chrome trace to a string. Args: pretty: (Optional.) If True, produce human-readable JSON output. Returns: A JSON-formatted string in Chrome Trace format. 
""" trace = {} trace['traceEvents'] = self._metadata + self._events if pretty: return json.dumps(trace, indent=4, separators=(',', ': ')) else: return json.dumps(trace, separators=(',', ':')) class Timeline(object): def __init__(self, profile_dict): self._profile_dict = profile_dict self._pid = 0 self._devices = dict() self._chrome_trace = _ChromeTraceFormatter() def _allocate_pid(self): cur_pid = self._pid self._pid += 1 return cur_pid def _allocate_pids(self): for k, profile_pb in self._profile_dict.iteritems(): for event in profile_pb.events: if event.type == profiler_pb2.Event.CPU: if (k, event.device_id, "CPU") not in self._devices: pid = self._allocate_pid() self._devices[(k, event.device_id, "CPU")] = pid self._chrome_trace.emit_pid("%s:cpu:block:%d" % (k, event.device_id), pid) elif event.type == profiler_pb2.Event.GPUKernel: if (k, event.device_id, "GPUKernel") not in self._devices: pid = self._allocate_pid() self._devices[(k, event.device_id, "GPUKernel")] = pid self._chrome_trace.emit_pid("%s:gpu:%d" % (k, event.device_id), pid) def _allocate_events(self): for k, profile_pb in self._profile_dict.iteritems(): for event in profile_pb.events: if event.type == profiler_pb2.Event.CPU: type = "CPU" elif event.type == profiler_pb2.Event.GPUKernel: type = "GPUKernel" pid = self._devices[(k, event.device_id, type)] args = {'name': event.name} if event.memcopy.bytes > 0: args = {'mem_bytes': event.memcopy.bytes} # TODO(panyx0718): Chrome tracing only handles ms. However, some # ops takes micro-seconds. Hence, we keep the ns here. 
self._chrome_trace.emit_region( event.start_ns, (event.end_ns - event.start_ns) / 1.0, pid, event.sub_device_id, 'Op', event.name, args) def generate_chrome_trace(self): self._allocate_pids() self._allocate_events() return self._chrome_trace.format_to_string() profile_path = '/tmp/profile' if args.profile_path: profile_path = args.profile_path timeline_path = '/tmp/timeline' if args.timeline_path: timeline_path = args.timeline_path profile_paths = profile_path.split(',') profile_dict = dict() if len(profile_paths) == 1: with open(profile_path, 'r') as f: profile_s = f.read() profile_pb = profiler_pb2.Profile() profile_pb.ParseFromString(profile_s) profile_dict['trainer'] = profile_pb else: for profile_path in profile_paths: k, v = profile_path.split('=') with open(v, 'r') as f: profile_s = f.read() profile_pb = profiler_pb2.Profile() profile_pb.ParseFromString(profile_s) profile_dict[k] = profile_pb tl = Timeline(profile_dict) with open(timeline_path, 'w') as f: f.write(tl.generate_chrome_trace())
7,120
36.088542
82
py
Paddle
Paddle-master/tools/test_runner.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import os import sys import paddle.fluid as fluid import importlib import cStringIO def main(): sys.path.append(os.getcwd()) some_test_failed = False for module_name in sys.argv[1:]: buffer = cStringIO.StringIO() main = fluid.Program() startup = fluid.Program() scope = fluid.core.Scope() with fluid.program_guard(main, startup): with fluid.scope_guard(scope): with fluid.unique_name.guard(): test_loader = unittest.TestLoader() module = importlib.import_module(module_name) tests = test_loader.loadTestsFromModule(module) res = unittest.TextTestRunner(stream=buffer).run(tests) if not res.wasSuccessful(): some_test_failed = True print >> sys.stderr, module_name, 'failed\n', buffer.getvalue( ) if some_test_failed: exit(1) if __name__ == '__main__': main()
1,659
32.877551
86
py
Paddle
Paddle-master/tools/__init__.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
612
42.785714
74
py
Paddle
Paddle-master/tools/continuous_integration/bisect.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A script to bisect the mainline commits and find the culprit commit. # The default 'git bisect' checks feature branches, which is not desired # because commits in feature branch might not pass tests or compile. # # Example: # python ../bisect.py --git_dir=$PWD/../Paddle --build_dir=$PWD \ # --good_commit=3647ed6 --bad_commit=279aa6 \ # --test_target=test_rnn_encoder_decoder import argparse import os import subprocess import sys parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--git_dir', type=str, default='', help='git repo root directory.') parser.add_argument( '--build_dir', type=str, default='', help='build directory.') parser.add_argument( '--good_commit', type=str, default='', help='The old commit known to be good.') parser.add_argument( '--bad_commit', type=str, default='', help='The new commit known to be bad.') parser.add_argument( '--test_target', type=str, default='', help='The test target to evaluate.') parser.add_argument( '--bisect_branch', type=str, default='develop', help='The mainline branch to bisect (feature branch ignored.') parser.add_argument( '--log_file', type=str, default='', help='The file use to log outputs.') parser.add_argument( '--test_times', type=int, default=10, help="Number of times to run the test target.") parser.add_argument( '--build_parallel', type=int, default=32, help="make parallelism.") args = 
parser.parse_args() if not args.log_file: args.log_file = '/tmp/%s...%s.log' % (args.good_commit, args.bad_commit) def print_arguments(): print('----------- Configuration Arguments -----------') for arg, value in sorted(vars(args).iteritems()): print('%s: %s' % (arg, value)) print('------------------------------------------------') print_arguments() # List the commits in mainline branch. os.chdir(args.git_dir) ret = subprocess.check_output( [ 'git rev-list --first-parent %s...%s' % (args.good_commit, args.bad_commit) ], shell=True) sys.stdout.write('commits found:\n%s\n' % ret) commits = ret.strip().split('\n') os.chdir(args.build_dir) # Clean up previous logs. subprocess.check_output(['echo "" > %s' % args.log_file], shell=True) last_culprit = '' while True: # Get to the mainline branch and clean up os.chdir(args.git_dir) subprocess.check_output( [ 'git checkout %s && git clean -fd && git checkout .' % args.bisect_branch ], shell=True) if not commits: sys.stdout.write('no commits to bisect\n') exit() # checkout the picked branch. pick_idx = len(commits) / 2 pick = commits[pick_idx] os.chdir(args.git_dir) subprocess.check_output(['git checkout %s' % pick], shell=True) # Clean builds and compile. # We assume mainline commits should always compile. os.chdir(args.build_dir) sys.stdout.write('eval commit %d/%d: %s\n' % (pick_idx, len(commits), pick)) # Link error can happen without complete clean up. cmd = ('rm -rf * && ' 'cmake -DWITH_TESTING=ON %s >> %s && make -j%s >> %s' % (args.git_dir, args.log_file, args.build_parallel, args.log_file)) sys.stdout.write('cmd: %s\n' % cmd) try: subprocess.check_output([cmd], shell=True) except subprocess.CalledProcessError as e: sys.stderr.write('failed to build commit: %s\n%s\n' % (pick, e)) exit() # test the selected branch. 
passed = True try: cmd = ('ctest --repeat-until-fail %s -R %s >> %s' % (args.test_times, args.test_target, args.log_file)) sys.stdout.write('cmd: %s\n' % cmd) subprocess.check_output([cmd], shell=True) except subprocess.CalledProcessError as e: passed = False last_culprit = pick sys.stdout.write('eval %s passed: %s\n' % (pick, passed)) if passed: if pick_idx == 0: break commits = commits[:pick_idx] else: if pick_idx + 1 >= len(commits): break commits = commits[pick_idx + 1:] sys.stdout.write('Culprit commit: %s\n' % last_culprit)
4,828
33.007042
80
py
Paddle
Paddle-master/tools/aws_benchmarking/client/cluster_launcher.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import time import math import logging import copy import netaddr import boto3 import namesgenerator import paramiko from scp import SCPClient import requests def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--key_name', type=str, default="", help="required, key pair name") parser.add_argument( '--security_group_id', type=str, default="", help="required, the security group id associated with your VPC") parser.add_argument( '--vpc_id', type=str, default="", help="The VPC in which you wish to run test") parser.add_argument( '--subnet_id', type=str, default="", help="The Subnet_id in which you wish to run test") parser.add_argument( '--pserver_instance_type', type=str, default="c5.2xlarge", help="your pserver instance type, c5.2xlarge by default") parser.add_argument( '--trainer_instance_type', type=str, default="p2.8xlarge", help="your trainer instance type, p2.8xlarge by default") parser.add_argument( '--task_name', type=str, default="", help="the name you want to identify your job") parser.add_argument( '--pserver_image_id', type=str, default="ami-da2c1cbf", help="ami id for system image, default one has 
nvidia-docker ready, \ use ami-1ae93962 for us-east-2") parser.add_argument( '--pserver_command', type=str, default="", help="pserver start command, format example: python,vgg.py,batch_size:128,is_local:yes" ) parser.add_argument( '--trainer_image_id', type=str, default="ami-da2c1cbf", help="ami id for system image, default one has nvidia-docker ready, \ use ami-1ae93962 for us-west-2") parser.add_argument( '--trainer_command', type=str, default="", help="trainer start command, format example: python,vgg.py,batch_size:128,is_local:yes" ) parser.add_argument( '--availability_zone', type=str, default="us-east-2a", help="aws zone id to place ec2 instances") parser.add_argument( '--trainer_count', type=int, default=1, help="Trainer count") parser.add_argument( '--pserver_count', type=int, default=1, help="Pserver count") parser.add_argument( '--action', type=str, default="create", help="create|cleanup|status") parser.add_argument('--pem_path', type=str, help="private key file") parser.add_argument( '--pserver_port', type=str, default="5436", help="pserver port") parser.add_argument( '--docker_image', type=str, default="busybox", help="training docker image") parser.add_argument( '--master_server_port', type=int, default=5436, help="master server port") parser.add_argument( '--master_server_public_ip', type=str, help="master server public ip") parser.add_argument( '--master_docker_image', type=str, default="putcn/paddle_aws_master:latest", help="master docker image id") parser.add_argument( '--no_clean_up', type=str2bool, default=False, help="whether to clean up after training") args = parser.parse_args() logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') ec2client = boto3.client('ec2') def print_arguments(): print('----------- Configuration Arguments -----------') for arg, value in sorted(vars(args).iteritems()): print('%s: %s' % (arg, value)) print('------------------------------------------------') def create_subnet(): # if no vpc id provided, 
list vpcs logging.info("start creating subnet") if not args.vpc_id: logging.info("no vpc provided, trying to find the default one") vpcs_desc = ec2client.describe_vpcs( Filters=[{ "Name": "isDefault", "Values": ["true", ] }], ) if len(vpcs_desc["Vpcs"]) == 0: raise ValueError('No default VPC') args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"] vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"] logging.info("default vpc fount with id %s and CidrBlock %s" % (args.vpc_id, vpc_cidrBlock)) if not vpc_cidrBlock: logging.info("trying to find cidrblock for vpc") vpcs_desc = ec2client.describe_vpcs( Filters=[{ "Name": "vpc-id", "Values": [args.vpc_id, ], }], ) if len(vpcs_desc["Vpcs"]) == 0: raise ValueError('No VPC found') vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"] logging.info("cidrblock for vpc is %s" % vpc_cidrBlock) # list subnets in vpc in order to create a new one logging.info("trying to find ip blocks for new subnet") subnets_desc = ec2client.describe_subnets( Filters=[{ "Name": "vpc-id", "Values": [args.vpc_id, ], }], ) ips_taken = [] for subnet_dec in subnets_desc["Subnets"]: ips_taken.append(subnet_dec["CidrBlock"]) ip_blocks_avaliable = netaddr.IPSet( [vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken) # adding 10 addresses as buffer cidr_prefix = 32 - math.ceil( math.log(args.pserver_count + args.trainer_count + 10, 2)) if cidr_prefix <= 16: raise ValueError('Too many nodes to fit in current VPC') for ipnetwork in ip_blocks_avaliable.iter_cidrs(): try: subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next() logging.info("subnet ip block found %s" % (subnet_cidr)) break except Exception: pass if not subnet_cidr: raise ValueError( 'No avaliable subnet to fit required nodes in current VPC') logging.info("trying to create subnet") subnet_desc = ec2client.create_subnet( CidrBlock=str(subnet_cidr), VpcId=args.vpc_id, AvailabilityZone=args.availability_zone) subnet_id = subnet_desc["Subnet"]["SubnetId"] subnet_waiter = ec2client.get_waiter('subnet_available') # sleep for 1s 
before checking its state time.sleep(1) subnet_waiter.wait(SubnetIds=[subnet_id, ]) logging.info("subnet created") logging.info("adding tags to newly created subnet") ec2client.create_tags( Resources=[subnet_id, ], Tags=[{ "Key": "Task_name", 'Value': args.task_name }]) return subnet_id def run_instances(image_id, instance_type, count=1, role="MASTER", cmd=""): response = ec2client.run_instances( ImageId=image_id, InstanceType=instance_type, MaxCount=count, MinCount=count, UserData=cmd, DryRun=False, InstanceInitiatedShutdownBehavior="stop", KeyName=args.key_name, Placement={'AvailabilityZone': args.availability_zone}, NetworkInterfaces=[{ 'DeviceIndex': 0, 'SubnetId': args.subnet_id, "AssociatePublicIpAddress": True, 'Groups': args.security_group_ids }], TagSpecifications=[{ 'ResourceType': "instance", 'Tags': [{ "Key": 'Task_name', "Value": args.task_name + "_master" }, { "Key": 'Role', "Value": role }] }]) instance_ids = [] for instance in response["Instances"]: instance_ids.append(instance["InstanceId"]) if len(instance_ids) > 0: logging.info(str(len(instance_ids)) + " instance(s) created") else: logging.info("no instance created") #create waiter to make sure it's running logging.info("waiting for instance to become accessible") waiter = ec2client.get_waiter('instance_status_ok') waiter.wait( Filters=[{ "Name": "instance-status.status", "Values": ["ok"] }, { "Name": "instance-status.reachability", "Values": ["passed"] }, { "Name": "instance-state-name", "Values": ["running"] }], InstanceIds=instance_ids) instances_response = ec2client.describe_instances(InstanceIds=instance_ids) return instances_response["Reservations"][0]["Instances"] def generate_task_name(): return namesgenerator.get_random_name() def init_args(): if not args.task_name: args.task_name = generate_task_name() logging.info("task name generated %s" % (args.task_name)) if not args.pem_path: args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem" if args.security_group_id: 
args.security_group_ids = (args.security_group_id, ) def create(): init_args() # create subnet if not args.subnet_id: args.subnet_id = create_subnet() # create master node master_instance_response = run_instances( image_id="ami-7a05351f", instance_type="t2.nano") logging.info("master server started") args.master_server_public_ip = master_instance_response[0][ "PublicIpAddress"] args.master_server_ip = master_instance_response[0]["PrivateIpAddress"] logging.info("master server started, master_ip=%s, task_name=%s" % (args.master_server_public_ip, args.task_name)) # cp config file and pems to master node ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path) ssh_client = paramiko.SSHClient() ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh_client.connect( hostname=args.master_server_public_ip, username="ubuntu", pkey=ssh_key) with SCPClient(ssh_client.get_transport()) as scp: scp.put(os.path.expanduser("~") + "/" + ".aws", recursive=True, remote_path='/home/ubuntu/') scp.put(args.pem_path, remote_path='/home/ubuntu/' + args.key_name + ".pem") logging.info("credentials and pem copied to master") # set arguments and start docker kick_off_cmd = "docker run -d -v /home/ubuntu/.aws:/root/.aws/" kick_off_cmd += " -v /home/ubuntu/" + args.key_name + ".pem:/root/" + args.key_name + ".pem" kick_off_cmd += " -v /home/ubuntu/logs/:/root/logs/" kick_off_cmd += " -p " + str(args.master_server_port) + ":" + str( args.master_server_port) kick_off_cmd += " " + args.master_docker_image args_to_pass = copy.copy(args) args_to_pass.action = "serve" del args_to_pass.pem_path del args_to_pass.security_group_ids del args_to_pass.master_docker_image del args_to_pass.master_server_public_ip for arg, value in sorted(vars(args_to_pass).iteritems()): if value: kick_off_cmd += ' --%s %s' % (arg, value) logging.info(kick_off_cmd) stdin, stdout, stderr = ssh_client.exec_command(command=kick_off_cmd) return_code = stdout.channel.recv_exit_status() 
logging.info(return_code) if return_code != 0: raise Exception("Error while kicking off master") logging.info( "master server finished init process, visit %s to check master log" % (get_master_web_url("/status"))) def cleanup(): print requests.post(get_master_web_url("/cleanup")).text def status(): print requests.post(get_master_web_url("/status")).text def get_master_web_url(path): return "http://" + args.master_server_public_ip + ":" + str( args.master_server_port) + path if __name__ == "__main__": print_arguments() if args.action == "create": if not args.key_name or not args.security_group_id: raise ValueError("key_name and security_group_id are required") create() elif args.action == "cleanup": if not args.master_server_public_ip: raise ValueError("master_server_public_ip is required") cleanup() elif args.action == "status": if not args.master_server_public_ip: raise ValueError("master_server_public_ip is required") status()
12,777
29.716346
96
py
Paddle
Paddle-master/tools/aws_benchmarking/server/cluster_master.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import json import math import time import threading import logging import copy import csv import netaddr import boto3 import namesgenerator import paramiko from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer # You must have aws_access_key_id, aws_secret_access_key, region set in # ~/.aws/credentials and ~/.aws/config def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--key_name', type=str, default="", help="required, key pair name") parser.add_argument( '--security_group_id', type=str, default="", help="required, the security group id associated with your VPC") parser.add_argument( '--vpc_id', type=str, default="", help="The VPC in which you wish to run test") parser.add_argument( '--subnet_id', type=str, default="", help="The Subnet_id in which you wish to run test") parser.add_argument( '--pserver_instance_type', type=str, default="c5.2xlarge", help="your pserver instance type, c5.2xlarge by default") parser.add_argument( '--trainer_instance_type', type=str, default="p2.8xlarge", help="your trainer instance type, p2.8xlarge by default") parser.add_argument( '--task_name', type=str, default="", help="the 
name you want to identify your job") parser.add_argument( '--pserver_image_id', type=str, default="ami-da2c1cbf", help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-east-2" ) parser.add_argument( '--trainer_image_id', type=str, default="ami-da2c1cbf", help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-west-2" ) parser.add_argument( '--availability_zone', type=str, default="us-east-2a", help="aws zone id to place ec2 instances") parser.add_argument( '--trainer_count', type=int, default=1, help="Trainer count") parser.add_argument( '--pserver_count', type=int, default=1, help="Pserver count") parser.add_argument( '--pserver_bash_file', type=str, default=os.path.join(os.path.dirname(__file__), "pserver.sh.template"), help="pserver bash file path") parser.add_argument( '--pserver_command', type=str, default="", help="pserver start command") parser.add_argument( '--trainer_bash_file', type=str, default=os.path.join(os.path.dirname(__file__), "trainer.sh.template"), help="trainer bash file path") parser.add_argument( '--trainer_command', type=str, default="", help="trainer start command") parser.add_argument( '--action', type=str, default="serve", help="create|cleanup|serve") parser.add_argument('--pem_path', type=str, help="private key file") parser.add_argument( '--pserver_port', type=str, default="5436", help="pserver port") parser.add_argument( '--docker_image', type=str, default="busybox", help="training docker image") parser.add_argument( '--master_server_port', type=int, default=5436, help="master server port") parser.add_argument( '--master_server_ip', type=str, default="", help="master server private ip") parser.add_argument( '--metric_data_identifier', type=str, default="**metrics_data: ", help="key string to identify metrics data") parser.add_argument( '--no_clean_up', type=str2bool, default=False, help="whether to clean up after training") args = parser.parse_args() ec2client = 
boto3.client('ec2') args.log_path = os.path.join(os.path.dirname(__file__), "logs/") logging.basicConfig( filename=args.log_path + 'master.log', level=logging.INFO, format='%(asctime)s %(message)s') log_files = ["master.log"] metrics = {} metrics_csv_file_name = "metrics.csv" is_metrics_file_created = False def create_subnet(): # if no vpc id provided, list vpcs logging.info("start creating subnet") if not args.vpc_id: logging.info("no vpc provided, trying to find the default one") vpcs_desc = ec2client.describe_vpcs( Filters=[{ "Name": "isDefault", "Values": ["true", ] }], ) if len(vpcs_desc["Vpcs"]) == 0: raise ValueError('No default VPC') args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"] vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"] logging.info("default vpc fount with id %s and CidrBlock %s" % (args.vpc_id, vpc_cidrBlock)) if not vpc_cidrBlock: logging.info("trying to find cidrblock for vpc") vpcs_desc = ec2client.describe_vpcs( Filters=[{ "Name": "vpc-id", "Values": [args.vpc_id, ], }], ) if len(vpcs_desc["Vpcs"]) == 0: raise ValueError('No VPC found') vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"] logging.info("cidrblock for vpc is %s" % vpc_cidrBlock) # list subnets in vpc in order to create a new one logging.info("trying to find ip blocks for new subnet") subnets_desc = ec2client.describe_subnets( Filters=[{ "Name": "vpc-id", "Values": [args.vpc_id, ], }], ) ips_taken = [] for subnet_dec in subnets_desc["Subnets"]: ips_taken.append(subnet_dec["CidrBlock"]) ip_blocks_avaliable = netaddr.IPSet( [vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken) # adding 10 addresses as buffer cidr_prefix = 32 - math.ceil( math.log(args.pserver_count + args.trainer_count + 10, 2)) if cidr_prefix <= 16: raise ValueError('Too many nodes to fit in current VPC') for ipnetwork in ip_blocks_avaliable.iter_cidrs(): try: subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next() logging.info("subnet ip block found %s" % (subnet_cidr)) break except Exception: pass if not subnet_cidr: raise 
ValueError( 'No avaliable subnet to fit required nodes in current VPC') logging.info("trying to create subnet") subnet_desc = ec2client.create_subnet( CidrBlock=str(subnet_cidr), VpcId=args.vpc_id, AvailabilityZone=args.availability_zone) subnet_id = subnet_desc["Subnet"]["SubnetId"] subnet_waiter = ec2client.get_waiter('subnet_available') # sleep for 1s before checking its state time.sleep(1) subnet_waiter.wait(SubnetIds=[subnet_id, ]) logging.info("subnet created") logging.info("adding tags to newly created subnet") ec2client.create_tags( Resources=[subnet_id, ], Tags=[{ "Key": "Task_name", 'Value': args.task_name }]) return subnet_id def generate_task_name(): return namesgenerator.get_random_name() def script_to_str(file_path): if not file_path: return "echo $PSERVER_HOSTS" file = open(file_path, 'r') text = file.read().strip() file.close() return text def run_instances(image_id, instance_type, count, role, cmd=""): if count == 0: return [] response = ec2client.run_instances( ImageId=image_id, InstanceType=instance_type, MaxCount=count, MinCount=count, UserData=cmd, DryRun=False, InstanceInitiatedShutdownBehavior="stop", KeyName=args.key_name, Placement={'AvailabilityZone': args.availability_zone}, NetworkInterfaces=[{ 'DeviceIndex': 0, 'SubnetId': args.subnet_id, "AssociatePublicIpAddress": True, 'Groups': args.security_group_ids }], TagSpecifications=[{ 'ResourceType': "instance", 'Tags': [{ "Key": 'Task_name', "Value": args.task_name }, { "Key": 'Role', "Value": role }] }]) instance_ids = [] for instance in response["Instances"]: instance_ids.append(instance["InstanceId"]) if len(instance_ids) > 0: logging.info(str(len(instance_ids)) + " instance(s) created") else: logging.info("no instance created") #create waiter to make sure it's running logging.info("waiting for instance to become accessible") waiter = ec2client.get_waiter('instance_status_ok') waiter.wait( Filters=[{ "Name": "instance-status.status", "Values": ["ok"] }, { "Name": 
"instance-status.reachability", "Values": ["passed"] }, { "Name": "instance-state-name", "Values": ["running"] }], InstanceIds=instance_ids) instances_response = ec2client.describe_instances(InstanceIds=instance_ids) return instances_response["Reservations"][0]["Instances"] def create_pservers(): try: return run_instances( image_id=args.pserver_image_id, instance_type=args.pserver_instance_type, count=args.pserver_count, role="PSERVER", ) except Exception: logging.exception("error while trying to create pservers") cleanup(args.task_name) def save_metrics_data(str_msg): #parse msg logging.info("found metrics data, saving it to csv file") global is_metrics_file_created metrics_raw = str_msg.split(",") with open(args.log_path + metrics_csv_file_name, 'a') as csvfile: csv_fieldnames = [] csv_write_data = {} for metric in metrics_raw: metric_data = metric.split("=") metric_key = metric_data[0].strip() metric_val = float(metric_data[1].strip()) if not metric_key in metrics: metrics[metric_key] = [] metric_repo = metrics[metric_key] metric_repo.append(metric_val) csv_fieldnames.append(metric_key) csv_write_data[metric_key] = metric_val writer = csv.DictWriter(csvfile, fieldnames=csv_fieldnames) if not is_metrics_file_created: writer.writeheader() is_metrics_file_created = True writer.writerow(csv_write_data) logging.info("csv file appended") def log_to_file(source, filename): if not filename in log_files: log_files.append(filename) with open(args.log_path + filename, "a") as log_file: for line in iter(source.readline, ""): log_file.write(line) if (line.startswith(args.metric_data_identifier)): #found key data, trying to add to csv line = line.replace(args.metric_data_identifier, "") save_metrics_data(line) def parse_command(command_raw, defaults={}): if not command_raw: command_raw = "" commands_processed = [] parameter_map = copy.copy(defaults) for seg in command_raw.split(","): if ":" in seg: parameters = seg.split(":") parameter_map[parameters[0]] = parameters[1] else: 
commands_processed.append(seg) for key, val in parameter_map.iteritems(): commands_processed.append("--" + key + " " + str(val)) return " ".join(commands_processed) def create_trainers(kickoff_cmd, pserver_endpoints_str): def create_and_start_trainer(trainer_index): logging.info("trainer " + str(trainer_index) + " is starting") instance_response = run_instances( image_id=args.trainer_image_id, instance_type=args.trainer_instance_type, count=1, role="TRAINER", )[0] trainer_ip = instance_response["PrivateIpAddress"] logging.info("trainer " + str(trainer_index) + " started") ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path) ssh_client = paramiko.SSHClient() ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh_client.connect(hostname=trainer_ip, username="ubuntu", pkey=ssh_key) logging.info("trainer " + str(trainer_index) + " terminal connected via ssh") cmd = kickoff_cmd.format( PSERVER_HOSTS=pserver_endpoints_str, DOCKER_IMAGE=args.docker_image, TRAINER_INDEX=str(trainer_index), TASK_NAME=args.task_name, TRAINER_COUNT=args.trainer_count, COMMAND=parse_command(args.trainer_command, {"device": "GPU"}), MASTER_ENDPOINT=args.master_server_ip + ":" + str(args.master_server_port)) logging.info(cmd) stdin, stdout, stderr = ssh_client.exec_command(command=cmd) # read and save output log logging.info("trainer " + str(trainer_index) + " command executed, keep fetching log") stdout_thread = threading.Thread( target=log_to_file, args=( stdout, "trainer_" + str(trainer_index) + ".log", )) stderr_thread = threading.Thread( target=log_to_file, args=( stderr, "trainer_" + str(trainer_index) + "_err.log", )) stdout_thread.start() stderr_thread.start() stdout_thread.join() stderr_thread.join() return_code = stdout.channel.recv_exit_status() if return_code != 0: trainer_create_results[trainer_index] = {'has_error': True} raise ValueError("trainer didn't finish with exit code 0") ssh_client.close() # multi thread starting trainer instance and run kickoff 
command trainer_threads = [] trainer_create_results = {} try: for i in xrange(args.trainer_count): logging.info("starting tread for trainer " + str(i)) trainer_thread = threading.Thread( target=create_and_start_trainer, args=(i, )) trainer_thread.start() trainer_threads.append(trainer_thread) for trainer_thread in trainer_threads: trainer_thread.join() for result in trainer_create_results: if result["has_error"]: logging.error( "error during trainer starting or training, destorying the while cluster " ) cleanup(args.task_name) break logging.info("all trainers stopped") except Exception, e: logging.info( "Training exception, clean up resources, please check log for more info" ) finally: cleanup(args.task_name) def cleanup(task_name): if args.no_clean_up: logging.info("no clean up option set, going to leave the setup running") return #shutdown all ec2 instances print("going to clean up " + task_name + " instances") instances_response = ec2client.describe_instances(Filters=[{ "Name": "tag:Task_name", "Values": [task_name] }]) instance_ids = [] if len(instances_response["Reservations"]) > 0: for reservation in instances_response["Reservations"]: for instance in reservation["Instances"]: instance_ids.append(instance["InstanceId"]) ec2client.terminate_instances(InstanceIds=instance_ids) instance_termination_waiter = ec2client.get_waiter( 'instance_terminated') instance_termination_waiter.wait(InstanceIds=instance_ids) #delete the subnet created subnet = ec2client.describe_subnets(Filters=[{ "Name": "tag:Task_name", "Values": [task_name] }]) if len(subnet["Subnets"]) > 0: ec2client.delete_subnet(SubnetId=subnet["Subnets"][0]["SubnetId"]) # no subnet delete waiter, just leave it. 
logging.info("Clearnup done") return def kickoff_pserver(host, pserver_endpoints_str): try: ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path) ssh_client = paramiko.SSHClient() ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh_client.connect(hostname=host, username="ubuntu", pkey=ssh_key) cmd = (script_to_str(args.pserver_bash_file)).format( PSERVER_HOSTS=pserver_endpoints_str, DOCKER_IMAGE=args.docker_image, PSERVER_PORT=args.pserver_port, TASK_NAME=args.task_name, COMMAND=parse_command(args.pserver_command, {"device": "CPU"}), TRAINER_COUNT=args.trainer_count, TRAINER_INDEX=0, # there is no way to use 0.0.0.0:port to start pserver # has to docker --network="host" with host ip to make this work SERVER_ENDPOINT=host + ":" + str(args.pserver_port), MASTER_ENDPOINT=args.master_server_ip + ":" + str(args.master_server_port)) logging.info(cmd) stdin, stdout, stderr = ssh_client.exec_command(command=cmd) stdout_thread = threading.Thread( target=log_to_file, args=( stdout, "pserver_" + host + ".log", )) stderr_thread = threading.Thread( target=log_to_file, args=( stderr, "pserver_" + host + "_err.log", )) stdout_thread.start() stderr_thread.start() stdout_thread.join() stderr_thread.join() return_code = stdout.channel.recv_exit_status() logging.info(return_code) if return_code != 0: raise Exception("Error while kicking off pserver training process") except Exception: logging.exception("Error while kicking off pserver training process") cleanup(args.task_name) finally: ssh_client.close() def init_args(): if not args.task_name: args.task_name = generate_task_name() logging.info("task name generated %s" % (args.task_name)) if not args.pem_path: args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem" if args.security_group_id: args.security_group_ids = (args.security_group_id, ) args.trainers_job_done_count = 0 def create_cluster(): if not args.subnet_id: logging.info("creating subnet for this task") args.subnet_id = 
create_subnet() logging.info("subnet %s created" % (args.subnet_id)) logging.info("creating pservers") pserver_create_response = create_pservers() logging.info("pserver created, collecting pserver ips") pserver_endpoints = [] for pserver in pserver_create_response: pserver_endpoints.append(pserver["NetworkInterfaces"][0][ "PrivateIpAddress"] + ":" + args.pserver_port) pserver_endpoints_str = ",".join(pserver_endpoints) logging.info("kicking off pserver training process") pserver_threads = [] for pserver in pserver_create_response: pserver_thread = threading.Thread( target=kickoff_pserver, args=(pserver["PrivateIpAddress"], pserver_endpoints_str)) pserver_thread.start() pserver_threads.append(pserver_thread) logging.info("all pserver training process started") logging.info("creating trainers and kicking off trainer training process") create_trainers( kickoff_cmd=script_to_str(args.trainer_bash_file), pserver_endpoints_str=pserver_endpoints_str) for pserver_thread in pserver_threads: pserver_thread.join() logging.info("all process ended") def start_server(args): class S(BaseHTTPRequestHandler): def _set_headers(self): self.send_response(200) self.send_header('Content-type', 'text/text') self.end_headers() def do_HEAD(self): self._set_headers() def do_404(self): self.send_response(404) self.send_header('Content-type', 'text/text') self.end_headers() logging.info("Received invalid GET request" + self.path) self.wfile.write("NO ACTION FOUND") def do_GET(self): request_path = self.path if request_path == "/status" or request_path == "/master_logs": self._set_headers() logging.info("Received request to return status") with open(args.log_path + "master.log", "r") as logfile: self.wfile.write(logfile.read().strip()) elif request_path == "/list_logs" or request_path == "/logs": self._set_headers() self.wfile.write("\n".join(log_files)) elif "/log/" in request_path: self._set_headers() log_file_path = request_path.replace("/log/", "") logging.info("requesting log file path 
is" + args.log_path + log_file_path) with open(args.log_path + log_file_path, "r") as logfile: self.wfile.write(logfile.read().strip()) else: self.do_404() def do_POST(self): request_path = self.path if request_path == "/save_data": self._set_headers() logging.info("Received request to save data") self.wfile.write("DATA SAVED!") content_length = int(self.headers['Content-Length']) post_data = self.rfile.read(content_length) if args.task_name: with open(args.task_name + ".txt", "a") as text_file: text_file.write(post_data + "\n") elif request_path == "/cleanup": self._set_headers() logging.info("Received request to cleanup cluster") args.no_clean_up = False cleanup(args.task_name) self.wfile.write("cleanup in progress") else: self.do_404() server_address = ('', args.master_server_port) httpd = HTTPServer(server_address, S) logging.info("HTTP server is starting") httpd.serve_forever() def print_arguments(): logging.info('----------- Configuration Arguments -----------') for arg, value in sorted(vars(args).iteritems()): logging.info('%s: %s' % (arg, value)) logging.info('------------------------------------------------') if __name__ == "__main__": print_arguments() if args.action == "create": logging.info("going to create cluster") if not args.key_name or not args.security_group_id: raise ValueError("key_name and security_group_id are required") init_args() create_cluster() elif args.action == "cleanup": logging.info("going to cleanup cluster") if not args.task_name: raise ValueError("task_name is required") cleanup(args.task_name) elif args.action == "serve": # serve mode if not args.master_server_ip: raise ValueError( "No master server ip set, please run with --action create") logging.info("going to start serve and create cluster") init_args() logging.info("starting server in another thread") server_thread = threading.Thread(target=start_server, args=(args, )) server_thread.start() create_cluster() server_thread.join() elif args.action == "test": start_server(args)
24,187
31.86413
103
py
Paddle
Paddle-master/tools/codestyle/docstring_checker.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DocstringChecker is used to check python doc string's style.""" import six import astroid from pylint.checkers import BaseChecker, utils from pylint.interfaces import IAstroidChecker from collections import defaultdict import re def register(linter): """Register checkers.""" linter.register_checker(DocstringChecker(linter)) class Docstring(object): """Docstring class holds the parsed doc string elements. """ def __init__(self): self.d = defaultdict(list) #name->[] self.clear() def clear(self): self.d['Args'] = [] self.d['Examples'] = [] self.d['Returns'] = [] self.d['Raises'] = [] self.args = {} #arg_name->arg_type def get_level(self, string, indent=' '): level = 0 unit_size = len(indent) while string[:unit_size] == indent: string = string[unit_size:] level += 1 return level def parse(self, doc): """parse gets sections from doc Such as Args, Returns, Raises, Examples s Args: doc (string): is the astroid node doc string. Returns: True if doc is parsed successfully. 
""" self.clear() lines = doc.splitlines() state = ("others", -1) for l in lines: c = l.strip() if len(c) <= 0: continue level = self.get_level(l) if c.startswith("Args:"): state = ("Args", level) elif c.startswith("Returns:"): state = ("Returns", level) elif c.startswith("Raises:"): state = ("Raises", level) elif c.startswith("Examples:"): state = ("Examples", level) else: if level > state[1]: self.d[state[0]].append(c) continue state = ("others", -1) self.d[state[0]].append(c) self._arg_with_type() return True def get_returns(self): return self.d['Returns'] def get_raises(self): return self.d['Raises'] def get_examples(self): return self.d['Examples'] def _arg_with_type(self): for t in self.d['Args']: m = re.search('([A-Za-z0-9_-]+)\s{0,4}(\(.+\))\s{0,4}:', t) if m: self.args[m.group(1)] = m.group(2) return self.args class DocstringChecker(BaseChecker): """DosstringChecker is pylint checker to check docstring style. """ __implements__ = (IAstroidChecker, ) POSITIONAL_MESSAGE_ID = 'str-used-on-positional-format-argument' KEYWORD_MESSAGE_ID = 'str-used-on-keyword-format-argument' name = 'doc-string-checker' symbol = "doc-string" priority = -1 msgs = { 'W9001': ('One line doc string on > 1 lines', symbol + "-one-line", 'Used when a short doc string is on multiple lines'), 'W9002': ('Doc string does not end with "." 
period', symbol + "-end-with", 'Used when a doc string does not end with a period'), 'W9003': ('All args with their types must be mentioned in doc string', symbol + "-with-all-args", 'Used when not all arguments are in the doc string '), 'W9005': ('Missing docstring or docstring is too short', symbol + "-missing", 'Add docstring longer >=10'), 'W9006': ('Docstring indent error, use 4 space for indent', symbol + "-indent-error", 'Use 4 space for indent'), 'W9007': ('You should add `Returns` in comments', symbol + "-with-returns", 'There should be a `Returns` section in comments'), 'W9008': ('You should add `Raises` section in comments', symbol + "-with-raises", 'There should be a `Raises` section in comments'), } options = () def visit_functiondef(self, node): """visit_functiondef checks Function node docstring style. Args: node (astroid.node): The visiting node. Returns: True if successful other wise False. """ self.check_doc_string(node) if node.tolineno - node.fromlineno <= 10: return True if not node.doc: return True doc = Docstring() doc.parse(node.doc) self.all_args_in_doc(node, doc) self.with_returns(node, doc) self.with_raises(node, doc) def visit_module(self, node): self.check_doc_string(node) def visit_classdef(self, node): self.check_doc_string(node) def check_doc_string(self, node): self.missing_doc_string(node) self.one_line(node) self.has_period(node) self.indent_style(node) def missing_doc_string(self, node): if node.tolineno - node.fromlineno <= 10: return True if node.doc is None or len(node.doc) < 10: self.add_message('W9005', node=node, line=node.fromlineno) return False # FIXME(gongwb): give the docstring line-no def indent_style(self, node, indent=4): """indent_style checks docstring's indent style Args: node (astroid.node): The visiting node. indent (int): The default indent of style Returns: True if successful other wise False. 
""" if node.doc is None: return True doc = node.doc lines = doc.splitlines() for l in lines: cur_indent = len(l) - len(l.lstrip()) if cur_indent % indent != 0: self.add_message('W9006', node=node, line=node.fromlineno) return False return True def one_line(self, node): """one_line checks if docstring (len < 40) is on one line. Args: node (astroid.node): The node visiting. Returns: True if successful otherwise False. """ doc = node.doc if doc is None: return True if len(doc) > 40: return True elif sum(doc.find(nl) for nl in ('\n', '\r', '\n\r')) == -3: return True else: self.add_message('W9001', node=node, line=node.fromlineno) return False return True def has_period(self, node): """has_period checks if one line doc end-with '.' . Args: node (astroid.node): the node is visiting. Returns: True if successful otherwise False. """ if node.doc is None: return True if len(node.doc.splitlines()) > 1: return True if not node.doc.strip().endswith('.'): self.add_message('W9002', node=node, line=node.fromlineno) return False return True def with_raises(self, node, doc): """with_raises checks if one line doc end-with '.' . Args: node (astroid.node): the node is visiting. doc (Docstring): Docstring object. Returns: True if successful otherwise False. """ find = False for t in node.body: if not isinstance(t, astroid.Raise): continue find = True break if not find: return True if len(doc.get_raises()) == 0: self.add_message('W9008', node=node, line=node.fromlineno) return False return True def with_returns(self, node, doc): """with_returns checks if docstring comments what are returned . Args: node (astroid.node): the node is visiting. doc (Docstring): Docstring object. Returns: True if successful otherwise False. 
""" find = False for t in node.body: if not isinstance(t, astroid.Return): continue find = True break if not find: return True if len(doc.get_returns()) == 0: self.add_message('W9007', node=node, line=node.fromlineno) return False return True def all_args_in_doc(self, node, doc): """all_args_in_doc checks if arguments are mentioned in doc Args: node (astroid.node): the node is visiting. doc (Docstring): Docstring object Returns: True if successful otherwise False. """ args = [] for arg in node.args.get_children(): if (not isinstance(arg, astroid.AssignName)) \ or arg.name == "self": continue args.append(arg.name) if len(args) <= 0: return True parsed_args = doc.args if len(args) > 0 and len(parsed_args) <= 0: print "debug:parsed args: ", parsed_args self.add_message('W9003', node=node, line=node.fromlineno) return False for t in args: if t not in parsed_args: print t, " with (type) not in ", parsed_args self.add_message('W9003', node=node, line=node.fromlineno) return False return True
9,953
28.713433
78
py
Paddle
Paddle-master/tools/codestyle/test_docstring_checker.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import docstring_checker import pylint.testutils import astroid import pytest import sys class TestDocstring(pylint.testutils.CheckerTestCase): CHECKER_CLASS = docstring_checker.DocstringChecker def test_one_line(self): func_node = astroid.extract_node(''' def test(): """get news. """ if True: return 5 return 5 ''') self.checker.visit_functiondef(func_node) got = self.linter.release_messages() assert len(got) == 1 assert 'W9001' == got[0][0] def test_one_line(self): func_node = astroid.extract_node(''' def test(): """get news""" if True: return 5 return 5 ''') self.checker.visit_functiondef(func_node) got = self.linter.release_messages() assert len(got) == 1 assert 'W9002' == got[0][0] def test_args(self): func_node = astroid.extract_node(''' def test(scale, mean): """get news. Args: scale (int): scale is the number. 
""" mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale ''') self.checker.visit_functiondef(func_node) got = self.linter.release_messages() assert len(got) == 1 assert 'W9003' == got[0][0] def test_missing(self): func_node = astroid.extract_node(''' def test(): mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale ''') self.checker.visit_functiondef(func_node) got = self.linter.release_messages() assert len(got) == 1 assert 'W9005' == got[0][0] def test_indent(self): func_node = astroid.extract_node(''' def test(): """ get get get get get get get get get get get get get get get get. """ pass ''') self.checker.visit_functiondef(func_node) got = self.linter.release_messages() assert len(got) == 1 assert 'W9006' == got[0][0] def test_with_resturns(self): func_node = astroid.extract_node(''' def test(): """get news. Args: scale (int): scale is the number. """ mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale return mean ''') self.checker.visit_functiondef(func_node) got = self.linter.release_messages() assert len(got) == 1 assert 'W9007' == got[0][0] def test_with_raises(self): func_node = astroid.extract_node(''' def test(): """get news. Args: scale (int): scale is the number. """ mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale mean=scale raise ValueError('A very specific bad thing happened.') ''') self.checker.visit_functiondef(func_node) got = self.linter.release_messages() assert len(got) == 1 assert 'W9008' == got[0][0] def test_no_message(self): p = ''' def fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None): """ **Fully Connected Layer** The fully connected layer can take multiple tensors as its inputs. 
It creates a variable called weights for each input tensor, which represents a fully connected weight matrix from each input unit to each output unit. The fully connected layer multiplies each input tensor with its coresponding weight to produce an output Tensor. If multiple input tensors are given, the results of multiple multiplications will be sumed up. If bias_attr is not None, a bias variable will be created and added to the output. Finally, if activation is not None, it will be applied to the output as well. This process can be formulated as follows: Args: input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of the input tensor(s) is at least 2. size(int): The number of output units in this layer. num_flatten_dims (int, default 1): The fc layer can accept an input tensor with more than two dimensions. If this happens, the multidimensional tensor will first be flattened into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1) dimensions will be flatten to form the first dimension of the final matrix (height of the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to form the second dimension of the final matrix (width of the matrix). For example, suppose `X` is a 6-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3. Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable parameters/weights of this layer. bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias of this layer. If it is set to None, no bias will be added to the output units. act (str, default None): Activation to be applied to the output of this layer. name (str, default None): The name of this layer. 
Returns: A tensor variable storing the transformation result. Raises: ValueError: If rank of the input tensor is less than 2. Examples: .. code-block:: python data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") fc = fluid.layers.fc(input=data, size=1000, act="tanh") """ raise ValueError('A very specific bad thing happened.') size = 1 size = 1 size = 1 size = 1 size = 1 size = 1 size = 1 size = 1 size = 1 size = 1 size = 1 size = 1 size = 1 return size ''' func_node = astroid.extract_node(p) self.checker.visit_functiondef(func_node) got = self.linter.release_messages() assert len(got) == 0
7,639
31.7897
101
py
Paddle
Paddle-master/tools/manylinux1/build_scripts/ssl-check.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # cf. https://github.com/pypa/manylinux/issues/53 GOOD_SSL = "https://google.com" BAD_SSL = "https://self-signed.badssl.com" import sys print("Testing SSL certificate checking for Python:", sys.version) if (sys.version_info[:2] < (2, 7) or sys.version_info[:2] < (3, 4)): print("This version never checks SSL certs; skipping tests") sys.exit(0) if sys.version_info[0] >= 3: from urllib.request import urlopen EXC = OSError else: from urllib import urlopen EXC = IOError print("Connecting to %s should work" % (GOOD_SSL, )) urlopen(GOOD_SSL) print("...it did, yay.") print("Connecting to %s should fail" % (BAD_SSL, )) try: urlopen(BAD_SSL) # If we get here then we failed: print("...it DIDN'T!!!!!11!!1one!") sys.exit(1) except EXC: print("...it did, yay.")
1,422
29.276596
74
py
Paddle
Paddle-master/tools/manylinux1/build_scripts/manylinux1-check.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Logic copied from PEP 513 def is_manylinux1_compatible(): # Only Linux, and only x86-64 / i686 from distutils.util import get_platform if get_platform() not in ["linux-x86_64", "linux-i686"]: return False # Check for presence of _manylinux module try: import _manylinux return bool(_manylinux.manylinux1_compatible) except (ImportError, AttributeError): # Fall through to heuristic check below pass # Check glibc version. CentOS 5 uses glibc 2.5. return have_compatible_glibc(2, 5) def have_compatible_glibc(major, minimum_minor): import ctypes process_namespace = ctypes.CDLL(None) try: gnu_get_libc_version = process_namespace.gnu_get_libc_version except AttributeError: # Symbol doesn't exist -> therefore, we are not linked to # glibc. return False # Call gnu_get_libc_version, which returns a string like "2.5". gnu_get_libc_version.restype = ctypes.c_char_p version_str = gnu_get_libc_version() # py2 / py3 compatibility: if not isinstance(version_str, str): version_str = version_str.decode("ascii") # Parse string and check against requested version. 
version = [int(piece) for piece in version_str.split(".")] assert len(version) == 2 if major != version[0]: return False if minimum_minor > version[1]: return False return True import sys if is_manylinux1_compatible(): print("%s is manylinux1 compatible" % (sys.executable, )) sys.exit(0) else: print("%s is NOT manylinux1 compatible" % (sys.executable, )) sys.exit(1)
2,258
30.816901
74
py
Paddle
Paddle-master/tools/manylinux1/build_scripts/python-tag-abi-tag.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Utility script to print the python tag + the abi tag for a Python # See PEP 425 for exactly what these are, but an example would be: # cp27-cp27mu from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag print("{0}{1}-{2}".format(get_abbr_impl(), get_impl_ver(), get_abi_tag()))
911
40.454545
74
py
Paddle
Paddle-master/python/__init__.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
612
42.785714
74
py
Paddle
Paddle-master/python/paddle/batch.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = ['batch'] def batch(reader, batch_size): """ Create a batched reader. :param reader: the data reader to read from. :type reader: callable :param batch_size: size of each mini-batch :type batch_size: int :return: the batched reader. :rtype: callable """ def batch_reader(): r = reader() b = [] for instance in r: b.append(instance) if len(b) == batch_size: yield b b = [] if b: yield b return batch_reader
1,174
26.97619
74
py
Paddle
Paddle-master/python/paddle/__init__.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: from version import full_version as __version__ from version import commit as __git_commit__ except ImportError: import sys sys.stderr.write('''Warning with import paddle: you should not import paddle from the source directory; please install paddlepaddle*.whl firstly.''' ) import reader import dataset import batch batch = batch.batch
996
34.607143
90
py
Paddle
Paddle-master/python/paddle/trainer/PyDataProvider2.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cPickle import logging import collections import functools import itertools logging.basicConfig(format="[%(levelname)s %(asctime)s %(filename)s:%(lineno)s]" " %(message)s") class SequenceType(object): NO_SEQUENCE = 0 SEQUENCE = 1 SUB_SEQUENCE = 2 @classmethod def tostring(cls, value): for k in cls.__dict__: if not k.startswith('__'): if getattr(cls, k) == value: return cls.__name__ + '.' + k return 'INVALID(' + str(value) + ')' # TODO(yuyang18): Add string data type here. class DataType(object): Dense = 0 SparseNonValue = 1 SparseValue = 2 Index = 3 @classmethod def tostring(cls, value): for k in cls.__dict__: if not k.startswith('__'): if getattr(cls, k) == value: return cls.__name__ + '.' + k return 'INVALID(' + str(value) + ')' class CacheType(object): NO_CACHE = 0 # No cache at all # First pass, read data from python. And store them in memory. Read from # memory during rest passes. CACHE_PASS_IN_MEM = 1 class InputType(object): """ InputType is the base class for paddle input types. .. note:: this is a base class, and should never be used by user. :param dim: dimension of input. If the input is an integer, it means the value range. Otherwise, it means the size of layer. :type dim: int :param seq_type: sequence type of input. 0 means it is not a sequence. 1 means it is a variable length sequence. 2 means it is a nested sequence. 
:type seq_type: int :param type: data type of input. :type type: int """ __slots__ = ['dim', 'seq_type', 'type'] def __init__(self, dim, seq_type, tp): self.dim = dim self.seq_type = seq_type self.type = tp def __repr__(self): """ Return a human readable representation like 'InputType(dim=25921, seq_type=SequenceType.NO_SEQUENCE, type=DataType.Dense)' """ repr_str = type(self).__name__ repr_str += '(' serialize_func_map = { 'dim': repr, 'seq_type': SequenceType.tostring, 'type': DataType.tostring } for idx, k in enumerate(self.__slots__): if idx != 0: repr_str += ', ' repr_str += ( k + '=' + serialize_func_map.get(k, repr)(getattr(self, k))) repr_str += ')' return repr_str def dense_slot(dim, seq_type=SequenceType.NO_SEQUENCE): """ Dense Array. It means the input feature is dense array with float type. For example, if the input is an image with 28*28 pixels, the input of Paddle neural network could be a dense vector with dimension 784 or a numpy array with shape (28, 28). For the 2-D convolution operation, each sample in one mini-batch must have the similarly size in PaddlePaddle now. But, it supports variable-dimension feature across mini-batch. For the variable-dimension, the param dim is not used. While the data reader must yield numpy array and the data feeder will set the data shape correctly. :param dim: dimension of this vector. :type dim: int :param seq_type: sequence type of input. :type seq_type: int :return: An input type object. :rtype: InputType """ return InputType(dim, seq_type, DataType.Dense) def sparse_non_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE): """ Sparse binary vector. It means the input feature is a sparse vector and the every element in this vector is either zero or one. :param dim: dimension of this vector. :type dim: int :param seq_type: sequence type of this input. :type seq_type: int :return: An input type object. 
:rtype: InputType """ return InputType(dim, seq_type, DataType.SparseNonValue) def sparse_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE): """ Sparse vector. It means the input feature is a sparse vector. Most of the elements in this vector are zero, others could be any float value. :param dim: dimension of this vector. :type dim: int :param seq_type: sequence type of this input. :type seq_type: int :return: An input type object. :rtype: InputType """ return InputType(dim, seq_type, DataType.SparseValue) def index_slot(value_range, seq_type=SequenceType.NO_SEQUENCE): """ Data type of integer. :param seq_type: sequence type of this input. :type seq_type: int :param value_range: range of this integer. :type value_range: int :return: An input type object :rtype: InputType """ return InputType(value_range, seq_type, DataType.Index) dense_vector = dense_slot sparse_binary_vector = sparse_non_value_slot sparse_float_vector = sparse_value_slot integer_value = index_slot # dense_array can be used for variable-length input feature. # Each feature is not a vector, but a multi-dimensional array. dense_array = dense_slot def dense_vector_sequence(dim): """ Data type of a sequence of dense vector. :param dim: dimension of dense vector. :type dim: int :return: An input type object :rtype: InputType """ return dense_vector(dim, seq_type=SequenceType.SEQUENCE) def dense_vector_sub_sequence(dim): return dense_vector(dim, seq_type=SequenceType.SUB_SEQUENCE) def sparse_binary_vector_sequence(dim): """ Data type of a sequence of sparse vector, which every element is either zero or one. :param dim: dimension of sparse vector. 
:type dim: int :return: An input type object :rtype: InputType """ return sparse_binary_vector(dim, seq_type=SequenceType.SEQUENCE) def sparse_binary_vector_sub_sequence(dim): return sparse_binary_vector(dim, seq_type=SequenceType.SUB_SEQUENCE) def sparse_float_vector_sequence(dim): """ Data type of a sequence of sparse vector, which most elements are zero, others could be any float value. :param dim: dimension of sparse vector. :type dim: int :return: An input type object :rtype: InputType """ return sparse_float_vector(dim, seq_type=SequenceType.SEQUENCE) def sparse_float_vector_sub_sequence(dim): return sparse_float_vector(dim, seq_type=SequenceType.SUB_SEQUENCE) def integer_value_sequence(value_range): """ Data type of a sequence of integer. :param value_range: range of each element. :type value_range: int """ return integer_value(value_range, seq_type=SequenceType.SEQUENCE) def integer_value_sub_sequence(dim): return integer_value(dim, seq_type=SequenceType.SUB_SEQUENCE) integer_sequence = integer_value_sequence class SingleSlotWrapper(object): def __init__(self, generator): self.generator = generator def __call__(self, obj, filename): for item in self.generator(obj, filename): if isinstance(item, dict): yield item else: yield [item] class InputOrderWrapper(object): def __init__(self, generator, input_order): self.generator = generator self.input_order = input_order def __call__(self, obj, filename): for item in self.generator(obj, filename): if isinstance(item, dict): yield [ item.get(input_name, None) for input_name in self.input_order ] else: yield item class CheckWrapper(object): def __init__(self, generator, input_types, check_fail_continue, logger): self.generator = generator self.input_types = input_types self.check_fail_continue = check_fail_continue self.logger = logger def __call__(self, obj, filename): for items in self.generator(obj, filename): try: assert len(items) == len(self.input_types) assert len(filter(lambda x: x is None, items)) == 0 for 
item, input_type in itertools.izip(items, self.input_types): callback = functools.partial(CheckWrapper.loop_callback, input_type) for _ in xrange(input_type.seq_type): callback = functools.partial(CheckWrapper.loop_check, callback) callback(item) yield items except AssertionError as e: self.logger.warning( "Item (%s) is not fit the input type with error %s" % (repr(item), repr(e))) if self.check_fail_continue: continue else: raise @staticmethod def loop_callback(input_type, each): assert isinstance(input_type, InputType) if input_type.type == DataType.Dense: assert isinstance(each, collections.Sequence) for d in each: assert isinstance(d, float) assert len(each) == input_type.dim elif input_type.type == DataType.Index: assert isinstance(each, int) assert each < input_type.dim elif input_type.type == DataType.SparseNonValue \ or input_type.type == DataType.SparseValue: assert isinstance(each, collections.Sequence) sparse_id = set() for k in each: if input_type.type == DataType.SparseValue: k, v = k assert isinstance(v, float) assert isinstance(k, int) assert k < input_type.dim sparse_id.add(k) assert len(sparse_id) == len(each) else: raise RuntimeError("Not support input type") @staticmethod def loop_check(callback, item): for each in item: callback(each) class CheckInputTypeWrapper(object): def __init__(self, generator, input_types, logger): self.generator = generator self.input_types = input_types self.logger = logger def __call__(self, obj, filename): for items in self.generator(obj, filename): try: # dict type is required for input_types when item is dict type assert (isinstance(items, dict) and \ not isinstance(self.input_types, dict))==False yield items except AssertionError as e: self.logger.error( "%s type is required for input type but got %s" % (repr(type(items)), repr(type(self.input_types)))) raise def provider(input_types=None, should_shuffle=None, pool_size=-1, min_pool_size=-1, can_over_batch_size=True, calc_batch_size=None, cache=CacheType.NO_CACHE, 
check=False, check_fail_continue=False, init_hook=None, **outter_kwargs): """ Provider decorator. Use it to make a function into PyDataProvider2 object. In this function, user only need to get each sample for some train/test file. The basic usage is: .. code-block:: python @provider(some data provider config here...) def process(settings, file_name): while not at end of file_name: sample = readOneSampleFromFile(file_name) yield sample. The configuration of data provider should be setup by\: :param input_types: Specify the input types, can also be set in init_hook. It could be a list of InputType object. For example, input_types=[dense_vector(9), integer_value(2)]. Or user can set a dict of InputType object, which key is data_layer's name. For example, input_types=\ {'img': img_features, 'label': label}. when using dict of InputType, user could yield a dict of feature values, which key is also data_layer's name. :type input_types: list|tuple|dict :param should_shuffle: True if data should shuffle. Pass None means shuffle when is training and not to shuffle when is testing. :type should_shuffle: bool :param pool_size: Max number of sample in data pool. :type pool_size: int :param min_pool_size: Set minimal sample in data pool. The PaddlePaddle will random pick sample in pool. So the min_pool_size effect the randomize of data. :type min_pool_size: int :param can_over_batch_size: True if paddle can return a mini-batch larger than batch size in settings. It is useful when custom calculate one sample's batch_size. It is very danger to set it to false and use calc_batch_size together. Default is true. :type can_over_batch_size: bool :param calc_batch_size: a method to calculate each sample's batch size. Default each sample's batch size is 1. But to you can customize each sample's batch size. :type calc_batch_size: callable :param cache: Cache strategy of Data Provider. Default is CacheType.NO_CACHE :type cache: int :param init_hook: Initialize hook. 
Useful when data provider need load some external data like dictionary. The parameter is (settings, file_list, \*\*kwargs). - settings. It is the global settings object. User can set settings.input_types here. - file_list. All file names for passed to data provider. - is_train. Is this data provider used for training or not. - kwargs. Other keyword arguments passed from trainer_config's args parameter. :type init_hook: callable :param check: Check the yield data format is as same as input_types. Enable this will make data provide process slow but it is very useful for debug. Default is disabled. :type check: bool :param check_fail_continue: Continue train or not when check failed. Just drop the wrong format data when it is True. Has no effect when check set to False. :type check_fail_continue: bool """ def __wrapper__(generator): class DataProvider(object): def __init__(self, file_list, **kwargs): self.logger = logging.getLogger("") self.logger.setLevel(logging.INFO) self.input_types = None self.should_shuffle = should_shuffle true_table = [1, 't', 'true', 'on'] false_table = [0, 'f', 'false', 'off'] if not isinstance(self.should_shuffle, bool) and \ self.should_shuffle is not None: if isinstance(self.should_shuffle, basestring): self.should_shuffle = self.should_shuffle.lower() if self.should_shuffle in true_table: self.should_shuffle = True elif self.should_shuffle in false_table: self.should_shuffle = False else: self.logger.warning( "Could not recognize should_shuffle (%s), " "just use default value of should_shuffle." 
" Please set should_shuffle to bool value or " "something in %s" % (repr(self.should_shuffle), repr(true_table + false_table))) self.should_shuffle = None self.pool_size = pool_size self.can_over_batch_size = can_over_batch_size self.calc_batch_size = calc_batch_size self.file_list = file_list self.generator = generator self.cache = cache self.min_pool_size = min_pool_size self.input_order = kwargs['input_order'] self.check = check if init_hook is not None: init_hook(self, file_list=file_list, **kwargs) if 'slots' in outter_kwargs: self.logger.warning('setting slots value is deprecated, ' 'please use input_types instead.') self.slots = outter_kwargs['slots'] if input_types is not None: self.slots = input_types if self.input_types is not None: self.slots = self.input_types assert self.slots is not None, \ "Data Provider's input_types must be set" assert self.generator is not None use_dynamic_order = False if isinstance(self.slots, dict): # reorder input_types self.slots = [self.slots[ipt] for ipt in self.input_order] use_dynamic_order = True if len(self.slots) == 1: self.generator = SingleSlotWrapper(self.generator) if use_dynamic_order: self.generator = InputOrderWrapper(self.generator, self.input_order) else: self.generator = CheckInputTypeWrapper( self.generator, self.slots, self.logger) if self.check: self.generator = CheckWrapper(self.generator, self.slots, check_fail_continue, self.logger) return DataProvider return __wrapper__ def deserialize_args(args): """ Internal use only. :param args: :return: """ return cPickle.loads(args)
19,161
34.354244
83
py
Paddle
Paddle-master/python/paddle/trainer/config_parser_extension.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from paddle.proto.DataConfig_pb2 import DataConfig g_config = None def SimpleData(files=None, feat_dim=None, context_len=None, buffer_capacity=None): data_config = DataConfig() data_config.type = 'simple' data_config.files = files data_config.feat_dim = feat_dim if context_len is not None: data_config.context_len = context_len if buffer_capacity: data_config.buffer_capacity = buffer_capacity return data_config def get_config_funcs(trainer_config): global g_config g_config = trainer_config return dict(SimpleData=SimpleData)
1,246
30.175
74
py
Paddle
Paddle-master/python/paddle/trainer/PyDataProviderWrapper.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module provide a wrapper(decorator) to wrap a data process method into a PyDataProvider. Some examples are shown `here <data_provider/python_case.html>`_. """ import struct import array import random import gc import logging import pstats import sys import numpy import functools __all__ = [ 'DenseSlot', 'SlotType', 'SparseNonValueSlot', 'StringSlot', 'SparseValueSlot', 'IndexSlot', 'PoolSize', 'GeneralPyDataProvider', 'provider', 'init_hook_wrapper' ] try: # Just for profile mode, will try to import cProfile first. # Most python will contains cProfile, cProfile/profile are basically same. # ref: https://docs.python.org/2/library/profile.html#introduction-to-the-profilers import cProfile as profile except ImportError: import profile try: import cPickle as pickle except ImportError: import pickle import io class SlotType(object): # Just a hint for user. pass class DenseSlot(SlotType): """ Dense Slot Type: Each item is the value of a Dense Vector. Its yield format for :code:`provider` is: - **NonSeq**: [float, float, ... ] - **Seq**: [[float, float, ...], [float, float ....], ... ] - **SubSeq**: [[[float, float, ...], [float ....], ...] , \ [[float, float, ...], [float ....], ...] , ...] 
""" def __init__(self, dim): """ :param dim: slot dimension :type dim: int """ self.dim = dim self.type = 0 class SparseNonValueSlot(SlotType): """ Sparse NonValue Slot Type: Each item is the id of a Sparse Vector. Its yield format for :code:`provider` is: - **NonSeq**: [int, int, ...] - **Seq**: [[int, int, ...], [int, int, ...], ... ] - **SubSeq**: [[[int, int, ...], [int, ....], ...] , \ [[int, int, ...], [int, ....], ...] , ...] """ def __init__(self, dim): """ :param dim: slot dimension :type dim: int """ self.dim = dim self.type = 1 class SparseValueSlot(SlotType): """ Sparse Value Slot Type: Each item is the id and value of a Sparse Vector. Its yield format for :code:`provider` is: - **NonSeq**: [(int, float), (int, float), ... ] - **Seq**: [[(int,float), (int, float), ... ], \ [(int, float), (int, float), ...], ... ] - **SubSeq**: [[[(int,float), ...], [(int, float), ....], ...] , \ [[(int,float), ...], [(int, float), ....], ...] , ...] """ def __init__(self, dim): """ :param dim: slot dimension. :type dim: int """ self.dim = dim self.type = 2 class IndexSlot(SlotType): """ Index Value Slot Type: Each item is the id of Label. Its yield format for :code:`provider` is: - **NonSeq**: int - **Seq**: [int, int, ....] - **SubSeq**: [[int, int, ...], [int, int, ...], ... ] """ def __init__(self, dim): """ :param dim: slot dimension :type dim: int """ self.dim = dim self.type = 3 class StringSlot(SlotType): """ String Value Slot Type: Each item is a string for printout, \ can be used in DataLayer too. Its yield format for :code:`provider` is: - **NonSeq**: string - **Seq**: [string, string, ....] - **SubSeq**: [[string, string, ...], [string, string, ...], ... ] """ def __init__(self, dim): """ :param dim: slot dimension :type dim: string """ self.dim = dim self.type = 6 class SparseNonValueHandler(object): """ Private Class, Use for converting python object to paddle string. 
""" def __init__(self): self.offsets = [] self.value = [] self.offset_count = 0 def __call__(self, ele): """ It will be invoked when scan each sparse data. :param ele: list of sparse data, maybe non-value [ idx, ... ] or value. [ (idx, val), ... ] :type ele: list """ self.offsets.append(self.offset_count) self.offset_count += len(ele) self.processElement(ele) def processElement(self, ele): """ Process for element list. See __call__ for more document. """ self.value += ele def done(self, data_stream, int_packer): """ Dump data to stream. :param data_stream: Output Stream. :param int_packer: A struct.Struct("i") object """ data_stream.write(array.array("i", self.offsets).tostring()) data_stream.write(int_packer.pack(self.offset_count)) data_stream.write(array.array("i", self.value).tostring()) class SparseValueHandler(SparseNonValueHandler): """ Private class, use for converting python obj to paddle string. """ def __init__(self): SparseNonValueHandler.__init__(self) self.weight = [] def processElement(self, ele): for idx, w in ele: self.value.append(idx) self.weight.append(w) def done(self, data_stream, int_packer): SparseNonValueHandler.done(self, data_stream, int_packer) data_stream.write(int_packer.pack(self.offset_count)) data_stream.write(array.array("f", self.weight).tostring()) class StringHandler(object): """ Private Class, Use for converting python object to paddle string. """ def __init__(self, data_stream, int_packer): self.data_stream = data_stream self.int_packer = int_packer def __call__(self, ele): """ It will be invoked when scan each string data. 
:param ele: string data :type ele: str """ self.data_stream.write(self.int_packer.pack(len(ele))) self.data_stream.write(array.array("c", ele).tostring()) class GeneralPyDataProvider: def __init__(self, *file_list, **kwargs): """ :param file_list: input file_list """ del kwargs # unused gc.disable() assert isinstance(self.logger, logging.Logger) self.use_seq_flag = hasattr(self, "use_seq_flag") and self.use_seq_flag self.slots_num = len(self.getSlots()) self.file_list = list(file_list) self.generators = map(self.generateData, self.file_list) self.int_packer = struct.Struct("i") self.head_packer = struct.Struct("ii") self.float_packer = struct.Struct("f") self.shuffler = lambda *args, **kwargs: None self.data_pool = [] self.has_subseq = [] self.has_checked = False self.debug = hasattr(self, "debug") and self.debug if hasattr(self, "profile_filename") and isinstance( self.profile_filename, str): self.profile_count = 0 self.is_profile = True else: self.is_profile = False if not hasattr(self, "file_count") or not isinstance(self.file_count, int): self.file_count = sys.maxint if not hasattr(self, "can_over_batch_size"): self.can_over_batch_size = True elif not self.can_over_batch_size: self.logger.warn( "User should ensure every data size is not larger than batch" " size when can_over_batch_size = False") self.data_pool_idx = 0 def reset(self): """Reset all data in provider.""" self.logger.debug("reset dataprovider.") self.generators = map(self.generateData, self.file_list) self.shuffler = lambda *args, **kwargs: None self.data_pool = [] self.data_pool_idx = 0 if self.file_count != 0: self.max_pool_size = 0 # When use Profile, each pass will print a profile result. 
if self.is_profile: if hasattr(self, "profiler") and isinstance(self.profiler, profile.Profile): self.profiler.disable() fn = "%s_%d" % (self.profile_filename, self.profile_count) sortby = "cumulative" with open(fn, "w") as f: pstats.Stats( self.profiler, stream=f).sort_stats(sortby).print_stats() self.logger.info("saving profile to file %s" % fn) self.profile_count += 1 self.logger.info("resetting profile") self.profiler = profile.Profile() self.profiler.enable() def shuffle(self): """ shuffle data""" if not self.should_shuffle: return else: self.logger.debug("shuffling data.") random.shuffle(self.generators) self.shuffler = random.shuffle def getSlots(self): """ :return : return a list of SlotType :rtype: list """ return [] def generateData(self, fn): """ :param fn: file name :return: a generator to yield data one by one. """ raise NotImplementedError def calculateDataBatchSize(self, data): """ :param data: One sample which yield by generateData :type data: list :return: The batch size that the data contribute. :rtype: int """ return 1 def getHeader(self): """return paddle header format""" ret = self.head_packer.pack(self.slots_num, self.use_seq_flag) for obj in self.getSlots(): ret += self.head_packer.pack(obj.type, obj.dim) return ret def getHeaderNative(self): return self.use_seq_flag, self.getSlots() def getNextBatchNative(self, batch_size): ret_list = [] self.__prepareData(batch_size, ret_list) return ret_list def getNextBatch(self, batch_size): """ :param batch_size: the batch_size approximately return. :return: return paddle pyDataProvider format, just see documents. :rtype: str NOTE: If can_over_batch_size is True, the return batch_size >= input batch_size. Otherwise, the return batch_size < input batch_size, BUT USER MUST ENSURE THAT each data's batch size is less than input batch_size. 
""" ret_list = [] current_batch_size = self.__prepareData(batch_size, ret_list) # create unified format for ret_list with differnt slots_num if self.slots_num == 1: ret_list = [ret_list] if current_batch_size == 0: return self.int_packer.pack(current_batch_size) data_bytes = io.BytesIO() seq_bytes = io.BytesIO() subseq_bytes = io.BytesIO() data_stream = io.BufferedWriter(data_bytes) seq_stream = io.BufferedWriter(seq_bytes) subseq_stream = io.BufferedWriter(subseq_bytes) def convertDataImpl(idx, data_callback): """ This method will handle sequence in return data. invoke data_callback one by one. :param idx: the slot index. :param data_callback: a callback, which type is (each sample) => None. """ indices = 0 slot_sample_num = len(ret_list) if self.use_seq_flag: slot_sample_num = 0 if self.has_subseq[idx]: # has sub-sequence slot_subseq_num = 0 for dat in ret_list: dat = dat[idx] slot_subseq_num += len(dat) for sub_dat in dat: slot_sample_num += len(sub_dat) subseq_stream.write(self.int_packer.pack(slot_subseq_num)) else: for dat in ret_list: dat = dat[idx] slot_sample_num += len(dat) seq_stream.write(self.int_packer.pack(len(ret_list))) data_stream.write(self.int_packer.pack(slot_sample_num)) for dat in ret_list: dat = dat[idx] if self.use_seq_flag: seq_stream.write(self.int_packer.pack(indices)) if self.has_subseq[idx]: # has sub-sequence for sub_dat in dat: writeDataStream(sub_dat, data_callback) subseq_stream.write(self.int_packer.pack(indices)) indices += len(sub_dat) else: writeDataStream(dat, data_callback) indices += len(dat) else: writeDataStream(dat, data_callback) def writeDataStream(dat, data_callback): if self.use_seq_flag > 0: if data_callback is None: # Special for index slot data_stream.write(array.array("i", dat).tostring()) else: for ele in dat: data_callback(ele) else: if data_callback is None: # Special for index slot data_stream.write(self.int_packer.pack(dat)) else: data_callback(dat) try: for i in range(self.slots_num): slot = 
self.getSlots()[i] # According to the data_type, each slot data will be converted to binary if isinstance(slot, DenseSlot): convertDataImpl(i, lambda e: data_stream.write( array.array("f", e).tostring())) elif isinstance(slot, SparseNonValueSlot): handler = SparseNonValueHandler() convertDataImpl(i, handler) handler.done(data_stream, self.int_packer) elif isinstance(slot, SparseValueSlot): handler = SparseValueHandler() convertDataImpl(i, handler) handler.done(data_stream, self.int_packer) elif isinstance(slot, IndexSlot): convertDataImpl(i, None) elif isinstance(slot, StringSlot): handler = StringHandler(data_stream, self.int_packer) convertDataImpl(i, handler) else: raise RuntimeError("The data_type must be 0/1/2/3/6") data_stream.flush() seq_stream.flush() subseq_stream.flush() return "".join([ self.int_packer.pack(current_batch_size), data_bytes.getvalue(), seq_bytes.getvalue(), subseq_bytes.getvalue() ]) finally: data_stream.close() seq_stream.close() subseq_stream.close() data_bytes.close() seq_bytes.close() subseq_bytes.close() def hasSubseq(self, ret_list): # create unified format for ret_list with differnt slots_num if self.slots_num == 1: ret_list = [ret_list] # decide whether slot has sub-sequence using its first sample for i in range(self.slots_num): slot = self.getSlots()[i] dat = ret_list[0][i][0] if isinstance(slot, IndexSlot) or isinstance(slot, StringSlot): if isinstance(dat, list) or isinstance(dat, numpy.ndarray): self.has_subseq.append(1) # has_subseq = True continue elif isinstance(dat[0], list) or isinstance(dat[0], numpy.ndarray): self.has_subseq.append(1) # has_subseq = True continue self.has_subseq.append(0) # has_subseq = False def checkOrder(self): first_noSubseq_slot = self.slots_num last_subseq_slot = -1 for i in range(self.slots_num): if not self.has_subseq[i]: first_noSubseq_slot = i break for i in range(self.slots_num): if self.has_subseq[i]: last_subseq_slot = i if first_noSubseq_slot < last_subseq_slot: raise RuntimeError( "slot 
hasSubseq must put before than slot without subseq") self.has_checked = True def __prepareData(self, batch_size, ret_list): current_batch_size = 0 could_exit = False while not could_exit: if len(self.data_pool) == 0: self.data_pool_idx = 0 self.fillPool() if len(self.data_pool) != 0: for idx in xrange(self.data_pool_idx, len(self.data_pool)): current_batch_size += self.calculateDataBatchSize( self.data_pool[idx]) if current_batch_size >= batch_size: could_exit = True break if current_batch_size > batch_size and not self.can_over_batch_size: # if cannot over batch size current_batch_size -= self.calculateDataBatchSize( self.data_pool[idx]) idx -= 1 ret_list += self.data_pool[self.data_pool_idx:idx + 1] # for speed reason, just shift left index, not delete data actually. self.data_pool_idx = idx + 1 if self.data_pool_idx == len(self.data_pool): self.data_pool = [] else: break if self.use_seq_flag and not self.has_checked: # compute self.has_subseq and checkOrder only at first time self.hasSubseq(ret_list) self.checkOrder() return current_batch_size def fillPool(self): """ Fill the pool to max_pool_size. If max_pool_size is None, then read file_count to pool. 
""" if self.max_pool_size == 0: for i in xrange(min(self.file_count, len(self.generators))): self.data_pool += list(self.generators[i]) self.generators = self.generators[min(self.file_count, len(self.generators)):] self.max_pool_size = len(self.data_pool) else: while len(self.data_pool) < self.max_pool_size and len( self.generators) != 0: try: self.data_pool.append(self.generators[0].next()) except StopIteration: self.generators.pop(0) self.shuffler(self.data_pool) class PoolSize(object): """Max number of sample which contains in provider.""" def __init__(self, pool_size): self.size = pool_size def default_init_hook(cls, *args, **kwargs): """ default hook, do nothing """ del cls, args, kwargs def provider(slots=None, use_seq=False, should_shuffle=True, pool_size=1, can_over_batch_size=True, calc_batch_size=lambda data: 1, debug=False, init_hook=default_init_hook, profile_filename=None): """ The decorator for PyDataProvider. User should use this to create Provider class. User should only concern how to read sample from file. So the basic usage is: .. code-block:: python @provider(some data provider config here...) def process(obj, file_name): while not at end of file_name: sample = readOneSampleFromFile(file_name) yield sample. The configuration of data provider should be setup by: :param init_hook: A callback will be invoked when PyDataProvider instance \ created. The parameter is (obj, \*args, \*\*kwargs). - **obj**: actually data provider instance, which \ contains some global objects in obj.xxxxx, \ and is used by process function. 1. **obj.slots**: a list of SlotType Object. Can be \ set in init. For example, obj.slots = \ [DenseSlot(9), IndexSlot(2)]. 2. **obj.logger**: a logger object. User can invoke \ obj.logger.info(), obj.logger.fatal(), etc. - **args** and **kwargs**: the data provider __init__ \ parameters. 
For example, load_data_args \ will be found in \*\*kwargs, \ and if you want to recieve \ it from trainer_config, \ recommand to use init_hook_wrapper :type init_hook: callable :param pool_size: - **int**: it will read at most pool_size files to memory. - **PoolSize**: it will read at most PoolSize.size samples to memory. - If not set, it will read all the files to memory. :type pool_size: int | PoolSize :param slots: Specify the SlotTypes, can also be set in init_hook. It has two formats: - A list of SlotType objects. For example, slots = \ [DenseSlot(9), IndexSlot(2)]. - A method return a list of SlotTypes, and the parameter of \ method is (obj, \*file_list, \*\*kwargs). :type slots: list | callable :param use_seq: False if use no sequence (Default). True if use sequence: - If sequence has **no sub-sequence**: Each slot will \ return a list of data. This list is one sequence. \ So the return format likes \ [[a0, a1, a2], [b1, b2, b3, b4], [c1]]. - If sequence has **sub-sequence**: Each slot will return \ a nested-list of data. This list contains several \ sub-lists, each sub-list is one sub-sequence. \ So the return format likes \ [[[a0, a1, a2], [a4, a5]], [[b1, b2, b3, b4], [b5, b6]], [[c1], [c2]]]. :type use_seq: bool :param should_shuffle: True if data should shuffle. :type should_shuffle: bool :param calc_batch_size: The method calculate each data's batch size. - Default is the batch size of one sample. - User can customize by **lamda** funtion. For example, \ :code:`calc_batch_size = lambda data : len(data)` \ means calculating the token number of a sequence data. :type calc_batch_size: callable :param can_over_batch_size: Whether :code:`actual batch size >= input batch size` - **True** (>=): getNextBatch method can return more data (Default). - **False** (<): user must ensure that each data's batch size < input batch size. :type can_over_batch_size: bool :param debug: True if enable debug logger and some debug check. Default is False. 
:type debug: bool :param profile_filename: None if disable profile (Default). Otherwise, \ the data provider will dump profile result when \ reset. And the dump filename is \ **<profile_filename>_<reset_count>**. :type profile_filename: None | Str """ def _wrapper(handler): class Cls(GeneralPyDataProvider): """ Real PyDataProvider Class. """ def __init__(self, *file_list, **kwargs): logging.basicConfig( format="[%(levelname)s %(asctime)s %(filename)s:%(lineno)s]" " %(message)s") self.logger = logging.getLogger("") if debug: self.logger.setLevel(logging.DEBUG) self.logger.debug("Running pydataprovider in debug mode.") else: self.logger.setLevel(logging.INFO) init_hook(self, *file_list, **kwargs) if callable(slots): self.slots = slots(self, *file_list, **kwargs) elif slots is not None: self.slots = slots if isinstance(pool_size, int): self.max_pool_size = 0 self.file_count = pool_size elif isinstance(pool_size, PoolSize): self.max_pool_size = pool_size.size self.file_count = 0 else: raise RuntimeError self.can_over_batch_size = can_over_batch_size self.debug = debug self.profile_filename = profile_filename self.use_seq_flag = use_seq self.should_shuffle = should_shuffle GeneralPyDataProvider.__init__(self, *file_list, **kwargs) def getSlots(self): return self.slots def generateData(self, f): return handler(self, f) def calculateDataBatchSize(self, data): return calc_batch_size(data) return Cls return _wrapper def init_hook_wrapper(func): """ Wrap a method for PyDataProviderWrapper's init_hook. This method can receive parameter from trainer_config's load_data_args. The load_data_args must pass a pickle.dumps() value, and dump a map as keyword args. The wrapped method :code:`func` will receive them as keyword args. So an example usage is: .. 
code-block:: python @init_hook_wrapper def hook(obj, dictionary, file_list, **kwargs): obj.dictionary = dictionary obj.slots = [IndexSlot(len(obj.dictionary)), IndexSlot(len(open(file_list[0], "r").readlines()))] :param func: init_hook function :type func: callable :return: wrapped method, can be passed into @provider. """ @functools.wraps(func) def wrapper(obj, *file_list, **kwargs): args = kwargs.get("load_data_args", dict()) if isinstance(args, basestring): args = pickle.loads(args) args['file_list'] = file_list func(obj=obj, **args) return wrapper
27,255
35.341333
115
py
Paddle
Paddle-master/python/paddle/trainer/recurrent_units.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # recurrent_units.py # Version 2.0 # # Some recurrent units can be used in recurrent layer group, # to use these units, import this module in your config_file: # import trainer.recurrent_units # # The modules in this file are DEPRECATED. # If you would like to use lstm/gru # please use the functions defined in paddle.trainer_config_helpers. from paddle.trainer.config_parser import * # long short term memory, can be used in recurrent machine # *inputs* must be a list of Projections, for example: # inputs = [FullMatrixProjection("input_layer_name")], # *para_prefix* defines parameter names, if the *para_prefix* of # two LstmRecurrentUnit is same, they share same parameters # *out_memory* can be defined outside if it's used outside def LstmRecurrentUnit(name, size, active_type, state_active_type, gate_active_type, inputs, para_prefix=None, error_clipping_threshold=0, out_memory=None): if para_prefix is None: para_prefix = name if out_memory is None: out_memory = Memory(name=name, size=size) state_memory = Memory(name=name + "_" + "state", size=size) Layer( name=name + "_" + "input_recurrent", type="mixed", size=size * 4, #(input_s, input_gate, forget_gate, output_gate) error_clipping_threshold=error_clipping_threshold, bias=Bias( initial_std=0, parameter_name=para_prefix + "_input_recurrent.b"), inputs=inputs + [ FullMatrixProjection( out_memory, parameter_name=para_prefix + 
"_input_recurrent.w"), ], ) LstmStepLayer( name=name, size=size, bias=Bias(parameter_name=para_prefix + "_check.b"), inputs=[name + "_" + "input_recurrent", state_memory], active_type=active_type, active_gate_type=gate_active_type, active_state_type=state_active_type, ) GetOutputLayer( name=name + "_" + "state", size=size, inputs=Input( name, input_layer_argument="state"), ) def LstmRecurrentUnitNaive(name, size, active_type, state_active_type, gate_active_type, inputs, para_prefix=None, error_clipping_threshold=0, out_memory=None): if para_prefix is None: para_prefix = name if out_memory is None: out_memory = Memory(name=name, size=size) state_memory = Memory(name=name + "_" + "state", size=size) Layer( name=name + "_" + "input_recurrent", type="mixed", size=size * 4, #(input_s, input_gate, forget_gate, output_gate) error_clipping_threshold=error_clipping_threshold, bias=Bias( initial_std=0, parameter_name=para_prefix + "_input_recurrent.b"), inputs=inputs + [ FullMatrixProjection( out_memory, parameter_name=para_prefix + "_input_recurrent.w"), ], ) ExpressionLayer( name=name + "_" + "input_s", size=size, active_type=active_type, inputs=[ IdentityOffsetProjection( name + "_" + "input_recurrent", offset=0) ], ) ExpressionLayer( name=name + "_" + "input_gate", active_type=gate_active_type, inputs=[ IdentityOffsetProjection( name + "_" + "input_recurrent", offset=size), DotMulProjection( state_memory, parameter_name=para_prefix + "_input_check.w") ], ) ExpressionLayer( name=name + "_" + "forget_gate", active_type=gate_active_type, inputs=[ IdentityOffsetProjection( name + "_" + "input_recurrent", offset=size * 2), DotMulProjection( state_memory, parameter_name=para_prefix + "_forget_check.w") ], ) ExpressionLayer( name=name + "_" + "state", inputs=[ DotMulOperator([name + "_" + "input_s", name + "_" + "input_gate"]), DotMulOperator([state_memory, name + "_" + "forget_gate"]), ], ) ExpressionLayer( name=name + "_" + "output_gate", active_type=gate_active_type, 
inputs=[ IdentityOffsetProjection( name + "_" + "input_recurrent", offset=size * 3), DotMulProjection( name + "_" + "state", parameter_name=para_prefix + "_output_check.w") ], ) ExpressionLayer( name=name + "_" + "state_atv", active_type=state_active_type, inputs=IdentityProjection(name + "_" + "state"), ) ExpressionLayer( name=name, inputs=DotMulOperator( [name + "_" + "state_atv", name + "_" + "output_gate"]), ) # like LstmRecurrentUnit, but it's a layer group. # it is equivalent to LstmLayer def LstmRecurrentLayerGroup(name, size, active_type, state_active_type, gate_active_type, inputs, para_prefix=None, error_clipping_threshold=0, seq_reversed=False): input_layer_name = name + "_" + "transform_input" Layer( name=input_layer_name, type="mixed", size=size * 4, active_type="", bias=False, inputs=inputs, ) RecurrentLayerGroupBegin( name + "_layer_group", in_links=[input_layer_name], out_links=[name], seq_reversed=seq_reversed) LstmRecurrentUnit( name=name, size=size, active_type=active_type, state_active_type=state_active_type, gate_active_type=gate_active_type, inputs=[IdentityProjection(input_layer_name)], para_prefix=para_prefix, error_clipping_threshold=error_clipping_threshold, ) RecurrentLayerGroupEnd(name + "_layer_group") # gated recurrent unit, can be used in recurrent machine # *inputs* should be a list of Projections, for example: # inputs = [FullMatrixProjection("input_layer_name")], # *para_prefix* defines parameter names, if the *para_prefix* of # two GatedRecurrentUnit is same, they share same parameters # *out_memory* can be defined outside if it's used outside def GatedRecurrentUnit(name, size, active_type, gate_active_type, inputs, para_prefix=None, error_clipping_threshold=0, out_memory=None): if type_of(inputs) == str: #only used by GatedRecurrentLayerGroup input_layer_name = inputs else: input_layer_name = name + "_" + "transform_input" Layer( name=input_layer_name, type="mixed", size=size * 3, active_type="", bias=False, inputs=inputs, ) if 
para_prefix is None: para_prefix = name if out_memory is None: out_memory = Memory(name=name, size=size) GruStepLayer( name=name, size=size, bias=Bias(parameter_name=para_prefix + "_gate.b"), inputs=[ input_layer_name, Input( out_memory, parameter_name=para_prefix + "_gate.w") ], active_type=active_type, active_gate_type=gate_active_type, ) def GatedRecurrentUnitNaive(name, size, active_type, gate_active_type, inputs, para_prefix=None, error_clipping_threshold=0, out_memory=None): if type_of(inputs) == str: #only used by GatedRecurrentLayerGroup input_layer_name = inputs else: input_layer_name = name + "_" + "transform_input" Layer( name=input_layer_name, type="mixed", size=size * 3, active_type="", bias=False, inputs=inputs, ) if para_prefix is None: para_prefix = name if out_memory is None: out_memory = Memory(name=name, size=size) Layer( name=name + "_" + "update_gate", type="mixed", size=size, active_type=gate_active_type, error_clipping_threshold=error_clipping_threshold, bias=Bias( initial_std=0, parameter_name=para_prefix + "_update_gate.b"), inputs=[ IdentityOffsetProjection( input_layer_name, offset=0), FullMatrixProjection( out_memory, parameter_name=para_prefix + "_update_gate.w") ], ) Layer( name=name + "_" + "reset_gate", type="mixed", size=size, active_type=gate_active_type, error_clipping_threshold=error_clipping_threshold, bias=Bias( initial_std=0, parameter_name=para_prefix + "_reset_gate.b"), inputs=[ IdentityOffsetProjection( input_layer_name, offset=size), FullMatrixProjection( out_memory, parameter_name=para_prefix + "_reset_gate.w") ], ) ExpressionLayer( name=name + "_" + "reset_output", inputs=DotMulOperator([out_memory, name + "_" + "reset_gate"]), ) Layer( name=name + "_" + "output_candidate", type="mixed", size=size, active_type=active_type, error_clipping_threshold=error_clipping_threshold, bias=Bias( initial_std=0, parameter_name=para_prefix + "_output_candidate.b"), inputs=[ IdentityOffsetProjection( input_layer_name, offset=size * 2), 
FullMatrixProjection( name + "_" + "reset_output", parameter_name=para_prefix + "_output_candidate.w") ], ) ExpressionLayer( #element-wise interpolation name=name, inputs=[ IdentityProjection(out_memory), DotMulOperator( [out_memory, name + "_" + "update_gate"], scale=-1.0), DotMulOperator( [name + "_" + "output_candidate", name + "_" + "update_gate"]), ], ) # like GatedRecurrentUnit, but it's a layer group. # it is equivalent to GatedRecurrentLayer. def GatedRecurrentLayerGroup(name, size, active_type, gate_active_type, inputs, para_prefix=None, error_clipping_threshold=0, seq_reversed=False): input_layer_name = name + "_" + "transform_input" Layer( name=input_layer_name, type="mixed", size=size * 3, active_type="", bias=False, inputs=inputs, ) RecurrentLayerGroupBegin( name + "_layer_group", in_links=[input_layer_name], out_links=[name], seq_reversed=seq_reversed) GatedRecurrentUnit( name=name, size=size, active_type=active_type, gate_active_type=gate_active_type, inputs=input_layer_name, #transform outside para_prefix=para_prefix, error_clipping_threshold=error_clipping_threshold, ) RecurrentLayerGroupEnd(name + "_layer_group")
12,381
33.586592
80
py
Paddle
Paddle-master/python/paddle/trainer/config_parser.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function ''' The following functions are available in the config file: Bias: define bias. To be used as value of bias argument in Layer(). Data: define data provider. Input: define input layer for a layer. To be used as element of inputs argument in Layer(). Conv: define a convolution operation for an input of a layer. Norm: define a normalization operation for an input of a layer. Pool: define a pooling operation for an input of a layer. Layer: define a layer. Parameter: define a parameter. Import: import another config file. If the imported config file name is a relative path, then it will be searched under the directory of the current config file. Inputs(layer_names...): Define the name of the input layers of the NeuralNetwork. The type of these layers must be "data". These layers will be provided with the DataBatch obtained from DataProvider. The data streams from DataProvider must have the same order. Outputs(layer_names...): Define the name of the output layers of the NeuralNetwork. Usually the output is simply the cost layer. You can specify other layers as outputs and calculate the cost (and its derivative) yourself. default_initial_std(val) default_initial_mean(val) default_momentum(val): default_decay_rate(val): Set the default value for these parameters get_config_arg(name, type, default): Get the value for a config parameter. 
*** customized extension to config_parser *** The functionality of the config_parser can be extended. If the config_arg_str for parse_config() contains extension_module_name=[MODULE_NAME], then config_parser will call MODULE_NAME.get_config_funcs(g_config) MODULE_NAME.get_config_funcs() should return a dictionary of name to functions, those functions will be available in the config file. See trainer/tests/config_parser_test.py for example To use this from paddle_trainer, paddle_trainer should be called with --config_args=extension_module_name=[MODULE_NAME] ''' import copy import logging import os import sys import traceback import math import shutil try: from paddle.proto.DataConfig_pb2 import DataConfig from paddle.proto.ModelConfig_pb2 import ModelConfig from paddle.proto.ModelConfig_pb2 import LayerConfig from paddle.proto.ModelConfig_pb2 import LayerInputConfig from paddle.proto.ModelConfig_pb2 import ProjectionConfig from paddle.proto.ModelConfig_pb2 import OperatorConfig from paddle.proto.ModelConfig_pb2 import GeneratorConfig from paddle.proto.ModelConfig_pb2 import LinkConfig from paddle.proto.ParameterConfig_pb2 import ParameterConfig from paddle.proto.ParameterConfig_pb2 import ParameterUpdaterHookConfig from paddle.proto.TrainerConfig_pb2 import TrainerConfig except Exception as e: traceback.print_exc() raise logging.basicConfig( format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', ) logger = logging.getLogger('paddle') logger.setLevel(logging.INFO) __real_print__ = print print = logger.info # from layer type name to layer class g_layer_type_map = {} # Initialize global variables. 
We use this function so that we can # call parse_config() multiple times def init_config_environment( g_default_momentum=None, g_default_decay_rate=None, g_default_initial_mean=0., g_default_initial_std=0.01, g_default_num_batches_regularization=None, g_default_initial_strategy=0, g_default_initial_smart=False, g_default_gradient_clipping_threshold=None, g_default_device=None, g_default_update_hooks=None, g_default_compact_func=None, g_config=TrainerConfig(), g_layer_map={}, g_parameter_map={}, g_parameter_initializer_map={}, g_extended_config_funcs={}, # store command args of paddle_trainer g_command_config_args={}, # Used for PyDataProvider to avoid duplicate module name g_py_module_name_list=[], g_current_submodel=None, g_root_submodel=None, g_submodel_map={}, g_submodel_stack=[], g_add_submodel_suffix=False, ): # directly iterate through locals().iteritems() will change # the size of locals() due to introducing k, v into scope # which will break the process in some env local_vars = copy.deepcopy(locals()) for k, v in local_vars.iteritems(): globals()[k] = v # Because type is widely used as a variable name in this code. 
# we need a different function name for the builtin type() def type_of(x): return type(x) # Check a condition derived config file def config_assert(b, msg): if not b: logger.fatal(msg) g_config_funcs = {} # decorator for indicating a function which can be used in config file def config_func(func): g_config_funcs[func.func_name] = func return func # decorator for indicating a class which can be used in config file def config_class(cls): g_config_funcs[cls.__name__] = cls return cls # decorator for indicating a class for a layer type def config_layer(layer_type): def wrap(cls): g_config_funcs[cls.__name__] = cls g_layer_type_map[layer_type] = cls return cls return wrap def gen_parameter_name(layer_name, input_index): return '_%s.w%d' % (layer_name, input_index) def gen_bias_parameter_name(layer_name): return '_%s.wbias' % layer_name def default(x, default_value): return default_value if x is None else x class Cfg(object): def add_keys(self, locals): for k, v in locals.iteritems(): if not k.startswith('_'): self.__setattr__(k, v) # functions available in config file # Define the name of the input layers of the NeuralNetwork. # The type of these layers must be "data". # These layers will be provided with the DataBatch obtained # from DataProvider. The data streams from DataProvider must # have the same order. @config_func def Inputs(*args): for name in args: name = MakeLayerNameInSubmodel(name) global g_current_submodel, g_root_submodel if g_current_submodel.is_recurrent_layer_group: config_assert(False, "Do not set Inputs in recurrent layer group") else: g_current_submodel.input_layer_names.append(name) if g_current_submodel is g_root_submodel: g_config.model_config.input_layer_names.append(name) @config_func def HasInputsSet(): return len(g_current_submodel.input_layer_names) != 0 # Define the name of the output layers of the NeuralNetwork. # Usually the output is simply the cost layer. 
# You can specify other layers as outputs and calculate the # cost (and its derivative) yourself. @config_func def Outputs(*args): for name in args: name = MakeLayerNameInSubmodel(name) global g_current_submodel, g_root_submodel if g_current_submodel.is_recurrent_layer_group: config_assert(False, "Do not set Outputs in recurrent layer group") else: g_current_submodel.output_layer_names.append(name) if g_current_submodel is g_root_submodel: g_config.model_config.output_layer_names.append(name) @config_func def SubModelBegin(name): global g_current_submodel, g_root_submodel, g_submodel_stack g_submodel_stack.append(g_current_submodel) name = MakeLayerNameInParentSubmodel(name) #rename in nested submodel config_assert(name not in g_submodel_map, 'Duplicated submodel name: %s' % name) sub_model = g_config.model_config.sub_models.add() sub_model.name = name g_submodel_map[name] = sub_model g_current_submodel = sub_model @config_func def SubModelEnd(name=None): global g_current_submodel, g_root_submodel, g_submodel_stack config_assert(g_current_submodel is not g_root_submodel, "submodel not begin") if name is not None: config_assert( g_current_submodel.name == MakeLayerNameInParentSubmodel(name), "submodel name error") g_current_submodel = g_submodel_stack.pop() def MakeLayerNameInParentSubmodel(name): suffix = "" if len(g_submodel_stack) > 1: suffix = "@" + g_submodel_stack[-1].name return name + suffix def GetLayerBaseName(name): return name.split('@')[0] def MakeLayerNameInSubmodel(name, submodel_name=None): global g_current_submodel global g_add_submodel_suffix if (submodel_name is None and not g_add_submodel_suffix and not g_current_submodel.is_recurrent_layer_group): return name if submodel_name is None: submodel_name = g_current_submodel.name return name + "@" + submodel_name # Define a recurrent layer group begin with RecurrentLayerGroupBegin # and end with RecurrentLayerGroupEnd. 
# A recurrent layer group forward/backward one frame after previous frame # forward/backward through all layers in layer group. # in_links are names of layer used as input layer in the layer group. # out_links are names of layer in layer group used as outside layer's input. # # If generator is set, the layer group need one or more than one outlinks. # The first outlink should always be the generated token ids. # If generator.num_results_per_sample is not set, the output for one sample is # a ids sequence. Else if num_results_per_sample is more than one, # the output for one sample is up to #num_results_per_sample generated # sequences, which are packed in one sequence in output ids vector. Each # generated sequence has a generation probability. The probabilities for one # sample are stored in one row of output value matrix. # Packed generated sequences format, for each i: # seq_i_length: one interger, seq_i content length, # [seq_i content], length = seq_i_length # seq_i_end_mark: one interger, for format check, always -1 # You can use "seq_text_printer" to print the output of the generator. 
@config_func def RecurrentLayerGroupWithoutOutLinksBegin(name, in_links, seq_reversed=False, target_inlinkname=""): global g_current_submodel config_assert(g_config.model_config.type == "recurrent_nn", "RecurrentLayerGroup should be used only in recurrent_nn") RecurrentLayerGroup(name=name) # add to father model SubModelBegin(name) g_current_submodel.is_recurrent_layer_group = True g_current_submodel.reversed = seq_reversed in_links_count = 0 for linkid, link in enumerate(in_links): if isinstance(link, basestring): name = link else: name = link.link_name in_links_count += 1 layer_name = MakeLayerNameInParentSubmodel(name) layer = g_layer_map[layer_name] ScatterAgentLayer( name=name, size=layer.size, width=layer.width, height=layer.height) pair = g_current_submodel.in_links.add() pair.layer_name = layer_name pair.link_name = MakeLayerNameInSubmodel(name) @config_func def RecurrentLayerGroupSetOutLink(link): if isinstance(link, basestring): name = link else: name = link.link_name layer_name = MakeLayerNameInParentSubmodel(name) pair = g_current_submodel.out_links.add() pair.layer_name = MakeLayerNameInSubmodel(name) pair.link_name = layer_name def RecurrentLayerGroupSetGenerator(generator=None): generator.eos_layer_name = MakeLayerNameInSubmodel(generator.eos_layer_name) g_current_submodel.generator.CopyFrom(generator) @config_func def RecurrentLayerGroupBegin(name, in_links, out_links, generator=None, target_inlinkname="", seq_reversed=False): RecurrentLayerGroupWithoutOutLinksBegin(name, in_links, seq_reversed) for link in out_links: RecurrentLayerGroupSetOutLink(link) if generator is not None: RecurrentLayerGroupSetGenerator(generator) config_assert( len(in_links) == 0, "no in_links should be passed to generator") config_assert( len(out_links) >= 1, "one or more than one out_links should be passed to generator") @config_func def RecurrentLayerGroupEnd(name): global g_current_submodel config_assert(g_current_submodel.is_recurrent_layer_group, "RecurrentLayerGroup 
not begin") for pair in g_current_submodel.memories: #check exist layer = g_layer_map[pair.layer_name] config_assert(layer is not None, "memory declare wrong name:%s" % pair.layer_name) memory_link = g_layer_map[pair.link_name] config_assert(layer.size == memory_link.size, "memory declare wrong size:%d" % memory_link.size) prev_submodel = g_current_submodel SubModelEnd(name) for pair in prev_submodel.out_links: layer = g_layer_map[pair.layer_name] # add out agent to father model agent_name = GetLayerBaseName(pair.link_name) if prev_submodel.HasField("generator"): DataLayer(name=agent_name, size=layer.size) else: GatherAgentLayer(name=agent_name, size=layer.size) # Define the model type # currently, the paddle supports "nn", "recurrent_nn", "recursive_nn" and "multi_nn" @config_func def model_type(name): g_config.model_config.type = name @config_class class Bias(Cfg): def __init__(self, parameter_name=None, learning_rate=None, momentum=None, decay_rate=None, decay_rate_l1=None, initial_mean=None, initial_std=None, initial_strategy=None, initial_smart=None, num_batches_regularization=None, sparse_remote_update=None, gradient_clipping_threshold=None, is_static=None, is_shared=None, initializer=None): self.add_keys(locals()) # Define one input for a layer @config_class class Input(Cfg): def __init__( self, input_layer_name, parameter_name=None, initializer=None, learning_rate=None, momentum=None, decay_rate=None, decay_rate_l1=None, initial_mean=None, initial_std=None, initial_strategy=None, initial_smart=None, num_batches_regularization=None, sparse_remote_update=None, sparse_update=None, gradient_clipping_threshold=None, conv=None, bilinear_interp=None, norm=None, pool=None, image=None, block_expand=None, maxout=None, spp=None, pad=None, upsample=None, format=None, nnz=None, is_static=None, is_shared=None, update_hooks=None, input_layer_argument=None, make_layer_name_in_submodel=True, ): """ @param make_layer_name_in_submodel True by defalut, you might need to set it 
carefully when adding Input in config_parser.py. """ self.add_keys(locals()) self.input_layer_name = MakeLayerNameInSubmodel( input_layer_name ) if make_layer_name_in_submodel else input_layer_name # Define a projection for iexed layer @config_class class Projection(Input): type = None # subclass should set it correctly def __init__( self, input_layer_name, size=0, # projection output size parameter_name=None, learning_rate=None, momentum=None, decay_rate=None, decay_rate_l1=None, initial_mean=None, initial_std=None, initial_strategy=None, initial_smart=None, initializer=None, num_batches_regularization=None, sparse_remote_update=None, sparse_update=None, gradient_clipping_threshold=None, ptype=None, format=None, nnz=None, is_static=None, is_shared=None, update_hooks=None, input_layer_argument=None, ): self.add_keys(locals()) self.input_layer_name = MakeLayerNameInSubmodel(input_layer_name) self.proj_conf = ProjectionConfig() if ptype is not None: self.proj_conf.type = ptype else: self.proj_conf.type = self.type # calculate the output_size given input_size. 
return 0 # to indicate using the size from Layer config def calc_output_size(self, input_layer_config): return self.size def calc_parameter_size(self, input_size, output_size): raise NotimplementedError def calc_parameter_dims(self, input_size, output_size): raise NotimplementedError @config_class class IdentityProjection(Projection): type = 'identity' def calc_output_size(self, input_layer_config): return input_layer_config.size def calc_parameter_size(self, input_size, output_size): return 0 def calc_parameter_dims(self, input_size, output_size): return [] # Like IdentityProjection, but layer size may smaller than input size, # the projection select dimesions [offset, offset+layer_size) from input @config_class class IdentityOffsetProjection(Projection): type = 'identity_offset' def __init__(self, input_layer_name, offset, **xargs): super(IdentityOffsetProjection, self).__init__(input_layer_name, **xargs) self.proj_conf.offset = offset def calc_output_size(self, input_layer_config): return 0 # depends on the outside MixedLayer def calc_parameter_size(self, input_size, output_size): return 0 def calc_parameter_dims(self, input_size, output_size): return [] @config_class class SliceProjection(Projection): type = 'slice' def __init__(self, input_layer_name, slices, **xargs): super(SliceProjection, self).__init__(input_layer_name, **xargs) input = g_layer_map[input_layer_name] if input.type in ["exconv", "cudnn_conv"]: # the slice operator is for the channel dimension assert input.num_filters is not None channels = input.num_filters image_size = input.size / channels assert slices[len(slices) - 1][1] <= channels for i in xrange(len(slices)): slice = self.proj_conf.slices.add() slice.start = slices[i][0] * image_size slice.end = slices[i][1] * image_size self.size += slice.end - slice.start else: config_assert(False, 'Currently the input should be convolution layer') def calc_parameter_size(self, input_size, output_size): return 0 def calc_parameter_dims(self, 
input_size, output_size): return [] # DotMulProjection performs element-wise multiplication with weight @config_class class DotMulProjection(Projection): type = 'dot_mul' def calc_output_size(self, input_layer_config): return input_layer_config.size def calc_parameter_size(self, input_size, output_size): return output_size def calc_parameter_dims(self, input_size, output_size): return [1, output_size] # ScalingProjection @config_class class ScalingProjection(Projection): type = 'scaling' def calc_output_size(self, input_layer_config): return input_layer_config.size def calc_parameter_size(self, input_size, output_size): return 1 def calc_parameter_dims(self, input_size, output_size): return [1, 1] @config_class class TableProjection(Projection): type = 'table' def calc_parameter_size(self, input_size, output_size): return input_size * output_size def calc_parameter_dims(self, input_size, output_size): return [input_size, output_size] @config_class class FullMatrixProjection(Projection): type = 'fc' def calc_parameter_size(self, input_size, output_size): return input_size * output_size def calc_parameter_dims(self, input_size, output_size): return [input_size, output_size] @config_class class TransposedFullMatrixProjection(Projection): type = 'trans_fc' def calc_parameter_size(self, input_size, output_size): return input_size * output_size def calc_parameter_dims(self, input_size, output_size): return [output_size, input_size] @config_class class ContextProjection(Projection): type = 'context' def __init__(self, input_layer_name, context_start, context_length, trainable_padding, **xargs): super(ContextProjection, self).__init__(input_layer_name, **xargs) self.proj_conf.context_start = context_start self.proj_conf.context_length = context_length self.proj_conf.trainable_padding = trainable_padding self._total_pad = max(0, -self.proj_conf.context_start) \ + max(0, self.proj_conf.context_start \ + self.proj_conf.context_length - 1) def calc_output_size(self, 
input_layer_config): return input_layer_config.size * self.proj_conf.context_length def calc_parameter_size(self, input_size, output_size): if self.proj_conf.trainable_padding == False: return 0 else: return input_size * self._total_pad def calc_parameter_dims(self, input_size, output_size): return [self._total_pad, input_size] _total_pad = 0 @config_class class ConvBaseProjection(Projection): def __init__(self, input_layer_name, num_filters=None, conv_conf=None, **xargs): super(ConvBaseProjection, self).__init__(input_layer_name, **xargs) if num_filters is not None: self.proj_conf.num_filters = num_filters def calc_output_size(self, input_layer_config): return self.proj_conf.output_size def calc_parameter_size(self, input_size, output_size): co = self.proj_conf.num_filters ci = self.proj_conf.conv_conf.channels fh = self.proj_conf.conv_conf.filter_size fw = self.proj_conf.conv_conf.filter_size_y gr = self.proj_conf.conv_conf.groups return co * ci * fh * fw / gr def calc_bias_size(self): return self.proj_conf.num_filters def calc_parameter_dims(self, input_size, output_size): return None @config_class class ConvProjection(ConvBaseProjection): type = 'conv' def __init__(self, input_layer_name, num_filters=None, conv_conf=None, **xargs): super(ConvProjection, self).__init__(input_layer_name, num_filters, conv_conf, **xargs) parse_conv(conv_conf, self.input_layer_name, self.proj_conf.conv_conf, num_filters) self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \ self.proj_conf.conv_conf.output_y * \ num_filters @config_class class ConvTransProjection(ConvBaseProjection): type = 'convt' def __init__(self, input_layer_name, num_filters=None, conv_conf=None, **xargs): super(ConvTransProjection, self).__init__(input_layer_name, num_filters, conv_conf, **xargs) parse_conv( conv_conf, self.input_layer_name, self.proj_conf.conv_conf, num_filters, trans=True) self.proj_conf.output_size = self.proj_conf.conv_conf.img_size_y * \ self.proj_conf.conv_conf.img_size * \ 
num_filters # Define a operator for mixed layer @config_class class Operator(Cfg): type = None # subclass should set it correctly def __init__( self, input_layer_names, ): self.add_keys(locals()) self.operator_conf = OperatorConfig() self.operator_conf.type = self.type def check_dims(self): pass def calc_output_size(self, input_sizes): return 0 @config_class class DotMulOperator(Operator): type = 'dot_mul' def __init__(self, input_layer_names, scale=None, **xargs): super(DotMulOperator, self).__init__(input_layer_names, **xargs) if scale is not None: self.operator_conf.dotmul_scale = scale config_assert(len(input_layer_names) == 2, "DotMul is binary operator") def check_dims(self): for i in range(2): config_assert(self.operator_conf.input_sizes[i] == self.operator_conf.output_size, "DotMul input_size != output_size") def calc_output_size(self, input_sizes): return input_sizes[0] @config_class class ConvOperator(Operator): type = 'conv' def __init__(self, input_layer_names, num_filters=None, conv_conf=None, **xargs): super(ConvOperator, self).__init__(input_layer_names, **xargs) if num_filters is not None: self.operator_conf.num_filters = num_filters parse_conv(conv_conf, MakeLayerNameInSubmodel(input_layer_names[0]), self.operator_conf.conv_conf, num_filters) self.operator_conf.output_size = self.operator_conf.conv_conf.output_x * \ self.operator_conf.conv_conf.output_y * \ num_filters config_assert(len(input_layer_names) == 2, "Conv is binary operator") def calc_output_size(self, input_sizes): return self.operator_conf.output_size @config_class class ConvTransOperator(Operator): type = 'convt' def __init__(self, input_layer_names, num_filters=None, conv_conf=None, **xargs): super(ConvTransOperator, self).__init__(input_layer_names, **xargs) if num_filters is not None: self.operator_conf.num_filters = num_filters parse_conv( conv_conf, MakeLayerNameInSubmodel(input_layer_names[0]), self.operator_conf.conv_conf, num_filters, trans=True) 
self.operator_conf.output_size = \ self.operator_conf.conv_conf.img_size * \ self.operator_conf.conv_conf.img_size_y * \ num_filters config_assert(len(input_layer_names) == 2, "Conv is binary operator") def calc_output_size(self, input_sizes): return self.operator_conf.output_size # please refer to the comments in proto/ModelConfig.proto @config_class class Conv(Cfg): def __init__(self, filter_size, channels, padding=None, stride=None, groups=None, filter_channels=None, output_x=None, img_size=None, caffe_mode=True, filter_size_y=None, padding_y=None, stride_y=None, dilation=None, dilation_y=None): self.add_keys(locals()) if filter_size_y is None: self.filter_size_y = filter_size if padding_y is None: self.padding_y = padding if dilation_y is None: self.dilation_y = dilation if stride_y is None: self.stride_y = stride if output_x is not None: config_assert(output_x <= 0) # please refer to the comments in proto/ModelConfig.proto @config_class class Conv3D(Cfg): def __init__(self, filter_size, channels, padding=None, stride=None, groups=None, filter_channels=None, output_x=None, img_size=None, caffe_mode=True, filter_size_y=None, padding_y=None, stride_y=None, filter_size_z=None, padding_z=None, stride_z=None): self.add_keys(locals()) self.filter_size_y = filter_size_y if filter_size_y else filter_size self.filter_size_z = filter_size_z if filter_size_z else filter_size self.padding_y = padding_y if padding_y else padding self.padding_z = padding_z if padding_z else padding self.stride_y = stride_y if stride_y else stride self.stride_z = stride_z if stride_z else stride if output_x is not None: config_assert(output_x <= 0) @config_class class BilinearInterp(Cfg): def __init__(self, out_size_x=None, out_size_y=None, channels=None): self.add_keys(locals()) @config_class class Pool(Cfg): def __init__( self, pool_type, channels, size_x, size_y=None, start=None, stride=None, # 1 by defalut in protobuf stride_y=None, padding=None, # 0 by defalut in protobuf 
padding_y=None): self.add_keys(locals()) @config_class class Pool3d(Cfg): def __init__( self, pool_type, channels, size_x, size_y=None, size_z=None, start=None, stride=None, # 1 by defalut in protobuf stride_y=None, stride_z=None, padding=None, # 0 by defalut in protobuf padding_y=None, padding_z=None): self.add_keys(locals()) self.filter_size_y = size_y if size_y else size_x self.filter_size_z = size_z if size_z else size_x self.padding_y = padding_y if padding_y else padding self.padding_z = padding_z if padding_z else padding self.stride_y = stride_y if stride_y else stride self.stride_z = stride_z if stride_z else stride @config_class class SpatialPyramidPool(Cfg): def __init__(self, pool_type, pyramid_height, channels): self.add_keys(locals()) @config_class class Pad(Cfg): def __init__(self, channels, pad_c, pad_h, pad_w): self.add_keys(locals()) @config_class class Upsample(Cfg): def __init__(self, scale, scale_y, pad_out_x, pad_out_y, upsample_size, upsample_size_y): self.add_keys(locals()) @config_class class Norm(Cfg): def __init__(self, norm_type, channels, size, scale, pow, output_x=None, img_size=None, blocked=None): self.add_keys(locals()) @config_class class Image(Cfg): def __init__(self, channels, img_size=None): self.add_keys(locals()) @config_class class BlockExpand(Cfg): def __init__(self, channels, padding_x=0, padding_y=0, stride_x=0, stride_y=0, block_x=0, block_y=0, img_size_x=0, img_size_y=0, output_x=0, output_y=0): self.add_keys(locals()) @config_class class MaxOut(Cfg): def __init__(self, channels, groups, img_size_x=0, img_size_y=0): self.add_keys(locals()) def create_data_config_proto(async_load_data=False, constant_slots=None, data_ratio=1, is_main_data=True, usage_ratio=None): # default: all sub dataproviders are treat as "main data". 
# see proto/DataConfig.proto for is_main_data data_config = DataConfig() data_config.async_load_data = async_load_data if constant_slots: data_config.constant_slots.extend(constant_slots) data_config.data_ratio = data_ratio data_config.is_main_data = is_main_data usage_ratio = default(usage_ratio, settings_deprecated["usage_ratio"]) config_assert(usage_ratio >= 0 and usage_ratio <= 1, "The range of usage_ratio is [0, 1]") data_config.usage_ratio = usage_ratio return data_config @config_func def SimpleData(files=None, feat_dim=None, context_len=None, buffer_capacity=None, **xargs): data_config = create_data_config_proto(**xargs) data_config.type = 'simple' data_config.files = files data_config.feat_dim = feat_dim if context_len is not None: data_config.context_len = context_len if buffer_capacity: data_config.buffer_capacity = buffer_capacity return data_config @config_func def PyData(files=None, type=None, file_group_queue_capacity=None, load_data_module=None, load_data_object=None, load_data_args="", load_file_count=None, constant_slots=None, load_thread_num=None, **xargs): data_config = create_data_config_proto(**xargs) data_config.type = 'py' if load_data_module in g_py_module_name_list: def get_path(module): m = __import__(load_data_module) return os.path.split(os.path.realpath(m.__file__))[0] # python C-api is not thread safe, one module can only be import once, # so here we nedd to copy the module with different names if it has to be # imported several times. module_new_name = "%s_copy_%d" % (load_data_module, len(g_py_module_name_list)) g_py_module_name_list.append(module_new_name) module_path = "%s/%s.py" % (get_path(load_data_module), load_data_module) new_module_path = "%s/%s.py" % (get_path(load_data_module), module_new_name) if os.path.isfile(module_path) == False: raise Exception("File %s is not exist." 
% module_path) shutil.copy2(module_path, new_module_path) load_data_module = module_new_name else: g_py_module_name_list.append(load_data_module) if load_data_module is not None and load_data_object is not None: data_config.load_data_module = load_data_module data_config.load_data_object = load_data_object else: raise ValueError('load_data_module, load_data_object is not defined.') data_config.load_data_args = load_data_args data_config.files = files or '' if file_group_queue_capacity is not None: data_config.file_group_conf.queue_capacity = file_group_queue_capacity if load_file_count is not None: data_config.file_group_conf.load_file_count = load_file_count if load_thread_num is not None: data_config.file_group_conf.load_thread_num = load_thread_num if constant_slots: data_config.constant_slots.extend(constant_slots) return data_config #real data for training is actually provided by "sub_data" data providers. @config_func def MultiData(sub_data=[]): data_config = DataConfig() data_config.type = 'multi' data_config.sub_data_configs.extend(sub_data) return data_config @config_func def Data(type, files=None, feat_dim=None, slot_dims=None, context_len=None, buffer_capacity=None, **xargs): data_config = create_data_config_proto(**xargs) data_config.type = type data_config.files = files data_config.feat_dim = feat_dim data_config.slot_dims.extend(slot_dims) if context_len is not None: data_config.context_len = context_len data_config.buffer_capacity = buffer_capacity return data_config @config_func def TrainData(data_config, async_load_data=None): config_assert(not g_config.HasField('data_config'), 'Only one TrainData definition is allowed') g_config.data_config.CopyFrom(data_config) g_config.data_config.for_test = False if async_load_data is not None: logger.warning("Deprecated: async_load_data should be used inside" " Data definition") g_config.data_config.async_load_data = async_load_data @config_func def TestData(data_config, async_load_data=None): 
config_assert(not g_config.HasField('test_data_config'), 'Only one TestData definition is allowed') g_config.test_data_config.CopyFrom(data_config) g_config.test_data_config.for_test = True if async_load_data is not None: logger.warning("Deprecated: async_load_data should be used inside" " Data definition") g_config.test_data_config.async_load_data = async_load_data #caffe_mode: compute the output size using floor instead of ceil, # which is consistent of caffe and CuDNN's convention. def cnn_output_size(img_size, filter_size, padding, stride, caffe_mode, dilation=1): filter_s = (filter_size - 1) * dilation + 1 output = (2 * padding + img_size - filter_s) / float(stride) if caffe_mode: return 1 + int(math.floor(output)) else: return 1 + int(math.ceil(output)) #calcualte image_size based on output_size for de-convolution (ConvTransLayer). #It is the reverse function of cnn_output_size def cnn_image_size(output_size, filter_size, padding, stride, caffe_mode, dilation=1): filter_s = (filter_size - 1) * dilation + 1 img_size = (output_size - 1) * stride + filter_s - 2 * padding if not caffe_mode: img_size = img_size + 1 return img_size def get_img_size(input_layer_name, channels): input = g_layer_map[input_layer_name] img_pixels = input.size / channels img_size = input.width if input.width > 0 else int(img_pixels**0.5) img_size_y = input.height if input.height > 0 else int(img_pixels / img_size) config_assert( img_size * img_size_y == img_pixels, "Input layer %s: Incorrect input image size %d * %d for input image pixels %d" % (input_layer_name, img_size, img_size_y, img_pixels)) return img_size, img_size_y def get_img3d_size(input_layer_name, channels): input = g_layer_map[input_layer_name] img_pixels = input.size / channels img_size = input.width img_size_y = input.height img_size_z = input.depth config_assert( img_size * img_size_y * img_size_z == img_pixels, "Input layer %s: Incorrect input image size %d * %d * %d for input image pixels %d" % (input_layer_name, 
img_size, img_size_y, img_size_z, img_pixels)) return img_size, img_size_y, img_size_z def parse_bilinear(bilinear, input_layer_name, bilinear_conf): parse_image(bilinear, input_layer_name, bilinear_conf.image_conf) bilinear_conf.out_size_x = bilinear.out_size_x bilinear_conf.out_size_y = bilinear.out_size_y def parse_pool(pool, input_layer_name, pool_conf, ceil_mode, exclude_mode): pool_conf.pool_type = pool.pool_type config_assert(pool.pool_type in [ 'max-projection', 'avg-projection', 'max-pool-with-mask', 'cudnn-max-pool', 'cudnn-avg-pool' ], "pool-type %s is not in " \ "['max-projection', 'avg-projection', 'max-pool-with-mask'," \ "'cudnn-max-pool', 'cudnn-avg-pool']" % pool.pool_type) pool_conf.channels = pool.channels pool_conf.size_x = pool.size_x pool_conf.stride = pool.stride pool_conf.size_y = default(pool.size_y, pool_conf.size_x) pool_conf.stride_y = default(pool.stride_y, pool_conf.stride) pool_conf.img_size, pool_conf.img_size_y = \ get_img_size(input_layer_name, pool.channels) config_assert(not pool.start, "start is deprecated in pooling.") if pool.padding is not None: pool_conf.padding = pool.padding pool_conf.padding_y = default(pool.padding_y, pool_conf.padding) pool_conf.output_x = cnn_output_size(pool_conf.img_size, pool_conf.size_x, pool_conf.padding, pool_conf.stride, not ceil_mode) pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y, pool_conf.padding_y, pool_conf.stride_y, not ceil_mode) if exclude_mode != None: pool_conf.exclude_mode = exclude_mode def parse_pool3d(pool, input_layer_name, pool_conf, ceil_mode): pool_conf.pool_type = pool.pool_type config_assert(pool.pool_type in ['max-projection', 'avg-projection'], "pool-type %s is not in " "['max-projection', 'avg-projection']" % pool.pool_type) pool_conf.channels = pool.channels pool_conf.size_x = pool.size_x pool_conf.stride = pool.stride pool_conf.padding = pool.padding pool_conf.size_y = default(pool.size_y, pool_conf.size_x) pool_conf.size_z = 
default(pool.size_z, pool_conf.size_x) pool_conf.stride_y = default(pool.stride_y, pool_conf.stride) pool_conf.stride_z = default(pool.stride_z, pool_conf.stride) pool_conf.padding_y = default(pool.padding_y, pool_conf.padding) pool_conf.padding_z = default(pool.padding_z, pool_conf.padding) pool_conf.img_size, pool_conf.img_size_y, pool_conf.img_size_z = \ get_img3d_size(input_layer_name, pool.channels) config_assert(not pool.start, "start is deprecated in pooling.") if pool.padding is not None: pool_conf.padding = pool.padding pool_conf.padding_y = default(pool.padding_y, pool_conf.padding) pool_conf.padding_z = default(pool.padding_z, pool_conf.padding) pool_conf.output_x = cnn_output_size(pool_conf.img_size, pool_conf.size_x, pool_conf.padding, pool_conf.stride, not ceil_mode) pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y, pool_conf.padding_y, pool_conf.stride_y, not ceil_mode) pool_conf.output_z = cnn_output_size(pool_conf.img_size_z, pool_conf.size_z, pool_conf.padding_z, pool_conf.stride_z, not ceil_mode) def parse_spp(spp, input_layer_name, spp_conf): parse_image(spp, input_layer_name, spp_conf.image_conf) spp_conf.pool_type = spp.pool_type config_assert(spp.pool_type in ['max-projection', 'avg-projection'], "pool-type %s is not in " "['max-projection', 'avg-projection']" % spp.pool_type) spp_conf.pyramid_height = spp.pyramid_height def parse_image(image, input_layer_name, image_conf): image_conf.channels = image.channels image_conf.img_size, image_conf.img_size_y = \ get_img_size(input_layer_name, image_conf.channels) def parse_image3d(image, input_layer_name, image_conf): image_conf.channels = image.channels image_conf.img_size, image_conf.img_size_y, image_conf.img_size_z = \ get_img3d_size(input_layer_name, image_conf.channels) def parse_norm(norm, input_layer_name, norm_conf): norm_conf.norm_type = norm.norm_type config_assert( norm.norm_type in ['rnorm', 'cmrnorm-projection', 'cross-channel-norm'], "norm-type %s is not in 
[rnorm, cmrnorm-projection, cross-channel-norm]" % norm.norm_type) norm_conf.channels = norm.channels norm_conf.size = norm.size norm_conf.scale = norm.scale norm_conf.pow = norm.pow norm_conf.blocked = norm.blocked norm_conf.img_size, norm_conf.img_size_y = \ get_img_size(input_layer_name, norm.channels) norm_conf.output_x = norm_conf.img_size norm_conf.output_y = norm_conf.img_size_y if norm.norm_type in ['cmrnorm-projection']: norm_conf.scale /= norm.size else: norm_conf.scale /= norm.size**2 #caffe_mode: compute the output size using floor instead of ceil, # which is consistent of caffe and CuDNN's convention. def parse_conv(conv, input_layer_name, conv_conf, num_filters, trans=False): conv_conf.filter_size = conv.filter_size conv_conf.filter_size_y = conv.filter_size_y conv_conf.channels = conv.channels conv_conf.padding = conv.padding conv_conf.padding_y = conv.padding_y conv_conf.stride = conv.stride conv_conf.stride_y = conv.stride_y conv_conf.groups = conv.groups conv_conf.caffe_mode = conv.caffe_mode if not conv.dilation: conv.dilation = 1 conv.dilation_y = 1 else: conv_conf.dilation = conv.dilation conv_conf.dilation_y = conv.dilation_y if not trans: conv_conf.filter_channels = conv.channels / conv.groups conv_conf.img_size, conv_conf.img_size_y = \ get_img_size(input_layer_name, conv.channels) conv_conf.output_x = cnn_output_size( conv_conf.img_size, conv_conf.filter_size, conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode, conv.dilation) conv_conf.output_y = cnn_output_size( conv_conf.img_size_y, conv_conf.filter_size_y, conv_conf.padding_y, conv_conf.stride_y, conv_conf.caffe_mode, conv.dilation_y) else: conv_conf.filter_channels = num_filters / conv.groups conv_conf.output_x, conv_conf.output_y = \ get_img_size(input_layer_name, conv.channels) conv_conf.img_size = cnn_image_size( conv_conf.output_x, conv_conf.filter_size, conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode, conv.dilation) conv_conf.img_size_y = cnn_image_size( 
conv_conf.output_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode, conv.dilation_y)


#caffe_mode: compute the output size using floor instead of ceil,
# which is consistent of caffe and CuDNN's convention.
def parse_conv3d(conv, input_layer_name, conv_conf, num_filters, trans=False):
    """Fill a 3-D convolution projection config from the user conv settings.

    When ``trans`` is False the forward direction is configured: output sizes
    are derived from the input image sizes via ``cnn_output_size``.  When
    ``trans`` is True (deconvolution) the roles swap: the layer's input sizes
    are treated as outputs and the image sizes are recovered with
    ``cnn_image_size``.
    """
    conv_conf.filter_size = conv.filter_size
    conv_conf.filter_size_y = conv.filter_size_y
    conv_conf.filter_size_z = conv.filter_size_z
    conv_conf.channels = conv.channels
    conv_conf.padding = conv.padding
    conv_conf.padding_y = conv.padding_y
    conv_conf.padding_z = conv.padding_z
    conv_conf.stride = conv.stride
    conv_conf.stride_y = conv.stride_y
    conv_conf.stride_z = conv.stride_z
    conv_conf.groups = conv.groups
    conv_conf.caffe_mode = conv.caffe_mode

    if not trans:
        # Grouped convolution: each filter only sees channels/groups inputs.
        # NOTE: integer division (Python 2 semantics, cf. xrange below).
        conv_conf.filter_channels = conv.channels / conv.groups
        conv_conf.img_size, conv_conf.img_size_y, conv_conf.img_size_z = \
            get_img3d_size(input_layer_name, conv.channels)
        conv_conf.output_x = cnn_output_size(
            conv_conf.img_size, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode)
        conv_conf.output_y = cnn_output_size(
            conv_conf.img_size_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode)
        conv_conf.output_z = cnn_output_size(
            conv_conf.img_size_z, conv_conf.filter_size_z, conv_conf.padding_z,
            conv_conf.stride_z, conv_conf.caffe_mode)
    else:
        # Deconvolution: filter channels are derived from num_filters instead.
        conv_conf.filter_channels = num_filters / conv.groups
        conv_conf.output_x, conv_conf.output_y, conv_conf.output_z = \
            get_img3d_size(input_layer_name, conv.channels)
        conv_conf.img_size = cnn_image_size(
            conv_conf.output_x, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode)
        conv_conf.img_size_y = cnn_image_size(
            conv_conf.output_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode)
        conv_conf.img_size_z = cnn_image_size(
            conv_conf.output_z, conv_conf.filter_size_z, conv_conf.padding_z,
            conv_conf.stride_z, conv_conf.caffe_mode)


def parse_block_expand(block_expand, input_layer_name, block_expand_conf):
    """Copy block-expand settings and derive per-axis output sizes.

    An ``img_size_*`` of 0 leaves the corresponding output at 0 — presumably
    the size is then resolved at runtime from the actual input (confirm
    against the BlockExpand layer implementation).
    """
    block_expand_conf.channels = block_expand.channels
    block_expand_conf.stride_x = block_expand.stride_x
    block_expand_conf.stride_y = block_expand.stride_y
    block_expand_conf.padding_x = block_expand.padding_x
    block_expand_conf.padding_y = block_expand.padding_y
    block_expand_conf.block_x = block_expand.block_x
    block_expand_conf.block_y = block_expand.block_y
    block_expand_conf.img_size_x = block_expand.img_size_x
    block_expand_conf.img_size_y = block_expand.img_size_y
    if block_expand_conf.img_size_x == 0:
        block_expand_conf.output_x = 0
    else:
        # caffe_mode=False: ceil-style output size (see cnn_output_size).
        block_expand_conf.output_x = cnn_output_size(
            block_expand.img_size_x, block_expand.block_x,
            block_expand.padding_x, block_expand.stride_x, False)

    if block_expand_conf.img_size_y == 0:
        block_expand_conf.output_y = 0
    else:
        block_expand_conf.output_y = cnn_output_size(
            block_expand.img_size_y, block_expand.block_y,
            block_expand.padding_y, block_expand.stride_y, False)


def parse_maxout(maxout, input_layer_name, maxout_conf):
    """Fill a maxout config: image geometry plus the number of groups."""
    parse_image(maxout, input_layer_name, maxout_conf.image_conf)
    maxout_conf.groups = maxout.groups


# Define an evaluator
@config_func
def Evaluator(name,
              type,
              inputs,
              chunk_scheme=None,
              num_chunk_types=None,
              classification_threshold=None,
              positive_label=None,
              dict_file=None,
              result_file=None,
              num_results=None,
              top_k=None,
              delimited=None,
              excluded_chunk_types=None,
              overlap_threshold=None,
              background_id=None,
              evaluate_difficult=None,
              ap_type=None):
    """Append an evaluator proto to the global model config.

    Only the optional fields whose arguments are not None are written, so
    the proto keeps its defaults otherwise.  The evaluator is also recorded
    on the current submodel.
    """
    evaluator = g_config.model_config.evaluators.add()
    evaluator.type = type
    evaluator.name = MakeLayerNameInSubmodel(name)
    if type_of(inputs) == str:
        inputs = [inputs]
    evaluator.input_layers.extend(
        [MakeLayerNameInSubmodel(name) for name in inputs])

    if chunk_scheme is not None:
        evaluator.chunk_scheme = chunk_scheme
        evaluator.num_chunk_types = num_chunk_types
    g_current_submodel.evaluator_names.append(evaluator.name)

    if classification_threshold is not None:
        evaluator.classification_threshold = classification_threshold
    if positive_label is not None:
        evaluator.positive_label = positive_label
    if dict_file is not None:
        evaluator.dict_file = dict_file

    if result_file is not None:
        evaluator.result_file = result_file
    if num_results is not None:
        evaluator.num_results = num_results
    if top_k is not None:
        evaluator.top_k = top_k
    if delimited is not None:
        evaluator.delimited = delimited

    if excluded_chunk_types:
        evaluator.excluded_chunk_types.extend(excluded_chunk_types)

    if overlap_threshold is not None:
        evaluator.overlap_threshold = overlap_threshold

    if background_id is not None:
        evaluator.background_id = background_id

    if evaluate_difficult is not None:
        evaluator.evaluate_difficult = evaluate_difficult

    if ap_type is not None:
        evaluator.ap_type = ap_type


class LayerBase(object):
    """Base class for all layer config builders.

    Constructing a subclass allocates a ``LayerConfig`` proto inside the
    global model config, resolves/normalizes the layer's inputs, and
    registers the layer in ``g_layer_map`` and the current submodel.
    Subclasses then fill layer-specific fields on ``self.config``.
    """

    def __init__(
            self,
            name,
            type,
            size,  # size can be 0. In this case, subclass should set it.
            inputs,
            device=None,
            active_type="",
            drop_rate=0.,
            coeff=None,
            error_clipping_threshold=None):
        config_assert('@' not in name,
                      "layer name: %s contain special character @" % name)
        global g_current_submodel
        name = MakeLayerNameInSubmodel(name)
        config_assert(name not in g_layer_map,
                      'Duplicated layer name: %s' % name)

        # Deep-copy so the caller's Input objects are never mutated.
        self.inputs = copy.deepcopy(inputs)
        self.operators = []

        if self.inputs is None:
            self.inputs = []
        elif type_of(self.inputs) != list:
            self.inputs = [self.inputs]

        self.config = g_config.model_config.layers.add()
        assert isinstance(self.config, LayerConfig)
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        mkldnn_acts = ['relu', 'tanh', 'softmax']
        # Under MKLDNN the supported activations are transparently redirected
        # to their mkldnn_* counterparts.
        if use_mkldnn and active_type in mkldnn_acts:
            active_type = "mkldnn_" + active_type
        self.config.name = name
        self.config.type = type
        self.config.active_type = active_type
        if coeff is not None:
            self.config.coeff = float(coeff)
        if size != 0:
            self.config.size = size
        if drop_rate != 0:
            self.config.drop_rate = drop_rate

        if device is not None:
            self.config.device = device
        elif g_default_device is not None:
            self.config.device = g_default_device

        if error_clipping_threshold is not None:
            self.config.error_clipping_threshold = error_clipping_threshold

        # Normalize every input to an Input config; Operators are collected
        # separately and only contribute their first input layer name.
        for input_index in xrange(len(self.inputs)):
            input = self.inputs[input_index]
            input_config = None
            input_layer_name = ''
            if type_of(input) == str:
                input_layer_name = input
                input_config = Input(
                    input_layer_name=input,
                    parameter_name=gen_parameter_name(name, input_index))
                input_layer_name = input_config.input_layer_name
            elif isinstance(input, Input):
                input_layer_name = input.input_layer_name
                input_config = input
                if input_config.parameter_name is None:
                    input_config.parameter_name = \
                        gen_parameter_name(name, input_index)
            elif isinstance(input, Operator):
                self.operators.append(input)
                input.operator_conf.input_indices.append(input_index)
                input_config = Input(input.input_layer_names[0])
                input_layer_name = input_config.input_layer_name
            else:
                raise ValueError('Wrong type for inputs: %s' % type_of(input))
            config_assert(input_layer_name in g_layer_map,
                          "Unknown input layer '%s' for layer %s" %
                          (input_layer_name, name))
            self.inputs[input_index] = input_config
            layer_input = self.config.inputs.add()
            layer_input.input_layer_name = input_config.input_layer_name
            if input_config.input_layer_argument is not None:
                layer_input.input_layer_argument = \
                    input_config.input_layer_argument

        g_layer_map[name] = self.config
        g_current_submodel.layer_names.append(self.config.name)

    def get_input_layer(self, input_index):
        """Return the LayerConfig proto of the input at ``input_index``."""
        return g_layer_map[self.config.inputs[input_index].input_layer_name]

    # will return the bias created if not *for_self*
    def create_bias_parameter(
            self,
            bias,  # True/False or BiasCfg
            size,
            dims=None,
            for_self=True,  # whether create bias for layer self
    ):
        """Create (or skip) the bias Parameter for this layer.

        ``bias`` may be a bool (True means "create a default Bias") or a
        Bias config.  The Parameter is only registered the first time its
        name is seen, so shared bias names reuse the existing parameter.
        """
        if size == 0:
            return
        if dims is None:
            dims = [1, size]

        config_assert(
            type_of(bias) == bool or type_of(bias) == Bias,
            'Incorrect type for bias: %s' % type_of(bias))

        if type_of(bias) == bool:
            if bias:
                bias = Bias()

        if type_of(bias) == Bias:
            if bias.parameter_name is None:
                bias.parameter_name = gen_bias_parameter_name(self.config.name)
            if bias.parameter_name not in g_parameter_map:
                assert isinstance(self.config, LayerConfig)

                Parameter(
                    bias.parameter_name,
                    size,
                    self.config.device
                    if self.config.HasField('device') else None,
                    dims,
                    bias.learning_rate,
                    bias.momentum,
                    decay_rate=bias.decay_rate,
                    decay_rate_l1=bias.decay_rate_l1,
                    initial_mean=bias.initial_mean,
                    initial_std=bias.initial_std,
                    initial_strategy=bias.initial_strategy,
                    initial_smart=bias.initial_smart,
                    num_batches_regularization=bias.num_batches_regularization,
                    sparse_remote_update=bias.sparse_remote_update,
                    gradient_clipping_threshold=bias.
                    gradient_clipping_threshold,
                    is_static=bias.is_static,
                    is_shared=bias.is_shared,
                    initializer=bias.initializer)
            if for_self:
                self.config.bias_parameter_name = bias.parameter_name
            else:
                return bias.parameter_name

    def create_input_parameter(self,
                               input_index,
                               size,
                               dims=None,
                               sparse=None,
                               format=None):
        """Create the weight Parameter for input ``input_index``.

        If the parameter name already exists (a shared parameter), only
        verify that size and dims agree instead of re-creating it.
        """
        if dims is None:
            # TODO(yuyang18): print warning and callstack here!
            dims = list()

        if size == 0:
            return

        input_config = self.inputs[input_index]

        self.config.inputs[input_index].input_parameter_name = \
            input_config.parameter_name

        if input_config.parameter_name in g_parameter_map:
            para = g_parameter_map[input_config.parameter_name]
            config_assert(size == para.size, (
                'Shared parameter "%s" does not ' + 'have same size: %s vs. %s')
                          % (input_config.parameter_name, para.size, size))

            config_assert(dims == para.dims, (
                'Shared parameter "%s" does not ' + 'have same dims: %s vs. %s')
                          % (input_config.parameter_name, para.dims, dims))
            return

        Parameter(
            input_config.parameter_name,
            size,
            self.config.device if self.config.HasField("device") else None,
            dims,
            input_config.learning_rate,
            input_config.momentum,
            decay_rate=input_config.decay_rate,
            decay_rate_l1=input_config.decay_rate_l1,
            initial_mean=input_config.initial_mean,
            initial_std=input_config.initial_std,
            initial_strategy=input_config.initial_strategy,
            initial_smart=input_config.initial_smart,
            num_batches_regularization=input_config.num_batches_regularization,
            sparse_remote_update=input_config.sparse_remote_update,
            sparse_update=input_config.sparse_update,
            gradient_clipping_threshold=input_config.
            gradient_clipping_threshold,
            sparse=sparse,
            format=format,
            is_static=input_config.is_static,
            is_shared=input_config.is_shared,
            update_hooks=input_config.update_hooks,
            initializer=input_config.initializer)

    def set_layer_size(self, size):
        """Set the layer size once; assert it never changes across inputs."""
        if self.config.size == 0:
            self.config.size = size
        else:
            config_assert(self.config.size == size,
                          'Different inputs result in' +
                          'different layer size at layer %s' % self.config.name)

    def set_layer_height_width(self, height, width):
        self.config.height = height
        self.config.width = width

    def set_layer_depth(self, depth):
        self.config.depth = depth

    def set_cnn_layer(self,
                      input_layer_name,
                      height,
                      width,
                      channels,
                      is_print=True):
        """Derive layer size from a c*h*w image shape; optionally log it."""
        size = height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        if is_print:
            print("output for %s: c = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, height, width, size))


@config_layer('multi_class_cross_entropy_with_selfnorm')
class MultiClassCrossEntropySelfNormCostLayer(LayerBase):
    # Cross-entropy cost with a self-normalization penalty weighted by
    # softmax_selfnorm_alpha.
    def __init__(self, name, inputs, softmax_selfnorm_alpha=0.1, **xargs):
        super(MultiClassCrossEntropySelfNormCostLayer, self).__init__(
            name, 'multi_class_cross_entropy_with_selfnorm', 0, inputs,
            **xargs)
        self.config.softmax_selfnorm_alpha = softmax_selfnorm_alpha


@config_layer('cross_entropy_over_beam')
class 
CrossEntropyOverBeamLayer(LayerBase):
    # Inputs arrive in triples; only the first member of each triple (the
    # candidate-path scores, which must have size 1) is validated here.
    def __init__(self, name, inputs, **xargs):
        config_assert(len(inputs) % 3 == 0, "Error input number.")
        super(CrossEntropyOverBeamLayer, self).__init__(
            name, 'cross_entropy_over_beam', 0, inputs, **xargs)
        input_num = len(inputs) / 3
        for i in range(input_num):
            input_layer = self.get_input_layer(i * 3)
            config_assert(input_layer.size == 1, (
                "Inputs for this layer are made up of "
                "several triples, in which the first one is scores over "
                "all candidate paths, whose size should be equal to 1."))


@config_layer('fc')
class FCLayer(LayerBase):
    """Fully connected layer; switches to mkldnn_fc when MKLDNN is enabled."""
    layer_type = 'fc'

    def __init__(self,
                 name,
                 size,
                 inputs,
                 bias=True,
                 error_clipping_threshold=None,
                 **xargs):
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        use_mkldnn_wgt = bool(
            int(g_command_config_args.get("use_mkldnn_wgt", 0)))
        if use_mkldnn:
            self.layer_type = 'mkldnn_fc'
            config_assert(
                len(inputs) == 1,
                "MKLDNNFCLayer support one and only one input!")
        super(FCLayer, self).__init__(
            name, self.layer_type, size, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            psize = self.config.size * input_layer.size
            dims = [input_layer.size, self.config.size]
            format = self.inputs[input_index].format
            sparse = format == "csr" or format == "csc"
            if use_mkldnn:
                config_assert(not sparse,
                              "MKLDNNFCLayer do not support sparse format yet")
                if use_mkldnn_wgt:
                    # MKLDNN stores the weight matrix transposed.
                    dims = [self.config.size, input_layer.size]
            if sparse:
                psize = self.inputs[input_index].nnz
            else:
                sparse = None

            self.create_input_parameter(input_index, psize, dims, sparse,
                                        format)
        self.create_bias_parameter(bias, self.config.size)
        if error_clipping_threshold is not None:
            self.config.error_clipping_threshold = error_clipping_threshold


@config_layer('mkldnn_fc')
class MKLDNNFcLayer(FCLayer):
    layer_type = 'mkldnn_fc'


@config_layer('selective_fc')
class SelectiveFCLayer(LayerBase):
    """Fully connected layer that multiplies only selected output columns."""

    def __init__(self,
                 name,
                 size,
                 inputs,
                 bias=True,
                 selective_fc_pass_generation=False,
                 has_selected_colums=True,
                 selective_fc_full_mul_ratio=0.02,
                 selective_fc_parallel_plain_mul_thread_num=None,
                 **xargs):
        super(SelectiveFCLayer, self).__init__(
            name, 'selective_fc', size, inputs=inputs, **xargs)
        # user MUST know if selctive fc is used in training,
        # parameter matrices saved by this layer are automatically transposed,
        # BUT bias is not.

        # if selective_fc is used only in testing mode, and parameters for
        # this layer are trained by fully connected layers,
        # then TranposedFullMatrixProjectin MUST be used in training
        # to avoid manual transpose in testing.

        self.config.selective_fc_pass_generation = selective_fc_pass_generation
        self.config.has_selected_colums = has_selected_colums
        self.config.selective_fc_full_mul_ratio = selective_fc_full_mul_ratio
        if selective_fc_parallel_plain_mul_thread_num is not None:
            self.config.selective_fc_parallel_plain_mul_thread_num = selective_fc_parallel_plain_mul_thread_num

        input_num = len(self.inputs)
        if has_selected_colums:
            # The last input carries the selected-column indices, not data.
            config_assert(input_num >= 2,
                          ("if indices of selected columns are not specified, "
                           "selective_fc Layer has at least two inputs"))
            input_num -= 1

        for input_index in xrange(input_num):
            input_layer = self.get_input_layer(input_index)
            psize = self.config.size * input_layer.size
            dims = [input_layer.size, self.config.size]
            dims = dims[::-1]  # transpose the parameter
            format = self.inputs[input_index].format
            sparse = format == "csr" or format == "csc"
            if sparse:
                psize = self.inputs[input_index].nnz

            self.create_input_parameter(input_index, psize, dims, sparse,
                                        format)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('print')
class PrintLayer(LayerBase):
    # Debug layer: prints its inputs using a user format string; defaults to
    # one "layer=<name> %s" line per input.
    def __init__(self, name, inputs, format=None):
        super(PrintLayer, self).__init__(name, 'print', 0, inputs)
        if format is None:
            format = "\n".join([
                "layer=" + input.input_layer_name + " %s"
                for input in self.inputs
            ])
        self.config.user_arg = format


@config_layer('priorbox')
class PriorBoxLayer(LayerBase):
    # SSD prior-box layer: the second input must be a data layer carrying
    # the image height/width the boxes are generated for.
    def __init__(self, name, inputs, size, min_size, max_size, aspect_ratio,
                 variance):
        super(PriorBoxLayer, self).__init__(name, 'priorbox', 0, inputs)
        config_assert(len(inputs) == 2, 'PriorBoxLayer must have 2 inputs')
        input_layer = self.get_input_layer(1)
        config_assert(
            input_layer.type == 'data',
            'Expecting the second input layer of an priorbox layer to be '
            'a data layer')
        config_assert(input_layer.width > 0, 'The data layer must set width')
        config_assert(input_layer.height > 0, 'The data layer must set height')
        config_assert(len(variance) == 4, 'The variance must have 4 inputs')
        self.config.inputs[0].priorbox_conf.min_size.extend(min_size)
        self.config.inputs[0].priorbox_conf.max_size.extend(max_size)
        self.config.inputs[0].priorbox_conf.aspect_ratio.extend(aspect_ratio)
        self.config.inputs[0].priorbox_conf.variance.extend(variance)
        self.config.size = size


@config_layer('multibox_loss')
class MultiBoxLossLayer(LayerBase):
    # SSD multibox loss: expects input_num (loc, conf) pairs plus two extra
    # inputs, i.e. input_num * 2 + 2 in total.
    def __init__(self, name, inputs, input_num, num_classes, overlap_threshold,
                 neg_pos_ratio, neg_overlap, background_id, **xargs):
        super(MultiBoxLossLayer, self).__init__(name, 'multibox_loss', 0,
                                                inputs)
        config_assert(
            len(inputs) == (input_num * 2 + 2),
            'MultiBoxLossLayer does not have enough inputs')
        config_assert(num_classes > background_id,
                      'Classes number must greater than background ID')
        self.config.inputs[0].multibox_loss_conf.num_classes = num_classes
        self.config.inputs[
            0].multibox_loss_conf.overlap_threshold = overlap_threshold
        self.config.inputs[0].multibox_loss_conf.neg_pos_ratio = neg_pos_ratio
        self.config.inputs[0].multibox_loss_conf.neg_overlap = neg_overlap
        self.config.inputs[0].multibox_loss_conf.background_id = background_id
        self.config.inputs[0].multibox_loss_conf.input_num = input_num
        self.config.size = 1


@config_layer('detection_output')
class DetectionOutputLayer(LayerBase):
    # SSD detection output with non-maximum suppression; expects
    # input_num * 2 + 1 inputs.
    def __init__(self, name, inputs, size, input_num, num_classes,
                 nms_threshold, nms_top_k, keep_top_k, confidence_threshold,
                 background_id, **xargs):
        super(DetectionOutputLayer, self).__init__(name, 'detection_output', 0,
                                                   inputs)
        config_assert(
            len(inputs) == (input_num * 2 + 1),
            'DetectionOutputLayer does not have enough inputs')
        config_assert(num_classes > background_id,
                      'Classes number must greater than background ID')
        self.config.inputs[0].detection_output_conf.num_classes = num_classes
        self.config.inputs[
            0].detection_output_conf.nms_threshold = nms_threshold
        self.config.inputs[0].detection_output_conf.nms_top_k = nms_top_k
        self.config.inputs[0].detection_output_conf.keep_top_k = keep_top_k
        self.config.inputs[
            0].detection_output_conf.confidence_threshold = confidence_threshold
        self.config.inputs[
            0].detection_output_conf.background_id = background_id
        self.config.inputs[0].detection_output_conf.input_num = input_num
        self.config.size = size


@config_layer('roi_pool')
class ROIPoolLayer(LayerBase):
    # Region-of-interest pooling to a fixed pooled_height x pooled_width grid.
    def __init__(self, name, inputs, pooled_width, pooled_height, spatial_scale,
                 num_channels, **xargs):
        super(ROIPoolLayer, self).__init__(name, 'roi_pool', 0, inputs)
        config_assert(len(inputs) == 2, 'ROIPoolLayer must have 2 inputs')
        self.config.inputs[0].roi_pool_conf.pooled_width = pooled_width
        self.config.inputs[0].roi_pool_conf.pooled_height = pooled_height
        self.config.inputs[0].roi_pool_conf.spatial_scale = spatial_scale
        self.set_cnn_layer(name, pooled_height, pooled_width, num_channels)


@config_layer('data')
class DataLayer(LayerBase):
    # Input layer; optional height/width/depth describe image-shaped data.
    def __init__(self,
                 name,
                 size,
                 depth=None,
                 height=None,
                 width=None,
                 device=None):
        super(DataLayer, self).__init__(
            name, 'data', size, inputs=[], device=device)
        if height and width:
            self.set_layer_height_width(height, width)
        if depth:
            self.set_layer_depth(depth)


'''
DataNormLayer: A layer for data normalization
Input: One and only one input layer is accepted. The input layer must
       be DataLayer with dense data type
Output: The normalization of the input data

Reference:
    LA Shalabi, Z Shaaban, B Kasasbeh. Data mining: A preprocessing engine

Example:
    Layer(
        name = "norm_input_layer",
        type = "data_norm",
        inputs = [Input("input_layer",
                        parameter_name = "_slot0.stats")],
        data_norm_strategy = "z-score",
    )

Note:
  (1) The parameter has been calculated in the preprocessing stage,
      and should be initialized by --init_model_path when training.
  (2) Three data normalization methoeds are considered
          z-score: y = (x-mean)/std
          min-max: y = (x-min)/(max-min)
          decimal-scaling: y = x/10^j, where j is the smallest integer such that max(|y|)<1
'''


@config_layer('data_norm')
class DataNormLayer(LayerBase):
    # See the module-level note above: the 5 x size stats parameter is
    # precomputed offline, hence is_static on the input.
    def __init__(self, name, inputs, data_norm_strategy="z-score", device=None):
        super(DataNormLayer, self).__init__(
            name, 'data_norm', 0, inputs=inputs, device=device)
        self.config.data_norm_strategy = data_norm_strategy
        config_assert(len(inputs) == 1, 'DataNormLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        self.set_layer_size(input_layer.size)
        para_size = 5 * input_layer.size
        para_dims = [5, input_layer.size]
        self.inputs[0].is_static = True
        self.create_input_parameter(0, para_size, para_dims)


@config_layer('prelu')
class ParameterReluLayer(LayerBase):
    """Parametric ReLU; one learned slope per group of partial_sum units."""
    layer_type = 'prelu'

    def __init__(self, name, inputs, partial_sum=1, **args):
        super(ParameterReluLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **args)

        input_layer = self.get_input_layer(0)
        config_assert(len(self.inputs) == 1, "prelu layer has only one input.")
        config_assert(input_layer.size % partial_sum == 0,
                      "a wrong setting for partial_sum")

        dims = [1, input_layer.size / partial_sum]
        self.set_layer_size(input_layer.size)
        self.config.partial_sum = partial_sum
        self.create_input_parameter(0, input_layer.size / partial_sum, dims)

        self.set_layer_height_width(self.get_input_layer(0).height, \
                                        self.get_input_layer(0).width)
        self.set_layer_depth(self.get_input_layer(0).depth)


@config_layer('conv')
class ConvLayerBase(LayerBase):
    """2-D convolution; picks cudnn/mkldnn/exconv backends automatically."""
    layer_type = 'conv'

    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=False,
                 **xargs):
        super(ConvLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)

        if num_filters is not None:
            self.config.num_filters = num_filters

        use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0))
        use_gpu = int(g_command_config_args.get("use_gpu", 0))
        parallel_nn = int(g_command_config_args.get("parallel_nn", 0))

        # Automatically select cudnn_type for GPU, exconv for CPU
        # and mkldnn_conv for MKLDNN
        # if set type=conv, but still reserve the way user specify
        # exconv, mkldnn_conv or cudnn_conv manually.
        if self.layer_type == "cudnn_conv":
            config_assert(use_gpu, "cudnn_conv only support GPU")

        if self.layer_type == "mkldnn_conv":
            config_assert(use_mkldnn, "mkldnn_conv only support MKLDNN")

        if (use_gpu == 1 and self.layer_type != "exconv" and
                self.layer_type != "mkldnn_conv" and
            (parallel_nn == 0 or self.config.device > -1)):
            self.layer_type = "cudnn_conv"
        else:
            self.layer_type = "mkldnn_conv" if use_mkldnn else "exconv"
        # need to specify layer in config
        self.config.type = self.layer_type

        if shared_biases is not None:
            self.config.shared_biases = shared_biases

        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            conv_conf = self.config.inputs[input_index].conv_conf
            parse_conv(self.inputs[input_index].conv, input_layer.name,
                       conv_conf, num_filters)
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            self.set_cnn_layer(name, conv_conf.output_y, conv_conf.output_x,
                               self.config.num_filters)

        psize = self.config.size
        if shared_biases:
            # One bias per filter instead of one per output element.
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        """Weight count: num_filters * filter_channels * kernel area."""
        return self.config.num_filters * conv_conf.filter_channels \
            * (conv_conf.filter_size * conv_conf.filter_size_y)


@config_layer('exconv')
class ConvLayer(ConvLayerBase):
    layer_type = 'exconv'


@config_layer('mkldnn_conv')
class ConvLayer(ConvLayerBase):
    layer_type = 'mkldnn_conv'
@config_layer('cudnn_conv')
class ConvLayer(ConvLayerBase):
    # cuDNN convolution backend; GPU-only (asserted in ConvLayerBase).
    layer_type = 'cudnn_conv'


@config_layer('convt')
class ConvTransLayerBase(LayerBase):
    # Transposed (de-)convolution. Backend (exconvt / cudnn_convt) is chosen
    # from the global command-line config, mirroring ConvLayerBase.
    layer_type = 'convt'

    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=False,
                 **xargs):
        super(ConvTransLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)

        if num_filters is not None:
            self.config.num_filters = num_filters

        use_gpu = int(g_command_config_args.get("use_gpu", 0))
        parallel_nn = int(g_command_config_args.get("parallel_nn", 0))

        # Automatically select cudnn_type for GPU and exconvt for CPU
        # if set type=exconvt, but still reserve the way user specify
        # exconvt or cudnn_convt manually.
        if self.layer_type == "cudnn_convt":
            config_assert(use_gpu, "cudnn_convt only support GPU")

        if (use_gpu == 1 and self.layer_type != "exconvt" and
            (parallel_nn == 0 or self.config.device > -1)):
            self.layer_type = "cudnn_convt"
        else:
            self.layer_type = "exconvt"
        # need to specify layer in config
        self.config.type = self.layer_type

        if shared_biases is not None:
            self.config.shared_biases = shared_biases

        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            # trans=True: output geometry is the *image* size, not the
            # convolution output size (see set_cnn_layer call below).
            parse_conv(
                self.inputs[input_index].conv,
                input_layer.name,
                self.config.inputs[input_index].conv_conf,
                num_filters,
                trans=True)
            conv_conf = self.config.inputs[input_index].conv_conf
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            self.set_cnn_layer(name, conv_conf.img_size_y, conv_conf.img_size,
                               self.config.num_filters)

        psize = self.config.size
        if shared_biases:
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        # Note: uses conv_conf.channels (input side) rather than num_filters,
        # because the filter bank is defined w.r.t. the forward convolution.
        return conv_conf.channels * conv_conf.filter_channels \
            * (conv_conf.filter_size * conv_conf.filter_size_y)


@config_layer('exconvt')
class ConvTransLayer(ConvTransLayerBase):
    # CPU transposed-convolution backend.
    layer_type = 'exconvt'


@config_layer('cudnn_convt')
class ConvTransLayer(ConvTransLayerBase):
    # cuDNN transposed-convolution backend (GPU-only).
    layer_type = 'cudnn_convt'


@config_layer('conv_3d')
class Conv3DLayerBase(LayerBase):
    # 3-D convolution / deconvolution base. `trans` is inferred from the
    # registered type ("deconv3d") rather than a parameter.
    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=True,
                 **xargs):
        super(Conv3DLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)

        if num_filters is not None:
            self.config.num_filters = num_filters

        # need to specify layer in config
        self.config.type = self.layer_type
        trans = False
        if self.config.type == "deconv3d":
            trans = True

        if shared_biases is not None:
            self.config.shared_biases = shared_biases

        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            conv_conf = self.config.inputs[input_index].conv_conf
            parse_conv3d(
                self.inputs[input_index].conv,
                input_layer.name,
                conv_conf,
                num_filters,
                trans=trans
            )  # for z-axis pad:0, strid:1, filter_size:1, img_size:1
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            if trans:
                # Deconv output geometry is the reconstructed image size.
                self.set_cnn_layer(name, conv_conf.img_size_z,
                                   conv_conf.img_size_y, conv_conf.img_size,
                                   self.config.num_filters)
            else:
                self.set_cnn_layer(name, conv_conf.output_z,
                                   conv_conf.output_y, conv_conf.output_x,
                                   self.config.num_filters)

        psize = self.config.size
        if shared_biases:
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        # filters x channels-per-filter x 3-D kernel volume.
        return self.config.num_filters * conv_conf.filter_channels \
            * (conv_conf.filter_size * conv_conf.filter_size_y \
               * conv_conf.filter_size_z)

    def set_cnn_layer(self,
                      input_layer_name,
                      depth,
                      height,
                      width,
                      channels,
                      is_print=True):
        # Overrides the 2-D version to also record depth.
        size = depth * height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        self.set_layer_depth(depth)
        if is_print:
            print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, depth, height, width, size))


@config_layer('conv3d')
class Conv3DLayer(Conv3DLayerBase):
    # Forward 3-D convolution.
    layer_type = 'conv3d'


@config_layer('deconv3d')
class Conv3DLayer(Conv3DLayerBase):
    # 3-D deconvolution (class name intentionally reused; see registry).
    layer_type = 'deconv3d'


@config_layer('norm')
class NormLayer(LayerBase):
    # Local response / cross-channel normalization. Switches to the MKL-DNN
    # LRN implementation only for 'cmrnorm-projection' when MKL-DNN is on.
    def __init__(self, name, inputs, **xargs):
        super(NormLayer, self).__init__(name, 'norm', 0, inputs=inputs, **xargs)
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        use_mkldnn = True if use_mkldnn and self.inputs[
            0].norm.norm_type == 'cmrnorm-projection' else False
        self.config.type = 'mkldnn_lrn' if use_mkldnn else self.config.type
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            norm_conf = self.config.inputs[input_index].norm_conf
            parse_norm(self.inputs[input_index].norm, input_layer.name,
                       norm_conf)
            # MKL-DNN uses the raw (unscaled) user value.
            norm_conf.scale = self.inputs[
                input_index].norm.scale if use_mkldnn else norm_conf.scale
            self.set_cnn_layer(name, norm_conf.output_y, norm_conf.output_x,
                               norm_conf.channels, False)
            if norm_conf.norm_type == "cross-channel-norm":
                # Learnable per-channel scale.
                self.create_input_parameter(0, norm_conf.channels,
                                            [norm_conf.channels, 1])


@config_layer('pool')
class PoolLayer(LayerBase):
    # 2-D pooling; silently upgraded to the MKL-DNN backend when enabled.
    layer_type = 'pool'

    def __init__(self, name, inputs, ceil_mode=True, exclude_mode=None,
                 **xargs):
        use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0))
        if self.layer_type == "mkldnn_pool":
            config_assert(use_mkldnn, "mkldnn_pool only support MKLDNN")
        self.layer_type = 'mkldnn_pool' if use_mkldnn else 'pool'
        super(PoolLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            pool_conf = self.config.inputs[input_index].pool_conf
            parse_pool(self.inputs[input_index].pool, input_layer.name,
                       pool_conf, ceil_mode, exclude_mode)
            self.set_cnn_layer(name, pool_conf.output_y, pool_conf.output_x,
                               pool_conf.channels)


@config_layer('mkldnn_pool')
class MKLDNNPoolLayer(PoolLayer):
    # Explicitly pinned MKL-DNN pooling.
    layer_type = 'mkldnn_pool'


@config_layer('pool3d')
class Pool3DLayer(LayerBase):
    # 3-D pooling; records depth in addition to height/width.
    def __init__(self, name, inputs, ceil_mode=True, **xargs):
        super(Pool3DLayer, self).__init__(
            name, 'pool3d', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            pool_conf = self.config.inputs[input_index].pool_conf
            parse_pool3d(self.inputs[input_index].pool, input_layer.name,
                         pool_conf, ceil_mode)
            self.set_cnn_layer(name, pool_conf.output_z, pool_conf.output_y,
                               pool_conf.output_x, pool_conf.channels)

    def set_cnn_layer(self,
                      input_layer_name,
                      depth,
                      height,
                      width,
                      channels,
                      is_print=True):
        size = depth * height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        self.set_layer_depth(depth)
        if is_print:
            print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, depth, height, width, size))


@config_layer('spp')
class SpatialPyramidPoolLayer(LayerBase):
    # Spatial pyramid pooling: output width is the number of pyramid cells,
    # sum_{i<h} 4^i = (4^h - 1) / 3, per channel.
    def __init__(self, name, inputs, **xargs):
        super(SpatialPyramidPoolLayer, self).__init__(
            name, 'spp', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            spp_conf = self.config.inputs[input_index].spp_conf
            parse_spp(self.inputs[input_index].spp, input_layer.name, spp_conf)
            output_x = (pow(4, spp_conf.pyramid_height) - 1) / (4 - 1)
            self.set_cnn_layer(name, 1, output_x, spp_conf.image_conf.channels)


@config_layer('upsample')
class UpsampleLayer(LayerBase):
    # Upsampling by an integer scale or to an explicit target size.
    # If both are given, the explicit upsample_size wins (set last).
    def __init__(self, name, inputs, **xargs):
        super(UpsampleLayer, self).__init__(
            name, 'upsample', 0, inputs=inputs, **xargs)
        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].upsample_conf.image_conf
        image_conf.img_size = input_layer.width
        image_conf.img_size_y = input_layer.height
        # NOTE(review): Python 2 integer division; assumes width*height
        # divides size exactly — confirm inputs are image-shaped.
        image_conf.channels = input_layer.size / (input_layer.width *
                                                  input_layer.height)

        upsample = self.inputs[0].upsample
        output_x = 0
        output_y = 0
        output_size = 0
        if upsample.scale:
            self.config.inputs[0].upsample_conf.scale = upsample.scale
            self.config.inputs[0].upsample_conf.scale_y = upsample.scale_y
            output_x = input_layer.width * upsample.scale
            output_y = input_layer.height * upsample.scale_y
        self.config.inputs[0].upsample_conf.pad_out_x = upsample.pad_out_x
        self.config.inputs[0].upsample_conf.pad_out_y = upsample.pad_out_y
        if upsample.upsample_size:
            self.config.inputs[
                0].upsample_conf.upsample_size = upsample.upsample_size
            self.config.inputs[
                0].upsample_conf.upsample_size_y = upsample.upsample_size_y
            output_x = upsample.upsample_size
            output_y = upsample.upsample_size_y

        output_size = image_conf.channels * output_x * output_y

        self.set_layer_height_width(output_y, output_x)
        self.set_layer_depth(input_layer.depth)
        self.set_layer_size(output_size)


@config_layer('pad')
class PadLayer(LayerBase):
    # Zero-padding along channel, height and width; pad.* are [before, after].
    def __init__(self, name, inputs, **xargs):
        super(PadLayer, self).__init__(name, 'pad', 0, inputs=inputs, **xargs)
        pad = self.inputs[0].pad
        self.config.inputs[0].pad_conf.pad_c.extend(pad.pad_c)
        self.config.inputs[0].pad_conf.pad_h.extend(pad.pad_h)
        self.config.inputs[0].pad_conf.pad_w.extend(pad.pad_w)

        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].pad_conf.image_conf
        parse_image(pad, input_layer.name, image_conf)
        out_ch = pad.channels + pad.pad_c[0] + pad.pad_c[1]
        out_h = image_conf.img_size_y + pad.pad_h[0] + pad.pad_h[1]
        out_w = image_conf.img_size + pad.pad_w[0] + pad.pad_w[1]
        self.set_cnn_layer(name, out_h, out_w, out_ch)
        self.config.size = out_ch * out_h * out_w


@config_layer('crop')
class CropLayer(LayerBase):
    # Crops input 0, either to an explicit `shape` or to the geometry of a
    # second reference input. Only 4-D NCHW inputs are supported.
    def __init__(self, name, inputs, axis, offset, shape, **xargs):
        super(CropLayer, self).__init__(name, 'crop', 0, inputs=inputs, **xargs)
        self.config.axis = axis
        self.config.offset.extend(offset)
        self.config.shape.extend(shape)

        # get channel, width and height from input_0 layer
        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].image_conf
        image_conf.img_size = input_layer.width
        image_conf.img_size_y = input_layer.height
        image_conf.channels = input_layer.size / (input_layer.width *
                                                  input_layer.height)
        # only support for 4-dims inputs and NCHW order
        if (len(self.config.inputs) == 2):
            self.set_layer_height_width(
                self.get_input_layer(1).height, self.get_input_layer(1).width)
            self.set_layer_size(self.get_input_layer(1).size)
        else:
            self.set_layer_height_width(shape[-2], shape[-1])
            self.set_layer_size(reduce(lambda x, y: x * y, shape[1:]))


@config_layer('batch_norm')
class BatchNormLayer(LayerBase):
    # Batch normalization. Appends two hidden static inputs holding the
    # moving mean and variance, then picks the backend (cudnn / mkldnn /
    # plain) from the global command-line config.
    layer_type = 'batch_norm'

    def __init__(self,
                 name,
                 inputs,
                 bias=True,
                 img3D=False,
                 use_global_stats=True,
                 epsilon=1e-5,
                 moving_average_fraction=0.9,
                 batch_norm_type=None,
                 mean_var_names=None,
                 **xargs):
        if inputs is None:
            inputs = []
        elif not isinstance(inputs, list):
            inputs = [inputs]
        config_assert(
            len(inputs) == 1, "BatchNormLayer must have one and only one input")
        # Create Input for moving mean and std,
        # in batch normalization layer.
        # These paras no need to update, so set is_static is true.
        # If not use is_static, even set learning_rate = 0, decay_rate = 0,
        # these paras will change if set average_window in configure.
        use_gpu = bool(int(g_command_config_args.get("use_gpu", 0)))
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        is_shared = True if not use_gpu else False
        for i in xrange(2):
            inputs.append(
                Input(
                    inputs[0].input_layer_name,
                    initial_std=0.0,
                    initial_mean=0.0,
                    is_static=True,
                    is_shared=is_shared,
                    make_layer_name_in_submodel=False, ))

        parallel_nn = bool(int(g_command_config_args.get("parallel_nn", 0)))
        cudnn_version = int(g_command_config_args.get("cudnn_version", 0))
        # Automatically select cudnn_batch_norm for GPU, batch_norm for CPU
        # and mkldnn_batch_norm for MKLDNN. Also based on cudnn version.
        if batch_norm_type == "mkldnn_batch_norm":
            config_assert(use_mkldnn, "mkldnn_batch_norm only support MKLDNN")
        use_cudnn = use_gpu and batch_norm_type != "batch_norm" and \
            not use_mkldnn and batch_norm_type != "mkldnn_batch_norm" and \
            ((not parallel_nn) or self.config.device > -1)
        if use_cudnn:
            self.layer_type = "cudnn_batch_norm"
        else:
            self.layer_type = "mkldnn_batch_norm" if use_mkldnn else "batch_norm"
        super(BatchNormLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)

        if use_global_stats is not None:
            self.config.use_global_stats = use_global_stats
        if moving_average_fraction is not None:
            self.config.moving_average_fraction = moving_average_fraction
        if epsilon is not None:
            assert epsilon >= 1e-5, "epsilon must be no less than 1e-5."
            self.config.epsilon = epsilon

        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].image_conf
        if img3D:
            parse_image3d(self.inputs[0].image, input_layer.name, image_conf)
            # Only pass the width and height of input to batch_norm layer
            # when either of it is non-zero.
            if input_layer.width != 0 or input_layer.height != 0:
                self.set_cnn_layer(
                    input_layer_name=name,
                    depth=image_conf.img_size_z,
                    height=image_conf.img_size_y,
                    width=image_conf.img_size,
                    channels=image_conf.channels,
                    is_print=True)
            else:
                self.set_layer_size(input_layer.size)
        else:
            parse_image(self.inputs[0].image, input_layer.name, image_conf)
            # Only pass the width and height of input to batch_norm layer
            # when either of it is non-zero.
            if input_layer.width != 0 or input_layer.height != 0:
                self.set_cnn_layer(
                    input_layer_name=name,
                    height=image_conf.img_size_y,
                    width=image_conf.img_size,
                    channels=image_conf.channels,
                    is_print=True)
            else:
                self.set_layer_size(input_layer.size)

        psize = self.calc_parameter_size(image_conf)
        dims = [1, psize]
        if mean_var_names is not None:
            assert len(mean_var_names) == 2
            self.inputs[1].parameter_name = mean_var_names[0]
            self.inputs[2].parameter_name = mean_var_names[1]

        # Parameters: 0 = scale (gamma), 1 = moving mean, 2 = moving variance.
        self.create_input_parameter(0, psize)
        self.create_input_parameter(1, psize, dims)
        self.create_input_parameter(2, psize, dims)
        self.create_bias_parameter(bias, psize)

    def set_cnn_layer(self,
                      input_layer_name,
                      depth=None,
                      height=None,
                      width=None,
                      channels=None,
                      is_print=True):
        # depth is optional: 2-D batch norm passes no depth and prints the
        # short form.
        depthIsNone = False
        if depth is None:
            depth = 1
            depthIsNone = True
        size = depth * height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        self.set_layer_depth(depth)
        if is_print and depthIsNone:
            print("output for %s: c = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, height, width, size))
        elif is_print:
            print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, depth, height, width, size))

    def calc_parameter_size(self, image_conf):
        # One scale/mean/var entry per channel.
        return image_conf.channels


@config_layer('trans')
class TransLayer(LayerBase):
    # Matrix transpose of the single input; size is unchanged.
    def __init__(self, name, inputs, **xargs):
        super(TransLayer, self).__init__(
            name, 'trans', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'TransLayer must have one and only one input')
        self.set_layer_size(self.get_input_layer(0).size)


@config_layer('resize')
class ResizeLayer(LayerBase):
    # Reinterprets the input as rows of the given size.
    def __init__(self, name, size, inputs, **xargs):
        super(ResizeLayer, self).__init__(
            name, 'resize', size=size, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'ResizeLayer must have one and only one input')


@config_layer('rotate')
class RotateLayer(LayerBase):
    # 90-degree rotation of an (height x width) input.
    def __init__(self, name, inputs, height, width, device=None):
        super(RotateLayer, self).__init__(
            name, 'rotate', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 1, 'RotateLayer must have one and only one input')
        self.set_layer_height_width(height, width)
        self.set_layer_size(self.get_input_layer(0).size)


@config_layer('blockexpand')
class BlockExpandLayer(LayerBase):
    # im2col-style expansion: output size is one block's element count.
    def __init__(self, name, inputs, **xargs):
        super(BlockExpandLayer, self).__init__(
            name, 'blockexpand', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            parse_block_expand(
                self.inputs[input_index].block_expand, input_layer.name,
                self.config.inputs[input_index].block_expand_conf)
            block_expand_conf = self.config.inputs[
                input_index].block_expand_conf
            self.set_layer_size(block_expand_conf.block_x *
                                block_expand_conf.block_y *
                                block_expand_conf.channels)


@config_layer('maxout')
class MaxOutLayer(LayerBase):
    # Maxout: channels are reduced by a factor of `groups`.
    def __init__(self, name, inputs, **xargs):
        super(MaxOutLayer, self).__init__(
            name, 'maxout', 0, inputs=inputs, **xargs)
        input_layer = self.get_input_layer(0)
        maxout_conf = self.config.inputs[0].maxout_conf
        parse_maxout(self.inputs[0].maxout, input_layer.name, maxout_conf)
        out_channels = maxout_conf.image_conf.channels / maxout_conf.groups
        self.set_cnn_layer(name, maxout_conf.image_conf.img_size_y,
                           maxout_conf.image_conf.img_size, out_channels)


@config_layer('row_conv')
class RowConvLayer(LayerBase):
    # Row convolution (lookahead convolution); one [context, size] weight.
    def __init__(self, name, inputs, context_length, **xargs):
        super(RowConvLayer, self).__init__(
            name, 'row_conv', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'row convolution layer must have one and only one input.')
        input_layer = self.get_input_layer(0)
        row_conv_conf = self.config.inputs[0].row_conv_conf
        row_conv_conf.context_length = context_length
        self.set_layer_size(input_layer.size)
        psize = context_length * input_layer.size
        dims = [context_length, input_layer.size]
        self.create_input_parameter(0, psize, dims)


@config_layer('clip')
class ClipLayer(LayerBase):
    # Element-wise clamp into [min, max].
    def __init__(self, name, inputs, min, max, **xargs):
        super(ClipLayer, self).__init__(name, 'clip', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'ClipLayer must have one and only one input.')
        config_assert(min < max, 'min must be less than max.')
        input_layer = self.get_input_layer(0)
        self.set_layer_size(input_layer.size)
        self.config.inputs[0].clip_conf.min = min
        self.config.inputs[0].clip_conf.max = max


@config_layer('scale_shift')
class ScaleShiftLayer(LayerBase):
    # y = w * x + b with a single scalar weight (and optional scalar bias).
    def __init__(self, name, inputs, bias=True, **xargs):
        super(ScaleShiftLayer, self).__init__(
            name, 'scale_shift', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'ScaleShiftLayer must have one and only one input.')
        input_layer = self.get_input_layer(0)
        self.set_layer_size(input_layer.size)
        self.create_input_parameter(0, 1, [1, 1])
        self.create_bias_parameter(bias, 1)


# key: cost type
# value: cost class
g_cost_map = {}


# define a cost layer without any parameters
def define_cost(class_name, cost_type):
    # Dynamically creates a parameterless cost-layer class and registers it
    # in g_cost_map under its protobuf type string.
    def init(cls, name, inputs, device=None, coeff=1.):
        super(type(cls), cls).__init__(
            name, cost_type, 1, inputs, device=device, coeff=coeff)

    cls = type(class_name, (LayerBase, ), dict(__init__=init))
    global g_cost_map
    g_cost_map[cost_type] = cls


define_cost('MultiClassCrossEntropy', 'multi-class-cross-entropy')
define_cost('CrossEntropyOverBeamCostLayer', 'cross_entropy_over_beam')
define_cost('RankingCost', 'rank-cost')
define_cost('AucValidation', 'auc-validation')
define_cost('PnpairValidation', 'pnpair-validation')
define_cost('SumOfSquaresCostLayer', 'square_error')
define_cost('MultiBinaryLabelCrossEntropy', 'multi_binary_label_cross_entropy')
define_cost('SoftBinaryClassCrossEntropy', 'soft_binary_class_cross_entropy')
define_cost('HuberTwoClassification', 'huber_classification')
define_cost('SumCost', 'sum_cost')
define_cost('SmoothL1Cost', 'smooth_l1')


@config_layer('hsigmoid')
class HierarchicalSigmoidLayer(LayerBase):
    # Hierarchical sigmoid classifier: one [num_classes-1, size] weight per
    # feature input (the last input is the label and gets no parameter).
    def __init__(self, name, num_classes, inputs, device=None, bias=True):
        super(HierarchicalSigmoidLayer, self).__init__(
            name, 'hsigmoid', 1, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) >= 2,
            'HierarchicalSigmoidLayer must have at least 2 inputs')
        self.config.num_classes = num_classes
        for input_index in xrange(len(self.inputs) - 1):
            input_layer = self.get_input_layer(input_index)
            psize = (num_classes - 1) * input_layer.size
            dims = [num_classes - 1, input_layer.size]
            self.create_input_parameter(input_index, psize, dims)
        self.create_bias_parameter(bias, num_classes - 1)


'''
lambdaCost for lambdaRank LTR approach

Usage:
  Example: Layer(name = "cost", type = "lambda_cost", NDCG_num = 8,
                 max_sort_size = -1, inputs = ["output", "score"])

  Input data: Samples of the same query should be loaded as a sequence,
          by PyDataProvider etc.. User should provide
          scores for each sample. The score slot should be the 2nd
          input of lambdaRank layer.

  NDCG_num = the size of NDCG, e.g., 5 for NDCG@5.
    Note: NDCG_num must be less than or equal to the minimum
          size of lists.

  max_sort_size = the size of partial sorting in calculating gradient.
    Note: If max_sort_size = -1, then for each list, the
          algorithm will sort the entire list to get gradient.
          In other cases, max_sort_size must be greater than or
          equal to NDCG_num.
          max_sort_size can be greater than the size of a list, in which
          case the algorithm will sort the entire list to get gradient.
'''


@config_layer('lambda_cost')
class LambdaCost(LayerBase):
    # LambdaRank cost; see the doc block above for parameter semantics.
    def __init__(self, name, inputs, NDCG_num=5, max_sort_size=-1, device=None):
        super(LambdaCost, self).__init__(
            name, 'lambda_cost', 1, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 2, 'lambdaCost must have 2 inputs')
        self.config.NDCG_num = NDCG_num
        if max_sort_size != -1:
            config_assert(
                NDCG_num <= max_sort_size,
                'NDCG_num must be less than or equal to max_sort_size')
        self.config.max_sort_size = max_sort_size


@config_layer('huber_regression')
class HuberRegressionLoss(LayerBase):
    # Huber loss for regression with transition point `delta`.
    def __init__(self, name, inputs, delta=1., coeff=1., device=None):
        super(HuberRegressionLoss, self).__init__(
            name, 'huber_regression', 1, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 2, 'HuberRegression must have 2 inputs')
        self.config.delta = delta
        self.config.coeff = coeff


@config_layer('nce')
class NCELayer(LayerBase):
    # Noise-contrastive estimation output layer. The last input must be a
    # data layer (the label); an optional size-1 data layer before it is
    # treated as per-sample weights.
    def __init__(self,
                 name,
                 num_classes,
                 inputs,
                 num_neg_samples=10,
                 neg_sampling_dist=None,
                 bias=True,
                 **xargs):
        super(NCELayer, self).__init__(name, 'nce', 1, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) >= 2, 'NCELayer must have at least 2 inputs')
        self.config.num_classes = num_classes
        if neg_sampling_dist is not None:
            config_assert(
                len(neg_sampling_dist) == num_classes,
                'len(neg_sampling_dist)(%s) is not same as num_classes (%s)' %
                (len(neg_sampling_dist), num_classes))
            s = sum(neg_sampling_dist)
            config_assert(
                abs(s - 1) < 1e-5,
                'The sum of neg_sampling_dist (%s) is not 1' % s)
            self.config.neg_sampling_dist.extend(neg_sampling_dist)

        self.config.num_neg_samples = num_neg_samples
        num_real_inputs = len(self.inputs) - 1
        input_layer = self.get_input_layer(num_real_inputs)
        config_assert(input_layer.type == 'data',
                      'Expecting the last input layer of an nce layer to be '
                      'a data layer')

        if (num_real_inputs > 1 and input_layer.size == 1 and
                self.get_input_layer(num_real_inputs - 1).type == 'data'):
            # This input layer is assumed to be a sample weight layer
            num_real_inputs -= 1

        for input_index in xrange(num_real_inputs):
            input_layer = self.get_input_layer(input_index)
            psize = num_classes * input_layer.size
            dims = [num_classes, input_layer.size]
            self.create_input_parameter(input_index, psize, dims)
        self.create_bias_parameter(bias, num_classes)


@config_layer('addto')
class AddToLayer(LayerBase):
    # Element-wise sum of all inputs (all must share the same size).
    layer_type = 'addto'

    def __init__(self, name, inputs, bias=True, **xargs):
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        if self.layer_type == "mkldnn_addto":
            config_assert(use_mkldnn, "mkldnn_addto only support MKLDNN")
        self.layer_type = 'mkldnn_addto' if use_mkldnn else 'addto'
        super(AddToLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        config_assert(len(inputs) > 0, 'inputs cannot be empty for AddToLayer')

        layer_size = self.get_input_layer(0).size
        # To preserve height, width, depth.
        layer_with_hwc = self.get_input_layer(0)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            assert layer_size == input_layer.size
            # NOTE(review): `height` is tested three times; this was very
            # likely meant to be `height and width and depth`. Left as-is
            # because changing it alters which input supplies the geometry.
            if input_layer.height and input_layer.height and input_layer.height:
                layer_with_hwc = input_layer

        self.set_layer_size(layer_with_hwc.size)
        self.set_layer_height_width(layer_with_hwc.height, layer_with_hwc.width)
        self.set_layer_depth(layer_with_hwc.depth)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('mkldnn_addto')
class MKLDNNAddtoLayer(AddToLayer):
    # Explicitly pinned MKL-DNN addto.
    layer_type = 'mkldnn_addto'


@config_layer('agent')
class AgentLayer(LayerBase):
    # Placeholder layer inside a recurrent layer group; gets its data from
    # a layer outside the group at runtime.
    def __init__(self, name, size, device=None):
        super(AgentLayer, self).__init__(
            name, 'agent', size, inputs=[], device=device)


@config_layer('gather_agent')
class GatherAgentLayer(LayerBase):
    # Gathers per-step outputs of a recurrent group back into a sequence.
    def __init__(self, name, size, device=None):
        super(GatherAgentLayer, self).__init__(
            name, 'gather_agent', size, inputs=[], device=device)


@config_layer('scatter_agent')
class ScatterAgentLayer(LayerBase):
    # Scatters an outside sequence into per-step inputs of a recurrent group.
    def __init__(self, name, size, width=None, height=None, device=None):
        super(ScatterAgentLayer, self).__init__(
            name, 'scatter_agent', size, inputs=[], device=device)
        if height and width:
            self.set_layer_height_width(height, width)


@config_layer('multiplex')
class MultiplexLayer(LayerBase):
    # Row-wise selection: input 0 holds indices choosing among inputs 1..N,
    # which must all have the layer's size.
    def __init__(self, name, inputs, size, device=None):
        super(MultiplexLayer, self).__init__(
            name, 'multiplex', size, inputs=inputs, device=device)
        config_assert(
            len(inputs) > 2, 'MultiplexLayer should have more than 2 inputs.')
        for i in range(1, len(inputs)):
            config_assert(
                self.get_input_layer(i).size == size,
                "All the input layers except the first one should"
                "have the same size as the MultiplexLayer.")


@config_func
def Link(name, has_subseq=False):
    """
    Still keeping has_subseq for backward compatibility
    """
    link_config = LinkConfig()
    link_config.link_name = name
    return link_config


# memory for recurrent layer group.
# *name* and *size* are actual layer's name and size.
# If *name* is None, need to provide *memory_name* and need to use
# SetMemoryInput() later to specify the layer which this memory remembers.
#
# return the name of the memory,
# use this name if you assign the memory as other layer's input
#
# boot frame of memory is zeroed by default,
# or initialize by boot layer output if *boot_layer* set,
# or initialize by trainable bias if *boot_bias* set,
# or initialize by a constant id if *boot_with_const_id* set
#
# Memory can be a sequence if *is_sequence* set, this type of memory
# can only be initialized by a *boot_layer* which is a sequence.
# @config_func def Memory(name, size, is_sequence=False, boot_layer=None, boot_bias=False, boot_bias_active_type="", boot_with_const_id=None, memory_name=None): if not memory_name: config_assert(name is not None, "name needs cannot be None") memory_name = name + "+delay1" agent_name = memory_name agent_layer = AgentLayer(agent_name, size) config_assert(g_current_submodel.is_recurrent_layer_group, 'Memory should be used in recurrent layer group only') memory = g_current_submodel.memories.add() if name is not None: memory.layer_name = MakeLayerNameInSubmodel(name) memory.link_name = MakeLayerNameInSubmodel(agent_name) options = sum((boot_layer is not None, bool(boot_bias), boot_with_const_id is not None)) config_assert( options <= 1, 'take one option at most from boot_layer, boot_bias, or boot_with_const_id' ) if boot_layer is not None: boot_layer = MakeLayerNameInParentSubmodel(boot_layer) config_assert(boot_layer in g_layer_map, 'boot_layer "%s" does not correspond to a layer name' % boot_layer) memory.boot_layer_name = boot_layer elif boot_bias: memory.boot_bias_parameter_name = agent_layer.create_bias_parameter( boot_bias, size, for_self=False) memory.boot_bias_active_type = boot_bias_active_type elif boot_with_const_id is not None: memory.boot_with_const_id = boot_with_const_id return agent_name @config_func def SetMemoryInput(memory_name, layer_name): memory_name = MakeLayerNameInSubmodel(memory_name) layer_name = MakeLayerNameInSubmodel(layer_name) for mem in g_current_submodel.memories: if mem.link_name == memory_name: mem.layer_name = layer_name return logger.fatal("Nonexistent memory name: " + memory_name) # Generator for recurrent layer group, to use it: # 1. define a id layer as output of layer group # 2. define a memory of this id layer, and assign a boot id(begin of sequence) # 3. define a eos check layer and fill its name in generator's *eos_layer_name* # Sequence generation will stop when eos check return 1 or *max_num_frames* reached. 
# If *beam_size* is greater than one, generator will use beam search.
#   in beam search, if *num_results_per_sample* set, one sample sequence can output
#   multiple results each with a probility.
@config_func
def Generator(
        max_num_frames,
        eos_layer_name="eos_check",
        num_results_per_sample=1,
        beam_size=1,
        log_prob=None, ):
    # Builds the GeneratorConfig protobuf for sequence generation inside a
    # recurrent layer group (see the comment block above for usage).
    generator_config = GeneratorConfig()
    generator_config.max_num_frames = max_num_frames
    generator_config.eos_layer_name = eos_layer_name
    generator_config.num_results_per_sample = num_results_per_sample
    generator_config.beam_size = beam_size
    if log_prob is not None:
        generator_config.log_prob = log_prob
    return generator_config


@config_layer('expand')
class ExpandLayer(LayerBase):
    # Expands input 0 along the sequence structure of input 1.
    def __init__(self, name, inputs, trans_type='non-seq', bias=False, **xargs):
        super(ExpandLayer, self).__init__(
            name, 'expand', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 2, 'ExpandLayer takes 2 and only 2 inputs')
        self.config.trans_type = trans_type
        # NOTE(review): this loop body has no effect (input_layer is unused);
        # looks like leftover boilerplate.
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
        self.set_layer_size(self.get_input_layer(0).size)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('featmap_expand')
class FeatMapExpandLayer(LayerBase):
    # Repeats the input feature map `num_filters` times, as rows or columns.
    def __init__(self,
                 name,
                 inputs,
                 num_filters=None,
                 as_row_vector=True,
                 bias=False,
                 **xargs):
        super(FeatMapExpandLayer, self).__init__(
            name, 'featmap_expand', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'ExpandLayer takes 1 and only 1 inputs')
        if num_filters is not None:
            self.config.num_filters = num_filters
        else:
            logger.fatal("FeatMapExpandLayer must specify num_filters.")
        if not as_row_vector:
            self.config.user_arg = "as_col_vec"
        self.set_layer_size(self.get_input_layer(0).size * num_filters)


@config_layer('max')
class MaxLayer(LayerBase):
    # Sequence max-pooling, optionally strided; can output argmax indices.
    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 output_max_index=None,
                 stride=-1,
                 **xargs):
        super(MaxLayer, self).__init__(name, 'max', 0, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'MaxLayer must have 1 input')
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.trans_type = trans_type
        self.config.seq_pool_stride = stride
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)
        self.create_bias_parameter(bias, self.config.size)
        if output_max_index is not None:
            self.config.output_max_index = output_max_index


@config_layer('maxid')
class MaxIdLayer(LayerBase):
    # Emits the index of the maximum input element; beam_size defaults to
    # the enclosing generator's beam size when inside one.
    def __init__(self, name, inputs, beam_size=None, device=None):
        super(MaxIdLayer, self).__init__(
            name, 'maxid', 0, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 1, 'MaxIdLayer must have 1 input')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)

        if beam_size is None:
            global g_current_submodel
            if g_current_submodel.HasField("generator"):
                self.config.beam_size = g_current_submodel.generator.beam_size
        else:
            self.config.beam_size = beam_size


@config_layer('eos_id')
class EosIdLayer(LayerBase):
    # Emits 1 when the input id equals eos_id, else 0.
    def __init__(self, name, inputs, eos_id, device=None):
        super(EosIdLayer, self).__init__(
            name, 'eos_id', 0, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 1, 'EosIdLayer must have 1 input')
        self.set_layer_size(2)  # boolean output
        self.config.eos_id = eos_id


@config_layer('seqlastins')
class SequenceLastInstanceLayer(LayerBase):
    # Takes the last instance of each (sub)sequence, optionally strided.
    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(SequenceLastInstanceLayer, self).__init__(
            name, 'seqlastins', 0, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 1, 'SequenceLastInstanceLayer must have 1 input')
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.trans_type = trans_type
        self.config.seq_pool_stride = stride
        self.set_layer_size(self.get_input_layer(0).size)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('seqfirstins')
class SequenceFirstInstanceLayer(SequenceLastInstanceLayer):
    # Same as seqlastins but selects the first instance (select_first flag).
    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(SequenceFirstInstanceLayer, self).__init__(
            name,
            inputs=inputs,
            trans_type=trans_type,
            bias=bias,
            stride=stride,
            **xargs)
        self.config.select_first = True


@config_layer('seqconcat')
class SequenceConcatLayer(LayerBase):
    # Concatenates two sequences end-to-end (same feature width).
    def __init__(self, name, inputs, bias=False, **xargs):
        super(SequenceConcatLayer, self).__init__(
            name, 'seqconcat', 0, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 2, 'SequenceConcatLayer must have 2 inputs')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('seqreshape')
class SequenceReshapeLayer(LayerBase):
    # Reshapes a sequence into rows of the given size.
    def __init__(self, name, size, inputs, bias=False, **xargs):
        super(SequenceReshapeLayer, self).__init__(
            name, 'seqreshape', size, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 1, 'SequenceReshapeLayer must have 1 inputs')
        self.set_layer_size(size)
        self.create_bias_parameter(bias, size)


@config_layer('subseq')
class SubSequenceLayer(LayerBase):
    # Extracts sub-sequences of input 0 using offset (input 1) and size
    # (input 2) layers.
    def __init__(self, name, inputs, bias=False, **xargs):
        super(SubSequenceLayer, self).__init__(
            name, 'subseq', 0, inputs=inputs, **xargs)
        config_assert(len(inputs) == 3, 'SubSequenceLayer must have 3 inputs')
        input_layer0 = self.get_input_layer(0)
        size = input_layer0.size
        self.set_layer_size(size)
        self.create_bias_parameter(bias, size)


@config_layer('seq_slice')
class SeqSliceLayer(LayerBase):
    # Slices sequences by start and/or end index layers. Exactly one of
    # starts/ends may be None; select_first marks the starts-only case.
    def __init__(self, name, inputs, starts, ends, bias=False, **xargs):
        if isinstance(inputs, list):
            assert len(inputs) == 1, ('the first input of sequence slice layer '
                                      'is a single sequence input.')
        else:
            inputs = [inputs]

        if starts is not None:
            if isinstance(starts, list):
                assert len(starts) == 1, (
                    'the start indices for sequence slice layer cannot '
                    'be a list having more than one element.')
                starts = starts[0]
            inputs.append(starts)

        if ends is not None:
            if isinstance(ends, list):
                assert len(ends) == 1, (
                    'the end indices for sequence slice layer cannot '
                    'be a list having more than one element.')
                ends = ends[0]
            inputs.append(ends)
        assert len(inputs) >= 2, (
            'the sequence slice layer has at least two inputs.')

        super(SeqSliceLayer, self).__init__(
            name, 'seq_slice', 0, inputs=inputs, **xargs)

        input_layer0 = self.get_input_layer(0)
        size = input_layer0.size
        self.set_layer_size(size)

        if len(inputs) == 3:
            assert (
                self.get_input_layer(1).size == self.get_input_layer(2).size), (
                    'If start and end indices are both given to'
                    'sequence slice layer, they should have the same width.')
        elif len(inputs) == 2:
            self.config.select_first = (starts is not None)


@config_layer('sub_nested_seq')
class SubNestedSequenceLayer(LayerBase):
    # Selects sub-sequences of a nested sequence by an index layer.
    def __init__(self, name, inputs, selected_indices, bias=False, **xargs):
        if isinstance(inputs, list):
            assert len(inputs) == 1, ('the first input of sub_nested_seq '
                                      'layer is a single nested sequence.')
            inputs = inputs[0]
        if isinstance(selected_indices, list):
            assert len(selected_indices) == 1, (
                'the second input of '
                'sub_nested_seq layer is a single layer which is a '
                'set of selected indices.')
            selected_indices = selected_indices[0]

        super(SubNestedSequenceLayer, self).__init__(
            name,
            'sub_nested_seq',
            0,
            inputs=[inputs, selected_indices],
            **xargs)
        input_layer0 = self.get_input_layer(0)
        size = input_layer0.size
        self.set_layer_size(size)


@config_layer('dot_prod')
class DotProdLayer(LayerBase):
    # Row-wise dot product of two equal-size inputs; scalar output.
    def __init__(self, name, inputs, device=None):
        super(DotProdLayer, self).__init__(
            name, 'dot_prod', 0, inputs, device=device)
        config_assert(len(inputs) == 2, 'DotProdLayer must have 2 inputs.')
        config_assert(
            self.get_input_layer(0).size == self.get_input_layer(1).size,
            "Two inputs should have the same size.")
        self.set_layer_size(1)


@config_layer('out_prod')
class OuterProdLayer(LayerBase):
    # Outer product of two vectors; output size is size0 * size1.
    def __init__(self, name, inputs, device=None):
        super(OuterProdLayer, self).__init__(
            name, 'out_prod', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'OuterProdLayer must have 2 inputs')
        input_layer0 = self.get_input_layer(0)
        input_layer1 = self.get_input_layer(1)
        self.set_layer_size(input_layer0.size * input_layer1.size)


@config_layer('power')
class PowerLayer(LayerBase):
    # y = x ** p, with the scalar exponent p as input 0 and x as input 1.
    def __init__(self, name, inputs, device=None):
        super(PowerLayer, self).__init__(
            name, 'power', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'PowerLayer must have 2 inputs')
        input_layer1 = self.get_input_layer(1)
        self.set_layer_size(input_layer1.size)
        input_layer0 = self.get_input_layer(0)
        config_assert(1 == input_layer0.size,
                      'The left input is the exponent and should be of size 1')


@config_layer('slope_intercept')
class SlopeInterceptLayer(LayerBase):
    # y = slope * x + intercept with fixed (non-learnable) constants.
    def __init__(self, name, inputs, slope=1.0, intercept=0.0, device=None):
        super(SlopeInterceptLayer, self).__init__(
            name, 'slope_intercept', 0, inputs=inputs, device=device)
        self.config.slope = slope
        self.config.intercept = intercept
        config_assert(len(inputs) == 1, 'SlopeInterceptLayer must have 1 input')
        input_layer0 = self.get_input_layer(0)
        self.set_layer_size(input_layer0.size)


@config_layer('scaling')
class ScalingLayer(LayerBase):
    # Row-wise scaling: input 0 is a per-row scalar weight, input 1 the data.
    def __init__(self, name, inputs, device=None):
        super(ScalingLayer, self).__init__(
            name, 'scaling', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'ScalingLayer must have 2 inputs')
        input_layer1 = self.get_input_layer(1)
        self.set_layer_size(input_layer1.size)
        input_layer0 = self.get_input_layer(0)
        config_assert(1 == input_layer0.size,
                      'The left input should be of size 1')


@config_layer('conv_shift')
class ConvShiftLayer(LayerBase):
    # Circular convolution of two inputs.
    # (Definition continues beyond this chunk of the file.)
    def __init__(self, name, inputs, device=None):
        super(ConvShiftLayer, self).__init__(
            name, 'conv_shift', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'ConvShiftLayer must have 2
inputs') input_layer0 = self.get_input_layer(0) self.set_layer_size(input_layer0.size) @config_layer('convex_comb') class ConvexCombinationLayer(LayerBase): def __init__(self, name, size, inputs, device=None): super(ConvexCombinationLayer, self).__init__( name, 'convex_comb', size, inputs=inputs, device=device) config_assert( len(self.inputs) == 2, 'ConvexCombinationLayer must have 2 inputs') config_assert( size * self.get_input_layer(0).size == self.get_input_layer(1).size, 'Wrong input size for ConvexCombinationLayer') self.set_layer_size(size) @config_layer('interpolation') class InterpolationLayer(LayerBase): def __init__(self, name, inputs, device=None): super(InterpolationLayer, self).__init__( name, 'interpolation', 0, inputs=inputs, device=device) config_assert( len(self.inputs) == 3, 'InterpolationLayer must have 3 inputs') input_layer0 = self.get_input_layer(0) input_layer1 = self.get_input_layer(1) input_layer2 = self.get_input_layer(2) self.set_layer_size(input_layer1.size) config_assert(input_layer0.size == 1, 'weight should be of size 1') config_assert(input_layer1.size == input_layer2.size, 'the two vector inputs should be of the same size') @config_layer('bilinear_interp') class BilinearInterpLayer(LayerBase): def __init__(self, name, inputs, **xargs): super(BilinearInterpLayer, self).__init__( name, 'bilinear_interp', 0, inputs=inputs, **xargs) input_layer = self.get_input_layer(0) conf = self.config.inputs[0].bilinear_interp_conf parse_bilinear(self.inputs[0].bilinear_interp, input_layer.name, conf) self.set_cnn_layer(name, conf.out_size_y, conf.out_size_x, conf.image_conf.channels) @config_layer('sum_to_one_norm') class SumToOneNormLayer(LayerBase): def __init__(self, name, inputs, device=None): super(SumToOneNormLayer, self).__init__( name, 'sum_to_one_norm', 0, inputs=inputs, device=device) config_assert( len(self.inputs) == 1, 'SumToOneNormLayer must have 1 input') input_layer0 = self.get_input_layer(0) self.set_layer_size(input_layer0.size) 
@config_layer('row_l2_norm')
class RowL2NormLayer(LayerBase):
    """L2-normalizes each row of its single input."""

    def __init__(self, name, inputs, **xargs):
        super(RowL2NormLayer, self).__init__(
            name, 'row_l2_norm', 0, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'RowL2NormLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        self.set_layer_size(input_layer.size)


@config_layer('cos')
class CosSimLayer(LayerBase):
    """Cosine similarity of two equally-sized inputs, scaled by cos_scale.

    Output width is 1.
    """

    def __init__(self, name, inputs, cos_scale=1, device=None):
        super(CosSimLayer, self).__init__(
            name, 'cos', 1, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 2,
            'The CosSimLayer expects two and only two inputs.')
        config_assert(
            self.get_input_layer(0).size == self.get_input_layer(1).size,
            'The two inputs of CosSimLayer must have the same dimensionality.')
        self.config.cos_scale = cos_scale


@config_layer('cos_vm')
class CosSimVecMatLayer(LayerBase):
    """Cosine similarity between a vector (input 0) and each row of a
    matrix (input 1, width size * size0); output width is `size`."""

    def __init__(self, name, size, inputs, cos_scale=1.0, device=None):
        super(CosSimVecMatLayer, self).__init__(
            name, 'cos_vm', size, inputs=inputs, device=device)
        self.config.cos_scale = cos_scale
        config_assert(
            len(self.inputs) == 2, 'The CosSimVecMatLayer must have 2 inputs.')
        config_assert(
            size * self.get_input_layer(0).size == self.get_input_layer(1).size,
            'Wrong input size for CosSimVecMatLayer.')


@config_layer('l2_distance')
class L2DistanceLayer(LayerBase):
    """Euclidean (L2) distance between two equally-sized inputs.

    Output width is 1.
    """

    def __init__(self, name, inputs, device=None):
        super(L2DistanceLayer, self).__init__(
            name, 'l2_distance', 1, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 2, ('The L2DistanceLayer must have '
                                    'and only have 2 inputs.'))
        config_assert(
            self.get_input_layer(0).size == self.get_input_layer(1).size,
            ('Two inputs of the L2DistanceLayer must have '
             'the same dimensionality.'))


@config_layer('sampling_id')
class SamplingIdLayer(LayerBase):
    """Samples an id from the (distribution) values of its single input."""

    def __init__(self, name, inputs, device=None):
        super(SamplingIdLayer, self).__init__(
            name, 'sampling_id', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 1, 'SamplingIdLayer must have 1 input')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)


# AverageLayer: "average" for each sample within a sequence.
# average_strategy: set to one of the following:
#     'average': plain average.
#     'sum': sum each sample instead of average (which is divide by sample_num).
#     'squarerootn': sum each sample, but divide by sqrt(sample_num).
@config_layer('average')
class AverageLayer(LayerBase):
    """Sequence pooling by average/sum/sqrt-n; see the strategies above.

    `stride` enables stride-window pooling, which is not supported for
    trans_type 'seq'.
    """

    def __init__(self,
                 name,
                 inputs,
                 average_strategy='average',
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(AverageLayer, self).__init__(
            name, 'average', 0, inputs=inputs, **xargs)
        self.config.average_strategy = average_strategy
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.trans_type = trans_type
        self.config.seq_pool_stride = stride
        config_assert(len(inputs) == 1, 'AverageLayer must have 1 input')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('tensor')
class TensorLayer(LayerBase):
    """Bilinear tensor product of two inputs with a learned 3-D weight of
    shape [size0, size1, size]; only input 0 carries the parameter."""

    def __init__(self, name, size, inputs, bias=True, **xargs):
        super(TensorLayer, self).__init__(
            name, 'tensor', size, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 2, 'TensorLayer must have 2 inputs')
        config_assert(size > 0, 'size must be positive')
        # PEP 8: comparisons to None must use `is`, not `==`.
        config_assert(inputs[1].parameter_name is None,
                      'second parameter should be None.')
        input_layer0 = self.get_input_layer(0)
        input_layer1 = self.get_input_layer(1)
        psize = size * input_layer0.size * input_layer1.size
        dims = [input_layer0.size, input_layer1.size, size]
        self.create_input_parameter(0, psize, dims)
        self.create_bias_parameter(bias, size)


@config_layer('mixed')
class MixedLayer(LayerBase):
    """A layer combining arbitrary Projections and Operators.

    Operator inputs (beyond the first) are appended to self.inputs here;
    all non-operator inputs must be Projections. If `size` is 0 it is
    inferred from the first projection/operator that reports a nonzero
    output size; otherwise every reported size must match `size`.
    """

    def __init__(self, name, inputs, size=0, bias=True, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        super(MixedLayer, self).__init__(
            name, 'mixed', size, inputs=inputs, **xargs)
        operator_input_index = []
        for operator in self.operators:
            operator_conf = operator.operator_conf
            # Register the operator's extra input layers (beyond the
            # first) as additional inputs of this layer.
            for i in xrange(1, len(operator.input_layer_names)):
                input_index = len(self.config.inputs)
                operator_conf.input_indices.append(input_index)
                input_config = Input(operator.input_layer_names[i])
                self.inputs.append(input_config)
                layer_input = self.config.inputs.add()
                layer_input.input_layer_name = input_config.input_layer_name
            for input_index in operator_conf.input_indices:
                input_layer = self.get_input_layer(input_index)
                operator_conf.input_sizes.append(input_layer.size)
                operator_input_index.append(input_index)
            if self.config.size == 0:
                size = operator.calc_output_size(operator_conf.input_sizes)
                if size != 0:
                    self.set_layer_size(size)
            else:
                sz = operator.calc_output_size(operator_conf.input_sizes)
                if sz != 0:
                    config_assert(
                        sz == self.config.size,
                        "different inputs have different size: %s vs. %s" %
                        (sz, self.config.size))
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            if input_index not in operator_input_index:
                config_assert(
                    isinstance(input, Projection),
                    "input should be projection or operation")
            if self.config.size == 0 and isinstance(input, Projection):
                size = input.calc_output_size(input_layer)
                if size != 0:
                    self.set_layer_size(size)
            elif isinstance(input, Projection):
                sz = input.calc_output_size(input_layer)
                if sz != 0:
                    config_assert(
                        sz == self.config.size,
                        "different inputs have different size: %s vs. %s" %
                        (sz, self.config.size))
        config_assert(size != 0, "size is not set")

        # Now that the output size is known, finalize every projection's
        # conf and create its parameter.
        for input_index in xrange(len(self.inputs)):
            input = self.inputs[input_index]
            if isinstance(input, Projection):
                input_layer = self.get_input_layer(input_index)
                input.proj_conf.input_size = input_layer.size
                input.proj_conf.output_size = size
                input_config = self.config.inputs[input_index]
                input_config.proj_conf.CopyFrom(input.proj_conf)
                input_config.proj_conf.name = gen_parameter_name(name,
                                                                 input_index)
                psize = input.calc_parameter_size(input_layer.size, size)
                dims = input.calc_parameter_dims(input_layer.size, size)
                self.create_input_parameter(input_index, psize, dims)

        for operator in self.operators:
            operator_conf = operator.operator_conf
            operator_conf.output_size = self.config.size
            operator.check_dims()
            record_operator_conf = self.config.operator_confs.add()
            record_operator_conf.CopyFrom(operator_conf)

        psize = self.config.size
        if isinstance(self.inputs[0], ConvProjection):
            # Conv projections share biases; the bias size is the sum of
            # the per-projection bias sizes, not the layer size.
            self.config.shared_biases = True
            psize = 0
            for input in self.inputs:
                psize += input.calc_bias_size()

        if bias:
            self.config.bias_size = psize
            self.create_bias_parameter(bias, psize)


# like MixedLayer, but no bias parameter
@config_func
def ExpressionLayer(name, inputs, **xargs):
    MixedLayer(name, inputs, bias=False, **xargs)


@config_layer('concat')
class ConcatenateLayer(LayerBase):
    """Concatenates its inputs along the feature dimension.

    All inputs must share height/width/depth; switches to the MKLDNN
    implementation when use_mkldnn is set.
    """

    layer_type = 'concat'

    def __init__(self, name, inputs, bias=False, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        config_assert(not bias, 'ConcatenateLayer cannot support bias.')
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        if self.layer_type == "mkldnn_concat":
            config_assert(use_mkldnn, "mkldnn_concat only support MKLDNN")
        self.layer_type = 'mkldnn_concat' if use_mkldnn else 'concat'
        super(ConcatenateLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        size = 0
        for input_index in xrange(len(self.inputs)):
            assert self.get_input_layer(0).height == self.get_input_layer(
                input_index).height
            assert self.get_input_layer(0).width == self.get_input_layer(
                input_index).width
            assert self.get_input_layer(0).depth == self.get_input_layer(
                input_index).depth
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            if self.config.size == 0:
                size += input_layer.size

        self.set_layer_height_width(self.get_input_layer(0).height, \
                                    self.get_input_layer(0).width)
        self.set_layer_depth(self.get_input_layer(0).depth)
        self.set_layer_size(size)


@config_layer('mkldnn_concat')
class MKLDNNConcatLayer(ConcatenateLayer):
    """MKLDNN variant of ConcatenateLayer (differs only in layer_type)."""

    layer_type = 'mkldnn_concat'


# like concat layer, but each input layer was processed by a Projection.
@config_layer('concat2')
class ConcatenateLayer2(LayerBase):
    """Concatenates projection outputs; if the first input is a
    ConvProjection all inputs must be ConvProjections and biases are
    shared."""

    def __init__(self, name, inputs, bias=False, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        super(ConcatenateLayer2, self).__init__(
            name, 'concat2', 0, inputs=inputs, **xargs)

        if isinstance(self.inputs[0], ConvProjection):
            for input_index in xrange(len(self.inputs) - 1):
                input = self.inputs[input_index + 1]
                config_assert(
                    isinstance(input, ConvProjection),
                    "The first input of ConcatenateLayer2 is ConvProjection, "
                    "the other inputs should also be ConvProjection.")

        size = 0
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            output_size = input.calc_output_size(input_layer)
            config_assert(output_size != 0, "proj output size is not set")
            size += output_size

        self.set_layer_size(size)

        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            input.proj_conf.input_size = input_layer.size
            input.proj_conf.output_size = input.calc_output_size(input_layer)

            input_config = self.config.inputs[input_index]
            input_config.proj_conf.CopyFrom(input.proj_conf)
            input_config.proj_conf.name = gen_parameter_name(name, input_index)
            psize = input.calc_parameter_size(input.proj_conf.input_size,
                                              input.proj_conf.output_size)
            dims = input.calc_parameter_dims(input.proj_conf.input_size,
                                             input.proj_conf.output_size)
            self.create_input_parameter(input_index, psize, dims)

        psize = self.config.size
        if isinstance(self.inputs[0], ConvProjection):
            self.config.shared_biases = True
            psize = 0
            for input in self.inputs:
                psize += input.calc_bias_size()

        if bias:
            self.config.bias_size = psize
            self.create_bias_parameter(bias, psize)


@config_layer('recurrent')
class RecurrentLayer(LayerBase):
    """Simple (fully-connected) recurrent layer; optionally reversed.

    Uses the MKL packed implementation when use_mkl_packed is set.
    """

    layer_type = 'recurrent'

    def __init__(self, name, inputs, reversed=False, bias=True, **xargs):
        use_mkl_packed = bool(
            int(g_command_config_args.get("use_mkl_packed", 0)))
        self.layer_type = 'mkl_packed_recurrent' if use_mkl_packed else 'recurrent'
        super(RecurrentLayer, self).__init__(name, self.layer_type, 0, inputs,
                                             **xargs)
        config_assert(len(self.inputs) == 1, 'RecurrentLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        size = input_layer.size
        self.set_layer_size(size)
        self.config.reversed = reversed
        dims = [size, size]
        self.create_input_parameter(0, size * size, dims)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('lstmemory')
class LstmLayer(LayerBase):
    """LSTM layer: input holds 4 gates, so output size is input size / 4.

    The bias also covers 3 peephole connections (4 + 3 = 7 chunks).
    """

    def __init__(self,
                 name,
                 inputs,
                 reversed=False,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(LstmLayer, self).__init__(name, 'lstmemory', 0, inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'LstmLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        #check input_layer.size is divided by 4
        config_assert(input_layer.size % 4 == 0, "size % 4 should be 0!")
        size = input_layer.size / 4
        self.set_layer_size(size)
        self.config.reversed = reversed
        self.config.active_gate_type = active_gate_type
        self.config.active_state_type = active_state_type
        self.create_input_parameter(0, size * size * 4, [size, size, 4])
        #bias includes 3 kinds of peephole, 4 + 3 = 7
        self.create_bias_parameter(bias, size * 7)


@config_layer('lstm_step')
class LstmStepLayer(LayerBase):
    """One LSTM step inside a recurrent group: input 0 carries the 4
    gates (4 * size), input 1 the previous state (size)."""

    def __init__(self,
                 name,
                 size,
                 inputs,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(LstmStepLayer, self).__init__(name, 'lstm_step', size, inputs,
                                            **xargs)
        config_assert(len(inputs) == 2, 'LstmStepLayer must have 2 inputs')
        input_layer0 = self.get_input_layer(0)
        input_layer1 = self.get_input_layer(1)
        config_assert(input_layer0.size == 4 * size,
                      'input_layer0.size != 4 * layer.size')
        config_assert(input_layer1.size == size,
                      'input_layer1.size != layer.size')
        self.config.active_gate_type = active_gate_type
        self.config.active_state_type = active_state_type
        # Bias only covers the 3 peephole connections in a step layer.
        self.create_bias_parameter(bias, size * 3)


# get the specific output from the input layer.
@config_layer('get_output')
class GetOutputLayer(LayerBase):
    """Fetches a named output (input_layer_argument) of its input layer."""

    def __init__(self, name, size, inputs):
        super(GetOutputLayer, self).__init__(name, 'get_output', size, inputs)
        config_assert(
            len(self.inputs) == 1, 'GetOutputLayer must have 1 inputs')
        inputs = self.inputs[0]
        config_assert(inputs.input_layer_argument,
                      'input_layer_argument cannot be empty')


@config_layer('mdlstmemory')
class MDLstmLayer(LayerBase):
    """Multi-dimensional LSTM: one forget gate per dimension, so the
    input width must be divisible by (3 + dim_num)."""

    def __init__(self,
                 name,
                 inputs,
                 directions=True,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(MDLstmLayer, self).__init__(name, 'mdlstmemory', 0, inputs,
                                          **xargs)
        config_assert(len(self.inputs) == 1, 'MDLstmLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        dim_num = len(directions)
        #check input_layer.size is divided by (3+dim_num)
        config_assert(input_layer.size % (3 + dim_num) == 0,
                      "size % (dim_num) should be 0!")
        size = input_layer.size / (3 + dim_num)
        self.set_layer_size(size)
        self.config.active_gate_type = active_gate_type
        self.config.active_state_type = active_state_type
        for i in xrange(len(directions)):
            self.config.directions.append(int(directions[i]))
        self.create_input_parameter(0, size * size * (3 + dim_num),
                                    [size, size, 3 + dim_num])
        #bias includes 3 kinds of peephole, 3+dim_num+2+dim_num
        self.create_bias_parameter(bias, size * (5 + 2 * dim_num))
@config_layer('gated_recurrent') class GatedRecurrentLayer(LayerBase): def __init__(self, name, inputs, reversed=False, active_gate_type="sigmoid", bias=True, **xargs): super(GatedRecurrentLayer, self).__init__(name, 'gated_recurrent', 0, inputs, **xargs) config_assert( len(self.inputs) == 1, 'GatedRecurrentLayer must have 1 input') input_layer = self.get_input_layer(0) #check input_layer.size is divided by 3 config_assert(input_layer.size % 3 == 0, "size % 3 should be 0!") size = input_layer.size / 3 self.set_layer_size(size) self.config.reversed = reversed self.config.active_gate_type = active_gate_type self.create_input_parameter(0, size * size * 3, [size, size * 3]) self.create_bias_parameter(bias, size * 3) @config_layer('gru_step') class GruStepLayer(LayerBase): def __init__(self, name, size, inputs, active_gate_type="sigmoid", bias=True, **xargs): super(GruStepLayer, self).__init__(name, 'gru_step', size, inputs, **xargs) config_assert(len(self.inputs) == 2, 'GruStepLayer must have 2 input') input_layer0 = self.get_input_layer(0) input_layer1 = self.get_input_layer(1) config_assert(input_layer0.size == 3 * size, 'input_layer0.size != 3 * layer.size') config_assert(input_layer1.size == size, 'input_layer1.size != layer.size') self.config.active_gate_type = active_gate_type self.create_input_parameter(0, size * size * 3, [size, size * 3]) self.create_bias_parameter(bias, size * 3) ''' A layer for calculating the cost of sequential conditional random field model. 
Example: CRFLayer(name="crf_cost", size=label_num, inputs=["output", "label", "weight"]) where "weight" is optional, one weight for each sequence @param coeff: weight of the layer ''' @config_layer('crf') class CRFLayer(LayerBase): def __init__(self, name, size, inputs, coeff=1.0, device=None): super(CRFLayer, self).__init__(name, 'crf', size, inputs, device=device) config_assert(2 <= len(self.inputs) <= 3, 'CRFLayer must have 2 or 3 inputs') self.create_input_parameter(0, size * (size + 2), [size + 2, size]) self.config.coeff = coeff ''' A layer for calculating the decoding sequence of sequential conditional random field model. The decoding sequence is stored in output_.ids If a second input is provided, it is treated as the ground-truth label, and this layer will also calculate error, output_.value[i] is 1 for incorrect decoding or 0 for correct decoding ''' @config_layer('crf_decoding') class CRFDecodingLayer(LayerBase): def __init__(self, name, size, inputs, device=None): super(CRFDecodingLayer, self).__init__( name, 'crf_decoding', size, inputs, device=device) config_assert( len(self.inputs) <= 2, 'CRFDecodingLayer cannot have more than 2 inputs') self.create_input_parameter(0, size * (size + 2), [size + 2, size]) @config_layer('ctc') class CTCLayer(LayerBase): def __init__(self, name, size, inputs, norm_by_times=False, device=None): super(CTCLayer, self).__init__(name, 'ctc', size, inputs, device=device) self.config.norm_by_times = norm_by_times config_assert(len(self.inputs) == 2, 'CTCLayer must have 2 inputs') @config_layer('kmax_seq_score') class KmaxSeqScoreLayer(LayerBase): def __init__(self, name, inputs, beam_size, **xargs): super(KmaxSeqScoreLayer, self).__init__( name, 'kmax_seq_score', 0, inputs=inputs, **xargs) config_assert( len(self.inputs) == 1, 'KmaxSeqScoreLayer has only one input.') self.config.beam_size = beam_size @config_layer('warp_ctc') class WarpCTCLayer(LayerBase): def __init__(self, name, size, inputs, blank=0, norm_by_times=False, 
device=None): super(WarpCTCLayer, self).__init__( name, 'warp_ctc', size=size, inputs=inputs, device=device) self.config.blank = blank self.config.norm_by_times = norm_by_times config_assert(len(self.inputs) == 2, 'WarpCTCLayer must have 2 inputs') input_layer = self.get_input_layer(0) config_assert( (input_layer.active_type == '' or input_layer.active_type == 'linear'), "Expecting the active_type of input layer to be linear or null") @config_layer('recurrent_layer_group') class RecurrentLayerGroup(LayerBase): def __init__(self, name, device=None): super(RecurrentLayerGroup, self).__init__( name, 'recurrent_layer_group', 0, inputs=[], device=device) @config_layer('switch_order') class SwitchOrderLayer(LayerBase): def __init__(self, name, inputs, reshape, **xargs): super(SwitchOrderLayer, self).__init__( name, 'switch_order', 0, inputs=inputs, **xargs) self.config.reshape_conf.height_axis.extend(reshape['height']) self.config.reshape_conf.width_axis.extend(reshape['width']) input_layer = self.get_input_layer(0) if reshape is None: self.set_layer_size(input_layer.size) else: in_h = input_layer.height in_w = input_layer.width out_dims = None if input_layer.has_depth(): in_d = input_layer.depth in_c = input_layer.size / in_h / in_w / in_d # batch_size, depth, height, width, channel out_dims = [0, in_d, in_h, in_w, in_c] else: in_c = input_layer.size / in_h / in_w # batch_size, height, width, channel out_dims = [0, in_h, in_w, in_c] # Because (reshape['width'][0] > 0) always be true. # So out_dims[0] won't be used. 
size = reduce(lambda x, y: x * y, out_dims[reshape['width'][0]:]) self.set_layer_size(size) @config_layer('scale_sub_region') class ScaleSubRegionLayer(LayerBase): def __init__(self, name, inputs, value, **xargs): super(ScaleSubRegionLayer, self).__init__( name, 'scale_sub_region', 0, inputs=inputs, **xargs) scale_sub_region_conf = self.config.inputs[0].scale_sub_region_conf scale_sub_region_conf.value = value # get channel, width and height from input_0 layer input_layer = self.get_input_layer(0) image_conf = scale_sub_region_conf.image_conf image_conf.img_size = input_layer.width image_conf.img_size_y = input_layer.height image_conf.channels = input_layer.size / (input_layer.width * input_layer.height) self.set_cnn_layer(name, image_conf.img_size_y, image_conf.img_size, image_conf.channels) @config_layer('factorization_machine') class FactorizationMachineLayer(LayerBase): def __init__(self, name, inputs, factor_size, **xargs): super(FactorizationMachineLayer, self).__init__( name, 'factorization_machine', size=1, inputs=inputs, **xargs) config_assert( len(self.inputs) == 1, 'factorization machine layer must have one and only one input.') self.config.factor_size = factor_size input_layer = self.get_input_layer(0) psize = input_layer.size * factor_size dims = [input_layer.size, factor_size] self.create_input_parameter(0, psize, dims) # Deprecated, use a new layer specific class instead @config_func def Layer(name, type, **xargs): layers = {} layers.update(g_cost_map) layers.update(g_layer_type_map) layer_func = layers.get(type) config_assert(layer_func, "layer type '%s' not supported." 
% type) return layer_func(name, **xargs) @config_func def ParameterHook(type, **kwargs): if type == 'pruning': hook = ParameterUpdaterHookConfig() hook.type = type sparsity_ratio = kwargs.get('sparsity_ratio', None) if sparsity_ratio is not None: hook.sparsity_ratio = sparsity_ratio return hook elif type == 'dpruning': hook = ParameterUpdaterHookConfig() hook.type = type return hook else: return None @config_func def Parameter(name, size, device, dims, learning_rate=None, momentum=None, decay_rate=None, decay_rate_l1=None, initial_mean=None, initial_std=None, initial_strategy=None, initial_smart=None, num_batches_regularization=None, sparse_remote_update=None, sparse_update=None, gradient_clipping_threshold=None, sparse=None, format=None, need_compact=None, is_static=None, is_shared=None, update_hooks=None, initializer=None): config_assert(name not in g_parameter_map, 'Duplicated parameter name: ' + name) para = g_config.model_config.parameters.add() para.name = name para.size = size if device is not None: para.device = int(device) para.dims.extend(dims) if learning_rate is not None: para.learning_rate = float(learning_rate) momentum = default(momentum, g_default_momentum) if momentum is not None: para.momentum = float(momentum) config_assert(not momentum or not decay_rate_l1, "momentum and decay_rate_l1 cannot both be non-zero") decay_rate = default(decay_rate, g_default_decay_rate) if decay_rate is not None: para.decay_rate = decay_rate if decay_rate_l1 is not None: para.decay_rate_l1 = decay_rate_l1 para.initial_std = default(initial_std, g_default_initial_std) para.initial_mean = default(initial_mean, g_default_initial_mean) num_batches_regularization = default(num_batches_regularization, g_default_num_batches_regularization) if num_batches_regularization is not None: para.num_batches_regularization = int(num_batches_regularization) if sparse_remote_update is not None: para.sparse_remote_update = sparse_remote_update if sparse_remote_update: 
g_config.opt_config.use_sparse_remote_updater = True if sparse_update is not None: para.sparse_update = sparse_update gradient_clipping_threshold = default(gradient_clipping_threshold, g_default_gradient_clipping_threshold) if gradient_clipping_threshold is not None: para.gradient_clipping_threshold = gradient_clipping_threshold para.initial_strategy = default(initial_strategy, g_default_initial_strategy) para.initial_smart = default(initial_smart, g_default_initial_smart) if para.initial_smart: para.initial_mean = 0. if len(para.dims) != 0: para.initial_std = 1. / math.sqrt(para.dims[0]) else: print( "Use initial_smart, but dims not set. Initial_smart may not be used in this layer" ) traceback.print_exc() para.initial_std = 1. / math.sqrt(para.size) if g_default_compact_func is not None: sparse, format, need_compact = g_default_compact_func(para.name) if sparse is not None: para.is_sparse = sparse if format is not None: para.format = format if need_compact is not None: para.need_compact = need_compact if is_static is not None: para.is_static = is_static config_assert(not para.sparse_remote_update or not para.is_static, "sparse_remote_update and is_static cannot both be true") if is_shared is not None: para.is_shared = is_shared update_hooks = default(update_hooks, g_default_update_hooks) if update_hooks is not None: if hasattr(update_hooks, '__call__'): update_hooks = update_hooks() if isinstance(update_hooks, list): for hook in update_hooks: para.update_hooks.extend([hook]) else: para.update_hooks.extend([update_hooks]) g_parameter_map[name] = para if initializer is not None: config_assert( callable(initializer), "parameter initializer should be a callable object") g_parameter_initializer_map[name] = initializer @config_func def default_initial_std(val): global g_default_initial_std g_default_initial_std = val @config_func def default_initial_mean(val): global g_default_initial_mean g_default_initial_mean = val @config_func def default_initial_strategy(val): 
global g_default_initial_strategy g_default_initial_strategy = val @config_func def default_initial_smart(val): global g_default_initial_smart g_default_initial_smart = val @config_func def default_momentum(val): global g_default_momentum g_default_momentum = val @config_func def default_decay_rate(val): global g_default_decay_rate g_default_decay_rate = val @config_func def default_num_batches_regularization(val): global g_default_num_batches_regularization g_default_num_batches_regularization = val @config_func def default_gradient_clipping_threshold(val): global g_default_gradient_clipping_threshold g_default_gradient_clipping_threshold = val @config_func def default_device(val): global g_default_device g_default_device = val @config_func def default_update_hooks(val): global g_default_update_hooks g_default_update_hooks = val @config_func def default_compact_func(val): global g_default_compact_func g_default_compact_func = val def make_importer(config_dir, config_args): def Import(config_file, local_args={}): if not config_file.startswith('/'): config_file = config_dir + '/' + config_file g_config.config_files.append(config_file) execfile(config_file, make_config_environment(config_file, config_args), local_args) return Import DEFAULT_SETTING = dict( batch_size=None, mini_batch_size=None, algorithm='async_sgd', async_lagged_grad_discard_ratio=1.5, learning_method='momentum', gradient_clipping_threshold=None, num_batches_per_send_parameter=None, num_batches_per_get_parameter=None, center_parameter_update_method=None, learning_rate=1., learning_rate_decay_a=0., learning_rate_decay_b=0., learning_rate_schedule='poly', learning_rate_args='', l1weight=0.1, l2weight=0., l2weight_zero_iter=0, c1=0.0001, backoff=0.5, owlqn_steps=10, max_backoff=5, average_window=0, do_average_in_cpu=False, max_average_window=None, ada_epsilon=1e-6, ada_rou=0.95, delta_add_rate=1.0, shrink_parameter_value=0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, ) settings = 
copy.deepcopy(DEFAULT_SETTING) settings_deprecated = dict(usage_ratio=1., ) trainer_settings = dict( save_dir="./output/model", init_model_path=None, start_pass=0, ) @config_func def Settings(**args): for k, v in args.iteritems(): if k == "usage_ratio": logger.warning( "Deprecated: define usage_ratio in DataConfig instead") if g_config.HasField("data_config"): g_config.data_config.__setattr__(k, v) settings_deprecated[k] = v continue elif k in settings: settings[k] = v elif k in trainer_settings: trainer_settings[k] = v else: logger.fatal('Unkown setting: %s' % k) @config_func def cluster_config(**args): pass @config_func def EnableSubmodelSuffix(flag=True): """ If enabled, the layer and evaluator names in submodel will be automatically appended with @submodel_name """ global g_add_submodel_suffix g_add_submodel_suffix = flag def make_config_environment(config_file, config_args): def make_setter(k): def setter(v): logger.fatal("Obsolete: use Settings(%s=%s, ...) instead" % (k, v)) return setter funcs = {} funcs.update(g_config_funcs) for k in settings.iterkeys(): funcs[k] = make_setter(k) for k in settings_deprecated.iterkeys(): funcs[k] = make_setter(k) config_dir = os.path.dirname(config_file) if not config_dir: config_dir = '.' 
funcs.update( Import=make_importer(config_dir, config_args), get_config_arg=make_get_config_arg(config_args), ) funcs.update(g_extended_config_funcs) return funcs def make_get_config_arg(config_args): def get_config_arg(name, type, default=None): if type == bool: s = config_args.get(name) if not s: return default if s == 'True' or s == '1' or s == 'true': return True if s == 'False' or s == '0' or s == 'false': return False raise ValueError('Value of config_arg %s is not boolean' % name) else: return type(config_args.get(name, default)) return get_config_arg def importlib(name): __import__(name) return sys.modules[name] def find_caller(): stack = traceback.extract_stack() for s in stack[-4::-1]: if not s[0].endswith('config_parser.py'): return s[0], s[1], s[2] return "(unknown file)", 0, "(unknown function)" def my_fatal(s): logger.critical(s) raise Exception() _parse_config_hooks = set() def register_parse_config_hook(f): """ Register a hook function for parse_config. parse_config will invoke the hook at the beginning of parse. This make it possible to reset global state for for constructing the model. """ _parse_config_hooks.add(f) def update_g_config(): ''' Update g_config after execute config_file or config_functions. 
''' for k, v in settings.iteritems(): if v is None: continue g_config.opt_config.__setattr__(k, v) for k, v in trainer_settings.iteritems(): if v is None: continue g_config.__setattr__(k, v) for name in g_config.model_config.input_layer_names: assert name in g_layer_map, \ 'input name "%s" does not correspond to a layer name' % name assert (g_layer_map[name].type == "data" or g_layer_map[name].type == "data_trim"), \ 'The type of input layer "%s" is not "data"' % name for name in g_config.model_config.output_layer_names: assert name in g_layer_map, \ 'input name "%s" does not correspond to a layer name' % name return g_config def begin_parse(): init_config_environment() for hook in _parse_config_hooks: hook() logger.findCaller = find_caller logger.fatal = my_fatal g_config.model_config.type = "nn" global g_current_submodel, g_root_submodel g_root_submodel = g_config.model_config.sub_models.add() g_root_submodel.name = 'root' g_root_submodel.is_recurrent_layer_group = False g_current_submodel = g_root_submodel def parse_config(trainer_config, config_arg_str): ''' @param config_arg_str: a string of the form var1=val1,var2=val2. 
It will be passed to config script as a dictionary CONFIG_ARGS ''' begin_parse() config_args = {} if config_arg_str: config_args = dict([f.split('=') for f in config_arg_str.split(',')]) global g_command_config_args g_command_config_args.update(config_args) extension_module_name = config_args.get('extension_module_name') if extension_module_name: global g_extended_config_funcs extension_module = importlib(extension_module_name) g_extended_config_funcs = extension_module.get_config_funcs(g_config) if hasattr(trainer_config, '__call__'): trainer_config.func_globals.update( make_config_environment("", config_args)) trainer_config() else: execfile(trainer_config, make_config_environment(trainer_config, config_args)) return update_g_config() def parse_config_and_serialize(trainer_config, config_arg_str): try: config = parse_config(trainer_config, config_arg_str) #logger.info(config) return config.SerializeToString() except: traceback.print_exc() raise if __name__ == '__main__': try: config = parse_config(sys.argv[1], '') config.SerializeToString() __real_print__(str(config)) except: traceback.print_exc() raise
166,008
36.322167
111
py
Paddle
Paddle-master/python/paddle/trainer/__init__.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
609
42.571429
74
py
Paddle
Paddle-master/python/paddle/dataset/wmt16.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ACL2016 Multimodal Machine Translation. Please see this website for more details: http://www.statmt.org/wmt16/multimodal-task.html#task1 If you use the dataset created for your task, please cite the following paper: Multi30K: Multilingual English-German Image Descriptions. @article{elliott-EtAl:2016:VL16, author = {{Elliott}, D. and {Frank}, S. and {Sima"an}, K. and {Specia}, L.}, title = {Multi30K: Multilingual English-German Image Descriptions}, booktitle = {Proceedings of the 6th Workshop on Vision and Language}, year = {2016}, pages = {70--74}, year = 2016 } """ import os import tarfile import gzip from collections import defaultdict import paddle.dataset.common __all__ = [ "train", "test", "validation", "convert", "fetch", "get_dict", ] DATA_URL = ("http://cloud.dlnel.org/filepub/" "?uuid=46a0808e-ddd8-427c-bacd-0dbc6d045fed") DATA_MD5 = "0c38be43600334966403524a40dcd81e" TOTAL_EN_WORDS = 11250 TOTAL_DE_WORDS = 19220 START_MARK = "<s>" END_MARK = "<e>" UNK_MARK = "<unk>" def __build_dict(tar_file, dict_size, save_path, lang): word_dict = defaultdict(int) with tarfile.open(tar_file, mode="r") as f: for line in f.extractfile("wmt16/train"): line_split = line.strip().split("\t") if len(line_split) != 2: continue sen = line_split[0] if lang == "en" else line_split[1] for w in sen.split(): word_dict[w] += 1 with open(save_path, "w") as fout: fout.write("%s\n%s\n%s\n" 
% (START_MARK, END_MARK, UNK_MARK)) for idx, word in enumerate( sorted( word_dict.iteritems(), key=lambda x: x[1], reverse=True)): if idx + 3 == dict_size: break fout.write("%s\n" % (word[0])) def __load_dict(tar_file, dict_size, lang, reverse=False): dict_path = os.path.join(paddle.dataset.common.DATA_HOME, "wmt16/%s_%d.dict" % (lang, dict_size)) if not os.path.exists(dict_path) or ( len(open(dict_path, "r").readlines()) != dict_size): __build_dict(tar_file, dict_size, dict_path, lang) word_dict = {} with open(dict_path, "r") as fdict: for idx, line in enumerate(fdict): if reverse: word_dict[idx] = line.strip() else: word_dict[line.strip()] = idx return word_dict def __get_dict_size(src_dict_size, trg_dict_size, src_lang): src_dict_size = min(src_dict_size, (TOTAL_EN_WORDS if src_lang == "en" else TOTAL_DE_WORDS)) trg_dict_size = min(trg_dict_size, (TOTAL_DE_WORDS if src_lang == "en" else TOTAL_EN_WORDS)) return src_dict_size, trg_dict_size def reader_creator(tar_file, file_name, src_dict_size, trg_dict_size, src_lang): def reader(): src_dict = __load_dict(tar_file, src_dict_size, src_lang) trg_dict = __load_dict(tar_file, trg_dict_size, ("de" if src_lang == "en" else "en")) # the indice for start mark, end mark, and unk are the same in source # language and target language. Here uses the source language # dictionary to determine their indices. 
start_id = src_dict[START_MARK] end_id = src_dict[END_MARK] unk_id = src_dict[UNK_MARK] src_col = 0 if src_lang == "en" else 1 trg_col = 1 - src_col with tarfile.open(tar_file, mode="r") as f: for line in f.extractfile(file_name): line_split = line.strip().split("\t") if len(line_split) != 2: continue src_words = line_split[src_col].split() src_ids = [start_id] + [ src_dict.get(w, unk_id) for w in src_words ] + [end_id] trg_words = line_split[trg_col].split() trg_ids = [trg_dict.get(w, unk_id) for w in trg_words] trg_ids_next = trg_ids + [end_id] trg_ids = [start_id] + trg_ids yield src_ids, trg_ids, trg_ids_next return reader def train(src_dict_size, trg_dict_size, src_lang="en"): """ WMT16 train set reader. This function returns the reader for train data. Each sample the reader returns is made up of three fields: the source language word index sequence, target language word index sequence and next word index sequence. NOTE: The original like for training data is: http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz paddle.dataset.wmt16 provides a tokenized version of the original dataset by using moses's tokenization script: https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl Args: src_dict_size(int): Size of the source language dictionary. Three special tokens will be added into the dictionary: <s> for start mark, <e> for end mark, and <unk> for unknown word. trg_dict_size(int): Size of the target language dictionary. Three special tokens will be added into the dictionary: <s> for start mark, <e> for end mark, and <unk> for unknown word. src_lang(string): A string indicating which language is the source language. Available options are: "en" for English and "de" for Germany. Returns: callable: The train reader. """ if src_lang not in ["en", "de"]: raise ValueError("An error language type. 
Only support: " "en (for English); de(for Germany).") src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size, src_lang) return reader_creator( tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz"), file_name="wmt16/train", src_dict_size=src_dict_size, trg_dict_size=trg_dict_size, src_lang=src_lang) def test(src_dict_size, trg_dict_size, src_lang="en"): """ WMT16 test set reader. This function returns the reader for test data. Each sample the reader returns is made up of three fields: the source language word index sequence, target language word index sequence and next word index sequence. NOTE: The original like for test data is: http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz paddle.dataset.wmt16 provides a tokenized version of the original dataset by using moses's tokenization script: https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl Args: src_dict_size(int): Size of the source language dictionary. Three special tokens will be added into the dictionary: <s> for start mark, <e> for end mark, and <unk> for unknown word. trg_dict_size(int): Size of the target language dictionary. Three special tokens will be added into the dictionary: <s> for start mark, <e> for end mark, and <unk> for unknown word. src_lang(string): A string indicating which language is the source language. Available options are: "en" for English and "de" for Germany. Returns: callable: The test reader. """ if src_lang not in ["en", "de"]: raise ValueError("An error language type. 
" "Only support: en (for English); de(for Germany).") src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size, src_lang) return reader_creator( tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz"), file_name="wmt16/test", src_dict_size=src_dict_size, trg_dict_size=trg_dict_size, src_lang=src_lang) def validation(src_dict_size, trg_dict_size, src_lang="en"): """ WMT16 validation set reader. This function returns the reader for validation data. Each sample the reader returns is made up of three fields: the source language word index sequence, target language word index sequence and next word index sequence. NOTE: The original like for validation data is: http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz paddle.dataset.wmt16 provides a tokenized version of the original dataset by using moses's tokenization script: https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl Args: src_dict_size(int): Size of the source language dictionary. Three special tokens will be added into the dictionary: <s> for start mark, <e> for end mark, and <unk> for unknown word. trg_dict_size(int): Size of the target language dictionary. Three special tokens will be added into the dictionary: <s> for start mark, <e> for end mark, and <unk> for unknown word. src_lang(string): A string indicating which language is the source language. Available options are: "en" for English and "de" for Germany. Returns: callable: The validation reader. """ if src_lang not in ["en", "de"]: raise ValueError("An error language type. 
" "Only support: en (for English); de(for Germany).") src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size, src_lang) return reader_creator( tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz"), file_name="wmt16/val", src_dict_size=src_dict_size, trg_dict_size=trg_dict_size, src_lang=src_lang) def get_dict(lang, dict_size, reverse=False): """ return the word dictionary for the specified language. Args: lang(string): A string indicating which language is the source language. Available options are: "en" for English and "de" for Germany. dict_size(int): Size of the specified language dictionary. reverse(bool): If reverse is set to False, the returned python dictionary will use word as key and use index as value. If reverse is set to True, the returned python dictionary will use index as key and word as value. Returns: dict: The word dictionary for the specific language. """ if lang == "en": dict_size = min(dict_size, TOTAL_EN_WORDS) else: dict_size = min(dict_size, TOTAL_DE_WORDS) dict_path = os.path.join(paddle.dataset.common.DATA_HOME, "wmt16/%s_%d.dict" % (lang, dict_size)) assert os.path.exists(dict_path), "Word dictionary does not exist. " "Please invoke paddle.dataset.wmt16.train/test/validation first " "to build the dictionary." tar_file = os.path.join(paddle.dataset.common.DATA_HOME, "wmt16.tar.gz") return __load_dict(tar_file, dict_size, lang, reverse) def fetch(): """download the entire dataset. """ paddle.v4.dataset.common.download(DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz") def convert(path, src_dict_size, trg_dict_size, src_lang): """Converts dataset to recordio format. 
""" paddle.dataset.common.convert( path, train( src_dict_size=src_dict_size, trg_dict_size=trg_dict_size, src_lang=src_lang), 1000, "wmt16_train") paddle.dataset.common.convert( path, test( src_dict_size=src_dict_size, trg_dict_size=trg_dict_size, src_lang=src_lang), 1000, "wmt16_test") paddle.dataset.common.convert( path, validation( src_dict_size=src_dict_size, trg_dict_size=trg_dict_size, src_lang=src_lang), 1000, "wmt16_validation")
13,364
37.185714
90
py
Paddle
Paddle-master/python/paddle/dataset/image.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file contains some common interfaces for image preprocess. Many users are confused about the image layout. We introduce the image layout as follows. - CHW Layout - The abbreviations: C=channel, H=Height, W=Width - The default layout of image opened by cv2 or PIL is HWC. PaddlePaddle only supports the CHW layout. And CHW is simply a transpose of HWC. It must transpose the input image. - Color format: RGB or BGR OpenCV use BGR color format. PIL use RGB color format. Both formats can be used for training. Noted that, the format should be keep consistent between the training and inference peroid. """ import numpy as np try: import cv2 except ImportError: cv2 = None import os import tarfile import cPickle __all__ = [ "load_image_bytes", "load_image", "resize_short", "to_chw", "center_crop", "random_crop", "left_right_flip", "simple_transform", "load_and_transform", "batch_images_from_tar" ] def batch_images_from_tar(data_file, dataset_name, img2label, num_per_batch=1024): """ Read images from tar file and batch them into batch file. 
:param data_file: path of image tar file :type data_file: string :param dataset_name: 'train','test' or 'valid' :type dataset_name: string :param img2label: a dic with image file name as key and image's label as value :type img2label: dic :param num_per_batch: image number per batch file :type num_per_batch: int :return: path of list file containing paths of batch file :rtype: string """ batch_dir = data_file + "_batch" out_path = "%s/%s" % (batch_dir, dataset_name) meta_file = "%s/%s.txt" % (batch_dir, dataset_name) if os.path.exists(out_path): return meta_file else: os.makedirs(out_path) tf = tarfile.open(data_file) mems = tf.getmembers() data = [] labels = [] file_id = 0 for mem in mems: if mem.name in img2label: data.append(tf.extractfile(mem).read()) labels.append(img2label[mem.name]) if len(data) == num_per_batch: output = {} output['label'] = labels output['data'] = data cPickle.dump( output, open('%s/batch_%d' % (out_path, file_id), 'w'), protocol=cPickle.HIGHEST_PROTOCOL) file_id += 1 data = [] labels = [] if len(data) > 0: output = {} output['label'] = labels output['data'] = data cPickle.dump( output, open('%s/batch_%d' % (out_path, file_id), 'w'), protocol=cPickle.HIGHEST_PROTOCOL) with open(meta_file, 'a') as meta: for file in os.listdir(out_path): meta.write(os.path.abspath("%s/%s" % (out_path, file)) + "\n") return meta_file def load_image_bytes(bytes, is_color=True): """ Load an color or gray image from bytes array. Example usage: .. code-block:: python with open('cat.jpg') as f: im = load_image_bytes(f.read()) :param bytes: the input image bytes array. :type bytes: str :param is_color: If set is_color True, it will load and return a color image. Otherwise, it will load and return a gray image. :type is_color: bool """ flag = 1 if is_color else 0 file_bytes = np.asarray(bytearray(bytes), dtype=np.uint8) img = cv2.imdecode(file_bytes, flag) return img def load_image(file, is_color=True): """ Load an color or gray image from the file path. 
Example usage: .. code-block:: python im = load_image('cat.jpg') :param file: the input image path. :type file: string :param is_color: If set is_color True, it will load and return a color image. Otherwise, it will load and return a gray image. :type is_color: bool """ # cv2.IMAGE_COLOR for OpenCV3 # cv2.CV_LOAD_IMAGE_COLOR for older OpenCV Version # cv2.IMAGE_GRAYSCALE for OpenCV3 # cv2.CV_LOAD_IMAGE_GRAYSCALE for older OpenCV Version # Here, use constant 1 and 0 # 1: COLOR, 0: GRAYSCALE flag = 1 if is_color else 0 im = cv2.imread(file, flag) return im def resize_short(im, size): """ Resize an image so that the length of shorter edge is size. Example usage: .. code-block:: python im = load_image('cat.jpg') im = resize_short(im, 256) :param im: the input image with HWC layout. :type im: ndarray :param size: the shorter edge size of image after resizing. :type size: int """ h, w = im.shape[:2] h_new, w_new = size, size if h > w: h_new = size * h / w else: w_new = size * w / h im = cv2.resize(im, (h_new, w_new), interpolation=cv2.INTER_CUBIC) return im def to_chw(im, order=(2, 0, 1)): """ Transpose the input image order. The image layout is HWC format opened by cv2 or PIL. Transpose the input image to CHW layout according the order (2,0,1). Example usage: .. code-block:: python im = load_image('cat.jpg') im = resize_short(im, 256) im = to_chw(im) :param im: the input image with HWC layout. :type im: ndarray :param order: the transposed order. :type order: tuple|list """ assert len(im.shape) == len(order) im = im.transpose(order) return im def center_crop(im, size, is_color=True): """ Crop the center of image with size. Example usage: .. code-block:: python im = center_crop(im, 224) :param im: the input image with HWC layout. :type im: ndarray :param size: the cropping size. :type size: int :param is_color: whether the image is color or not. 
:type is_color: bool """ h, w = im.shape[:2] h_start = (h - size) / 2 w_start = (w - size) / 2 h_end, w_end = h_start + size, w_start + size if is_color: im = im[h_start:h_end, w_start:w_end, :] else: im = im[h_start:h_end, w_start:w_end] return im def random_crop(im, size, is_color=True): """ Randomly crop input image with size. Example usage: .. code-block:: python im = random_crop(im, 224) :param im: the input image with HWC layout. :type im: ndarray :param size: the cropping size. :type size: int :param is_color: whether the image is color or not. :type is_color: bool """ h, w = im.shape[:2] h_start = np.random.randint(0, h - size + 1) w_start = np.random.randint(0, w - size + 1) h_end, w_end = h_start + size, w_start + size if is_color: im = im[h_start:h_end, w_start:w_end, :] else: im = im[h_start:h_end, w_start:w_end] return im def left_right_flip(im, is_color=True): """ Flip an image along the horizontal direction. Return the flipped image. Example usage: .. code-block:: python im = left_right_flip(im) :param im: input image with HWC layout or HW layout for gray image :type im: ndarray :param is_color: whether input image is color or not :type is_color: bool """ if len(im.shape) == 3 and is_color: return im[:, ::-1, :] else: return im[:, ::-1] def simple_transform(im, resize_size, crop_size, is_train, is_color=True, mean=None): """ Simply data argumentation for training. These operations include resizing, croping and flipping. Example usage: .. code-block:: python im = simple_transform(im, 256, 224, True) :param im: The input image with HWC layout. :type im: ndarray :param resize_size: The shorter edge length of the resized image. :type resize_size: int :param crop_size: The cropping size. :type crop_size: int :param is_train: Whether it is training or not. :type is_train: bool :param is_color: whether the image is color or not. :type is_color: bool :param mean: the mean values, which can be element-wise mean values or mean values per channel. 
:type mean: numpy array | list """ im = resize_short(im, resize_size) if is_train: im = random_crop(im, crop_size, is_color=is_color) if np.random.randint(2) == 0: im = left_right_flip(im, is_color) else: im = center_crop(im, crop_size, is_color) im = center_crop(im, crop_size, is_color=is_color) if len(im.shape) == 3: im = to_chw(im) im = im.astype('float32') if mean is not None: mean = np.array(mean, dtype=np.float32) # mean value, may be one value per channel if mean.ndim == 1 and is_color: mean = mean[:, np.newaxis, np.newaxis] elif mean.ndim == 1: mean = mean else: # elementwise mean assert len(mean.shape) == len(im) im -= mean return im def load_and_transform(filename, resize_size, crop_size, is_train, is_color=True, mean=None): """ Load image from the input file `filename` and transform image for data argumentation. Please refer to the `simple_transform` interface for the transform operations. Example usage: .. code-block:: python im = load_and_transform('cat.jpg', 256, 224, True) :param filename: The file name of input image. :type filename: string :param resize_size: The shorter edge length of the resized image. :type resize_size: int :param crop_size: The cropping size. :type crop_size: int :param is_train: Whether it is training or not. :type is_train: bool :param is_color: whether the image is color or not. :type is_color: bool :param mean: the mean values, which can be element-wise mean values or mean values per channel. :type mean: numpy array | list """ im = load_image(filename, is_color) im = simple_transform(im, resize_size, crop_size, is_train, is_color, mean) return im
11,097
28.052356
79
py
Paddle
Paddle-master/python/paddle/dataset/uci_housing.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UCI Housing dataset. This module will download dataset from https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and parse training set and test set into paddle reader creators. """ import os import numpy as np import tempfile import tarfile import os import paddle.dataset.common __all__ = ['train', 'test'] URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data' MD5 = 'd4accdce7a25600298819f8e28e8d593' feature_names = [ 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'convert' ] UCI_TRAIN_DATA = None UCI_TEST_DATA = None FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fluid/fit_a_line.fluid.tar' FLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b' def feature_range(maximums, minimums): import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt fig, ax = plt.subplots() feature_num = len(maximums) ax.bar(range(feature_num), maximums - minimums, color='r', align='center') ax.set_title('feature scale') plt.xticks(range(feature_num), feature_names) plt.xlim([-1, feature_num]) fig.set_figheight(6) fig.set_figwidth(10) if not os.path.exists('./image'): os.makedirs('./image') fig.savefig('image/ranges.png', dpi=48) plt.close(fig) def load_data(filename, feature_num=14, ratio=0.8): global UCI_TRAIN_DATA, UCI_TEST_DATA if UCI_TRAIN_DATA is 
not None and UCI_TEST_DATA is not None: return data = np.fromfile(filename, sep=' ') data = data.reshape(data.shape[0] / feature_num, feature_num) maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum( axis=0) / data.shape[0] feature_range(maximums[:-1], minimums[:-1]) for i in xrange(feature_num - 1): data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i]) offset = int(data.shape[0] * ratio) UCI_TRAIN_DATA = data[:offset] UCI_TEST_DATA = data[offset:] def train(): """ UCI_HOUSING training set creator. It returns a reader creator, each sample in the reader is features after normalization and price number. :return: Training reader creator :rtype: callable """ global UCI_TRAIN_DATA load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5)) def reader(): for d in UCI_TRAIN_DATA: yield d[:-1], d[-1:] return reader def test(): """ UCI_HOUSING test set creator. It returns a reader creator, each sample in the reader is features after normalization and price number. :return: Test reader creator :rtype: callable """ global UCI_TEST_DATA load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5)) def reader(): for d in UCI_TEST_DATA: yield d[:-1], d[-1:] return reader def fluid_model(): parameter_tar = paddle.dataset.common.download( FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar') tar = tarfile.TarFile(parameter_tar, mode='r') dirpath = tempfile.mkdtemp() tar.extractall(path=dirpath) return dirpath def predict_reader(): """ It returns just one tuple data to do inference. :return: one tuple data :rtype: tuple """ global UCI_TEST_DATA load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5)) return (UCI_TEST_DATA[0][:-1], ) def fetch(): paddle.dataset.common.download(URL, 'uci_housing', MD5) def convert(path): """ Converts dataset to recordio format """ paddle.dataset.common.convert(path, train(), 1000, "uci_housing_train") paddle.dataset.common.convert(path, test(), 1000, "uci_houseing_test")
4,380
27.448052
109
py
Paddle
Paddle-master/python/paddle/dataset/wmt14.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ WMT14 dataset. The original WMT14 dataset is too large and a small set of data for set is provided. This module will download dataset from http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz and parse training set and test set into paddle reader creators. """ import tarfile import gzip import paddle.dataset.common __all__ = [ 'train', 'test', 'get_dict', 'convert', ] URL_DEV_TEST = ('http://www-lium.univ-lemans.fr/~schwenk/' 'cslm_joint_paper/data/dev+test.tgz') MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5' # this is a small set of data for test. The original data is too large and # will be add later. 
URL_TRAIN = ('http://paddlepaddle.cdn.bcebos.com/demo/' 'wmt_shrinked_data/wmt14.tgz') MD5_TRAIN = '0791583d57d5beb693b9414c5b36798c' # BLEU of this trained model is 26.92 URL_MODEL = 'http://paddlepaddle.bj.bcebos.com/demo/wmt_14/wmt14_model.tar.gz' MD5_MODEL = '0cb4a5366189b6acba876491c8724fa3' START = "<s>" END = "<e>" UNK = "<unk>" UNK_IDX = 2 def __read_to_dict(tar_file, dict_size): def __to_dict(fd, size): out_dict = dict() for line_count, line in enumerate(fd): if line_count < size: out_dict[line.strip()] = line_count else: break return out_dict with tarfile.open(tar_file, mode='r') as f: names = [ each_item.name for each_item in f if each_item.name.endswith("src.dict") ] assert len(names) == 1 src_dict = __to_dict(f.extractfile(names[0]), dict_size) names = [ each_item.name for each_item in f if each_item.name.endswith("trg.dict") ] assert len(names) == 1 trg_dict = __to_dict(f.extractfile(names[0]), dict_size) return src_dict, trg_dict def reader_creator(tar_file, file_name, dict_size): def reader(): src_dict, trg_dict = __read_to_dict(tar_file, dict_size) with tarfile.open(tar_file, mode='r') as f: names = [ each_item.name for each_item in f if each_item.name.endswith(file_name) ] for name in names: for line in f.extractfile(name): line_split = line.strip().split('\t') if len(line_split) != 2: continue src_seq = line_split[0] # one source sequence src_words = src_seq.split() src_ids = [ src_dict.get(w, UNK_IDX) for w in [START] + src_words + [END] ] trg_seq = line_split[1] # one target sequence trg_words = trg_seq.split() trg_ids = [trg_dict.get(w, UNK_IDX) for w in trg_words] # remove sequence whose length > 80 in training mode if len(src_ids) > 80 or len(trg_ids) > 80: continue trg_ids_next = trg_ids + [trg_dict[END]] trg_ids = [trg_dict[START]] + trg_ids yield src_ids, trg_ids, trg_ids_next return reader def train(dict_size): """ WMT14 training set creator. 
It returns a reader creator, each sample in the reader is source language word ID sequence, target language word ID sequence and next word ID sequence. :return: Training reader creator :rtype: callable """ return reader_creator( paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'train/train', dict_size) def test(dict_size): """ WMT14 test set creator. It returns a reader creator, each sample in the reader is source language word ID sequence, target language word ID sequence and next word ID sequence. :return: Test reader creator :rtype: callable """ return reader_creator( paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'test/test', dict_size) def gen(dict_size): return reader_creator( paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'gen/gen', dict_size) def get_dict(dict_size, reverse=True): # if reverse = False, return dict = {'a':'001', 'b':'002', ...} # else reverse = true, return dict = {'001':'a', '002':'b', ...} tar_file = paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN) src_dict, trg_dict = __read_to_dict(tar_file, dict_size) if reverse: src_dict = {v: k for k, v in src_dict.items()} trg_dict = {v: k for k, v in trg_dict.items()} return src_dict, trg_dict def fetch(): paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN) paddle.dataset.common.download(URL_MODEL, 'wmt14', MD5_MODEL) def convert(path): """ Converts dataset to recordio format """ dict_size = 30000 paddle.dataset.common.convert(path, train(dict_size), 1000, "wmt14_train") paddle.dataset.common.convert(path, test(dict_size), 1000, "wmt14_test")
5,713
31.83908
78
py
Paddle
Paddle-master/python/paddle/dataset/voc2012.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image dataset for segmentation. The 2012 dataset contains images from 2008-2011 for which additional segmentations have been prepared. As in previous years the assignment to training/test sets has been maintained. The total number of images with segmentation has been increased from 7,062 to 9,993. """ import tarfile import io import numpy as np from paddle.dataset.common import download from paddle.dataset.image import * from PIL import Image __all__ = ['train', 'test', 'val'] VOC_URL = 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/\ VOCtrainval_11-May-2012.tar' VOC_MD5 = '6cd6e144f989b92b3379bac3b3de84fd' SET_FILE = 'VOCdevkit/VOC2012/ImageSets/Segmentation/{}.txt' DATA_FILE = 'VOCdevkit/VOC2012/JPEGImages/{}.jpg' LABEL_FILE = 'VOCdevkit/VOC2012/SegmentationClass/{}.png' CACHE_DIR = 'voc2012' def reader_creator(filename, sub_name): tarobject = tarfile.open(filename) name2mem = {} for ele in tarobject.getmembers(): name2mem[ele.name] = ele def reader(): set_file = SET_FILE.format(sub_name) sets = tarobject.extractfile(name2mem[set_file]) for line in sets: line = line.strip() data_file = DATA_FILE.format(line) label_file = LABEL_FILE.format(line) data = tarobject.extractfile(name2mem[data_file]).read() label = tarobject.extractfile(name2mem[label_file]).read() data = Image.open(io.BytesIO(data)) label = Image.open(io.BytesIO(label)) data = np.array(data) label = 
np.array(label) yield data, label return reader def train(): """ Create a train dataset reader containing 2913 images in HWC order. """ return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'trainval') def test(): """ Create a test dataset reader containing 1464 images in HWC order. """ return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'train') def val(): """ Create a val dataset reader containing 1449 images in HWC order. """ return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'val')
2,752
31.011628
76
py
Paddle
Paddle-master/python/paddle/dataset/imdb.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ IMDB dataset. This module downloads IMDB dataset from http://ai.stanford.edu/%7Eamaas/data/sentiment/. This dataset contains a set of 25,000 highly polar movie reviews for training, and 25,000 for testing. Besides, this module also provides API for building dictionary. """ import paddle.dataset.common import collections import tarfile import re import string __all__ = ['build_dict', 'train', 'test', 'convert'] URL = 'http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz' MD5 = '7c2ac02c03563afcf9b574c7e56c153a' def tokenize(pattern): """ Read files that match the given pattern. Tokenize and yield each file. """ with tarfile.open(paddle.dataset.common.download(URL, 'imdb', MD5)) as tarf: # Note that we should use tarfile.next(), which does # sequential access of member files, other than # tarfile.extractfile, which does random access and might # destroy hard disks. tf = tarf.next() while tf != None: if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc tokenization. yield tarf.extractfile(tf).read().rstrip("\n\r").translate( None, string.punctuation).lower().split() tf = tarf.next() def build_dict(pattern, cutoff): """ Build a word dictionary from the corpus. Keys of the dictionary are words, and values are zero-based IDs of these words. 
""" word_freq = collections.defaultdict(int) for doc in tokenize(pattern): for word in doc: word_freq[word] += 1 # Not sure if we should prune less-frequent words here. word_freq = filter(lambda x: x[1] > cutoff, word_freq.items()) dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0])) words, _ = list(zip(*dictionary)) word_idx = dict(zip(words, xrange(len(words)))) word_idx['<unk>'] = len(words) return word_idx def reader_creator(pos_pattern, neg_pattern, word_idx): UNK = word_idx['<unk>'] INS = [] def load(pattern, out, label): for doc in tokenize(pattern): out.append(([word_idx.get(w, UNK) for w in doc], label)) load(pos_pattern, INS, 0) load(neg_pattern, INS, 1) def reader(): for doc, label in INS: yield doc, label return reader def train(word_idx): """ IMDB training set creator. It returns a reader creator, each sample in the reader is an zero-based ID sequence and label in [0, 1]. :param word_idx: word dictionary :type word_idx: dict :return: Training reader creator :rtype: callable """ return reader_creator( re.compile("aclImdb/train/pos/.*\.txt$"), re.compile("aclImdb/train/neg/.*\.txt$"), word_idx) def test(word_idx): """ IMDB test set creator. It returns a reader creator, each sample in the reader is an zero-based ID sequence and label in [0, 1]. :param word_idx: word dictionary :type word_idx: dict :return: Test reader creator :rtype: callable """ return reader_creator( re.compile("aclImdb/test/pos/.*\.txt$"), re.compile("aclImdb/test/neg/.*\.txt$"), word_idx) def word_dict(): """ Build a word dictionary from the corpus. :return: Word dictionary :rtype: dict """ return build_dict( re.compile("aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150) def fetch(): paddle.dataset.common.download(URL, 'imdb', MD5) def convert(path): """ Converts dataset to recordio format """ w = word_dict() paddle.dataset.common.convert(path, lambda: train(w), 1000, "imdb_train") paddle.dataset.common.convert(path, lambda: test(w), 1000, "imdb_test")
4,370
28.533784
80
py
Paddle
Paddle-master/python/paddle/dataset/conll05.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Conll05 dataset. Paddle semantic role labeling Book and demo use this dataset as an example. Because Conll05 is not free in public, the default downloaded URL is test set of Conll05 (which is public). Users can change URL and MD5 to their Conll dataset. And a pre-trained word vector model based on Wikipedia corpus is used to initialize SRL model. """ import tarfile import gzip import itertools import paddle.dataset.common __all__ = ['test, get_dict', 'get_embedding', 'convert'] DATA_URL = 'http://www.cs.upc.edu/~srlconll/conll05st-tests.tar.gz' DATA_MD5 = '387719152ae52d60422c016e92a742fc' WORDDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/wordDict.txt' WORDDICT_MD5 = 'ea7fb7d4c75cc6254716f0177a506baa' VERBDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/verbDict.txt' VERBDICT_MD5 = '0d2977293bbb6cbefab5b0f97db1e77c' TRGDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/targetDict.txt' TRGDICT_MD5 = 'd8c7f03ceb5fc2e5a0fa7503a4353751' EMB_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/emb' EMB_MD5 = 'bf436eb0faa1f6f9103017f8be57cdb7' UNK_IDX = 0 def load_label_dict(filename): d = dict() tag_dict = set() with open(filename, 'r') as f: for i, line in enumerate(f): line = line.strip() if line.startswith("B-"): tag_dict.add(line[2:]) elif line.startswith("I-"): 
tag_dict.add(line[2:]) index = 0 for tag in tag_dict: d["B-" + tag] = index index += 1 d["I-" + tag] = index index += 1 d["O"] = index return d def load_dict(filename): d = dict() with open(filename, 'r') as f: for i, line in enumerate(f): d[line.strip()] = i return d def corpus_reader(data_path, words_name, props_name): """ Read one corpus. It returns an iterator. Each element of this iterator is a tuple including sentence and labels. The sentence is consist of a list of word IDs. The labels include a list of label IDs. :return: a iterator of data. :rtype: iterator """ def reader(): tf = tarfile.open(data_path) wf = tf.extractfile(words_name) pf = tf.extractfile(props_name) with gzip.GzipFile(fileobj=wf) as words_file, gzip.GzipFile( fileobj=pf) as props_file: sentences = [] labels = [] one_seg = [] for word, label in itertools.izip(words_file, props_file): word = word.strip() label = label.strip().split() if len(label) == 0: # end of sentence for i in xrange(len(one_seg[0])): a_kind_lable = [x[i] for x in one_seg] labels.append(a_kind_lable) if len(labels) >= 1: verb_list = [] for x in labels[0]: if x != '-': verb_list.append(x) for i, lbl in enumerate(labels[1:]): cur_tag = 'O' is_in_bracket = False lbl_seq = [] verb_word = '' for l in lbl: if l == '*' and is_in_bracket == False: lbl_seq.append('O') elif l == '*' and is_in_bracket == True: lbl_seq.append('I-' + cur_tag) elif l == '*)': lbl_seq.append('I-' + cur_tag) is_in_bracket = False elif l.find('(') != -1 and l.find(')') != -1: cur_tag = l[1:l.find('*')] lbl_seq.append('B-' + cur_tag) is_in_bracket = False elif l.find('(') != -1 and l.find(')') == -1: cur_tag = l[1:l.find('*')] lbl_seq.append('B-' + cur_tag) is_in_bracket = True else: raise RuntimeError('Unexpected label: %s' % l) yield sentences, verb_list[i], lbl_seq sentences = [] labels = [] one_seg = [] else: sentences.append(word) one_seg.append(label) pf.close() wf.close() tf.close() return reader def reader_creator(corpus_reader, word_dict=None, 
predicate_dict=None, label_dict=None): def reader(): for sentence, predicate, labels in corpus_reader(): sen_len = len(sentence) verb_index = labels.index('B-V') mark = [0] * len(labels) if verb_index > 0: mark[verb_index - 1] = 1 ctx_n1 = sentence[verb_index - 1] else: ctx_n1 = 'bos' if verb_index > 1: mark[verb_index - 2] = 1 ctx_n2 = sentence[verb_index - 2] else: ctx_n2 = 'bos' mark[verb_index] = 1 ctx_0 = sentence[verb_index] if verb_index < len(labels) - 1: mark[verb_index + 1] = 1 ctx_p1 = sentence[verb_index + 1] else: ctx_p1 = 'eos' if verb_index < len(labels) - 2: mark[verb_index + 2] = 1 ctx_p2 = sentence[verb_index + 2] else: ctx_p2 = 'eos' word_idx = [word_dict.get(w, UNK_IDX) for w in sentence] ctx_n2_idx = [word_dict.get(ctx_n2, UNK_IDX)] * sen_len ctx_n1_idx = [word_dict.get(ctx_n1, UNK_IDX)] * sen_len ctx_0_idx = [word_dict.get(ctx_0, UNK_IDX)] * sen_len ctx_p1_idx = [word_dict.get(ctx_p1, UNK_IDX)] * sen_len ctx_p2_idx = [word_dict.get(ctx_p2, UNK_IDX)] * sen_len pred_idx = [predicate_dict.get(predicate)] * sen_len label_idx = [label_dict.get(w) for w in labels] yield word_idx, ctx_n2_idx, ctx_n1_idx, \ ctx_0_idx, ctx_p1_idx, ctx_p2_idx, pred_idx, mark, label_idx return reader def get_dict(): """ Get the word, verb and label dictionary of Wikipedia corpus. """ word_dict = load_dict( paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5)) verb_dict = load_dict( paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)) label_dict = load_label_dict( paddle.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)) return word_dict, verb_dict, label_dict def get_embedding(): """ Get the trained word vector based on Wikipedia corpus. """ return paddle.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5) def test(): """ Conll05 test set creator. Because the training dataset is not free, the test dataset is used for training. 
It returns a reader creator, each sample in the reader is nine features, including sentence sequence, predicate, predicate context, predicate context flag and tagged sequence. :return: Training reader creator :rtype: callable """ word_dict, verb_dict, label_dict = get_dict() reader = corpus_reader( paddle.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5), words_name='conll05st-release/test.wsj/words/test.wsj.words.gz', props_name='conll05st-release/test.wsj/props/test.wsj.props.gz') return reader_creator(reader, word_dict, verb_dict, label_dict) def fetch(): paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5) paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5) paddle.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5) paddle.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5) paddle.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5) def convert(path): """ Converts dataset to recordio format """ paddle.dataset.common.convert(path, test(), 1000, "conl105_train") paddle.dataset.common.convert(path, test(), 1000, "conl105_test")
9,313
35.52549
92
py
Paddle
Paddle-master/python/paddle/dataset/imikolov.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ imikolov's simple dataset. This module will download dataset from http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set into paddle reader creators. """ import paddle.dataset.common import collections import tarfile __all__ = ['train', 'test', 'build_dict', 'convert'] URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz' MD5 = '30177ea32e27c525793142b6bf2c8e2d' class DataType(object): NGRAM = 1 SEQ = 2 def word_count(f, word_freq=None): if word_freq is None: word_freq = collections.defaultdict(int) for l in f: for w in l.strip().split(): word_freq[w] += 1 word_freq['<s>'] += 1 word_freq['<e>'] += 1 return word_freq def build_dict(min_word_freq=50): """ Build a word dictionary from the corpus, Keys of the dictionary are words, and values are zero-based IDs of these words. 
""" train_filename = './simple-examples/data/ptb.train.txt' test_filename = './simple-examples/data/ptb.valid.txt' with tarfile.open( paddle.dataset.common.download(paddle.dataset.imikolov.URL, 'imikolov', paddle.dataset.imikolov.MD5)) as tf: trainf = tf.extractfile(train_filename) testf = tf.extractfile(test_filename) word_freq = word_count(testf, word_count(trainf)) if '<unk>' in word_freq: # remove <unk> for now, since we will set it as last index del word_freq['<unk>'] word_freq = filter(lambda x: x[1] > min_word_freq, word_freq.items()) word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0])) words, _ = list(zip(*word_freq_sorted)) word_idx = dict(zip(words, xrange(len(words)))) word_idx['<unk>'] = len(words) return word_idx def reader_creator(filename, word_idx, n, data_type): def reader(): with tarfile.open( paddle.dataset.common.download( paddle.dataset.imikolov.URL, 'imikolov', paddle.dataset.imikolov.MD5)) as tf: f = tf.extractfile(filename) UNK = word_idx['<unk>'] for l in f: if DataType.NGRAM == data_type: assert n > -1, 'Invalid gram length' l = ['<s>'] + l.strip().split() + ['<e>'] if len(l) >= n: l = [word_idx.get(w, UNK) for w in l] for i in range(n, len(l) + 1): yield tuple(l[i - n:i]) elif DataType.SEQ == data_type: l = l.strip().split() l = [word_idx.get(w, UNK) for w in l] src_seq = [word_idx['<s>']] + l trg_seq = l + [word_idx['<e>']] if n > 0 and len(src_seq) > n: continue yield src_seq, trg_seq else: assert False, 'Unknow data type' return reader def train(word_idx, n, data_type=DataType.NGRAM): """ imikolov training set creator. It returns a reader creator, each sample in the reader is a word ID tuple. 
:param word_idx: word dictionary :type word_idx: dict :param n: sliding window size if type is ngram, otherwise max length of sequence :type n: int :param data_type: data type (ngram or sequence) :type data_type: member variable of DataType (NGRAM or SEQ) :return: Training reader creator :rtype: callable """ return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n, data_type) def test(word_idx, n, data_type=DataType.NGRAM): """ imikolov test set creator. It returns a reader creator, each sample in the reader is a word ID tuple. :param word_idx: word dictionary :type word_idx: dict :param n: sliding window size if type is ngram, otherwise max length of sequence :type n: int :param data_type: data type (ngram or sequence) :type data_type: member variable of DataType (NGRAM or SEQ) :return: Test reader creator :rtype: callable """ return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n, data_type) def fetch(): paddle.dataset.common.download(URL, "imikolov", MD5) def convert(path): """ Converts dataset to recordio format """ N = 5 word_dict = build_dict() paddle.dataset.common.convert(path, train(word_dict, N), 1000, "imikolov_train") paddle.dataset.common.convert(path, test(word_dict, N), 1000, "imikolov_test")
5,376
32.397516
84
py
Paddle
Paddle-master/python/paddle/dataset/common.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import requests import hashlib import os import errno import shutil import sys import importlib import paddle.dataset import cPickle import glob import cPickle as pickle __all__ = [ 'DATA_HOME', 'download', 'md5file', 'split', 'cluster_files_reader', 'convert', ] DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset') # When running unit tests, there could be multiple processes that # trying to create DATA_HOME directory simultaneously, so we cannot # use a if condition to check for the existence of the directory; # instead, we use the filesystem as the synchronization mechanism by # catching returned errors. 
def must_mkdirs(path):
    """Create *path* (and any missing parents), tolerating concurrent creation.

    Multiple processes (e.g. parallel unit tests) may race to create the
    same directory, so rather than testing for existence first we attempt
    the creation and ignore EEXIST.
    """
    try:
        # BUG FIX: this used to call os.makedirs(DATA_HOME) unconditionally,
        # ignoring the *path* argument, so must_mkdirs(ds_path) in
        # fetch_all_recordio() never created the per-dataset directory.
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise


must_mkdirs(DATA_HOME)


def md5file(fname):
    """Return the MD5 hex digest of the file at *fname*."""
    hash_md5 = hashlib.md5()
    # Context manager so the handle is closed even if reading fails.
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()


def download(url, module_name, md5sum, save_name=None):
    """Download *url* into DATA_HOME/<module_name> and verify its checksum.

    :param url: source URL
    :param module_name: dataset sub-directory name under DATA_HOME
    :param md5sum: expected MD5 hex digest of the downloaded file
    :param save_name: optional local file name; defaults to the URL basename
    :return: local path of the verified file
    :raises RuntimeError: if the checksum still mismatches after 3 retries
    """
    dirname = os.path.join(DATA_HOME, module_name)
    must_mkdirs(dirname)  # race-safe, consistent with the module convention

    filename = os.path.join(dirname,
                            url.split('/')[-1]
                            if save_name is None else save_name)

    retry = 0
    retry_limit = 3
    while not (os.path.exists(filename) and md5file(filename) == md5sum):
        if os.path.exists(filename):
            print("file md5 %s %s" % (md5file(filename), md5sum))
        if retry < retry_limit:
            retry += 1
        else:
            raise RuntimeError("Cannot download {0} within retry limit {1}".
                               format(url, retry_limit))
        print("Cache file %s not found, downloading %s" % (filename, url))
        r = requests.get(url, stream=True)
        total_length = r.headers.get('content-length')

        if total_length is None:
            # Unknown length: stream the raw socket straight to disk.
            # BUG FIX: open in binary mode; text mode can corrupt the
            # payload on platforms that translate line endings.
            with open(filename, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
        else:
            with open(filename, 'wb') as f:  # binary mode, see above
                dl = 0
                total_length = int(total_length)
                for data in r.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(data)
                    # Render a simple 50-column progress bar.
                    done = int(50 * dl / total_length)
                    sys.stdout.write("\r[%s%s]" % ('=' * done,
                                                   ' ' * (50 - done)))
                    sys.stdout.flush()

    return filename


def fetch_all():
    """Invoke ``fetch()`` on every dataset submodule that defines one."""
    for module_name in filter(lambda x: not x.startswith("__"),
                              dir(paddle.dataset)):
        if "fetch" in dir(
                importlib.import_module("paddle.dataset.%s" % module_name)):
            getattr(
                importlib.import_module("paddle.dataset.%s" % module_name),
                "fetch")()


def fetch_all_recordio(path):
    """Invoke ``convert()`` on every dataset submodule that defines one,
    writing the recordio output of module X under <path>/X."""
    for module_name in filter(lambda x: not x.startswith("__"),
                              dir(paddle.dataset)):
        if "convert" in dir(
                importlib.import_module("paddle.dataset.%s" % module_name)) and \
                not module_name == "common":
            ds_path = os.path.join(path, module_name)
            must_mkdirs(ds_path)
            getattr(
                importlib.import_module("paddle.dataset.%s" % module_name),
                "convert")(ds_path)


def split(reader, line_count, suffix="%05d.pickle", dumper=cPickle.dump):
    """
    you can call the function as:

    split(paddle.dataset.cifar.train10(), line_count=1000,
        suffix="imikolov-train-%05d.pickle")

    the output files as:

    |-imikolov-train-00000.pickle
    |-imikolov-train-00001.pickle
    |- ...
    |-imikolov-train-00480.pickle

    :param reader: is a reader creator
    :param line_count: line count for each file
    :param suffix: the suffix for the output files, should contain "%d"
        means the id for each file. Default is "%05d.pickle"
    :param dumper: is a callable function that dump object to file, this
        function will be called as dumper(obj, f) and obj is the object
        will be dumped, f is a file object. Default is cPickle.dump.
    """
    if not callable(dumper):
        raise TypeError("dumper should be callable.")
    lines = []
    indx_f = 0
    for i, d in enumerate(reader()):
        lines.append(d)
        # BUG FIX: the original flushed on ``i >= line_count and
        # i % line_count == 0`` *after* appending, which put
        # line_count + 1 records into the first file.  Flush once exactly
        # line_count records are buffered.
        if len(lines) == line_count:
            # BUG FIX: write pickle files in binary mode for portability.
            with open(suffix % indx_f, "wb") as f:
                dumper(lines, f)
            lines = []
            indx_f += 1
    if lines:
        with open(suffix % indx_f, "wb") as f:
            dumper(lines, f)


def cluster_files_reader(files_pattern,
                         trainer_count,
                         trainer_id,
                         loader=cPickle.load):
    """
    Create a reader that yield element from the given files, select
    a file set according trainer count and trainer_id

    :param files_pattern: the files which generating by split(...)
    :param trainer_count: total trainer count
    :param trainer_id: the trainer rank id
    :param loader: is a callable function that load object from file, this
        function will be called as loader(f) and f is a file object.
        Default is cPickle.load
    """

    def reader():
        if not callable(loader):
            raise TypeError("loader should be callable.")
        file_list = glob.glob(files_pattern)
        file_list.sort()
        my_file_list = []
        # Deterministic round-robin assignment of files to trainers.
        for idx, fn in enumerate(file_list):
            if idx % trainer_count == trainer_id:
                print("append file: %s" % fn)
                my_file_list.append(fn)
        for fn in my_file_list:
            # BUG FIX: read in binary mode, matching the binary pickle
            # files written by split().
            with open(fn, "rb") as f:
                lines = loader(f)
                for line in lines:
                    yield line

    return reader


def convert(output_path, reader, line_count, name_prefix):
    """
    Convert data from reader to recordio format files.

    :param output_path: directory in which output files will be saved.
    :param reader: a data reader, from which the convert program will read
        data instances.
    :param line_count: the number of instances per output file.
    :param name_prefix: the name prefix of generated files.
    """
    # NOTE: the import used to sit *above* the docstring, which silently
    # turned the docstring into a dead expression statement.
    import recordio

    assert line_count >= 1
    indx_f = 0

    def write_data(indx_f, lines):
        # One recordio file per chunk, numbered by indx_f.
        filename = "%s/%s-%05d" % (output_path, name_prefix, indx_f)
        writer = recordio.writer(filename)
        for l in lines:
            # FIXME(Yancey1989):
            # dumps with protocol: pickle.HIGHEST_PROTOCOL
            writer.write(cPickle.dumps(l))
        writer.close()

    lines = []
    for i, d in enumerate(reader()):
        lines.append(d)
        # BUG FIX: same off-by-one as split() — the first chunk used to
        # contain line_count + 1 records.
        if len(lines) == line_count:
            write_data(indx_f, lines)
            lines = []
            indx_f += 1
    if lines:  # flush the remainder; skip writing an empty trailing file
        write_data(indx_f, lines)
7,722
31.586498
81
py
Paddle
Paddle-master/python/paddle/dataset/__init__.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Dataset package. """ import mnist import imikolov import imdb import cifar import movielens import conll05 import uci_housing import sentiment import wmt14 import wmt16 import mq2007 import flowers import voc2012 import image __all__ = [ 'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment', 'uci_housing', 'wmt14', 'wmt16', 'mq2007', 'flowers', 'voc2012', 'image', ]
1,060
20.653061
74
py
Paddle
Paddle-master/python/paddle/dataset/movielens.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Movielens 1-M dataset.

Movielens 1-M dataset contains 1 million ratings from 6000 users on 4000
movies, which was collected by GroupLens Research. This module will download
Movielens 1-M dataset from
http://files.grouplens.org/datasets/movielens/ml-1m.zip and parse training
set and test set into paddle reader creators.
"""

import zipfile
import paddle.dataset.common
import re
import random
import functools

__all__ = [
    'train', 'test', 'get_movie_title_dict', 'max_movie_id', 'max_user_id',
    'age_table', 'movie_categories', 'max_job_id', 'user_info', 'movie_info',
    'convert'
]

# Age buckets used by ml-1m; a user's age is stored as an index into this.
age_table = [1, 18, 25, 35, 45, 50, 56]

URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
MD5 = 'c4d9eecfca2ab87c1945afe126590906'


class MovieInfo(object):
    """
    Movie id, title and categories information are stored in MovieInfo.
    """

    def __init__(self, index, categories, title):
        self.index = int(index)
        self.categories = categories
        self.title = title

    def value(self):
        """
        Get information from a movie.
        """
        return [
            self.index, [CATEGORIES_DICT[c] for c in self.categories],
            [MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()]
        ]

    def __str__(self):
        return "<MovieInfo id(%d), title(%s), categories(%s)>" % (
            self.index, self.title, self.categories)

    def __repr__(self):
        return self.__str__()


class UserInfo(object):
    """
    User id, gender, age, and job information are stored in UserInfo.
    """

    def __init__(self, index, gender, age, job_id):
        self.index = int(index)
        self.is_male = gender == 'M'
        # Store the age as an index into age_table rather than the raw value.
        self.age = age_table.index(int(age))
        self.job_id = int(job_id)

    def value(self):
        """
        Get information from a user.
        """
        return [self.index, 0 if self.is_male else 1, self.age, self.job_id]

    def __str__(self):
        return "<UserInfo id(%d), gender(%s), age(%d), job(%d)>" % (
            self.index, "M" if self.is_male else "F", age_table[self.age],
            self.job_id)

    def __repr__(self):
        return str(self)


# Lazily-populated module-level caches filled by __initialize_meta_info__().
MOVIE_INFO = None
MOVIE_TITLE_DICT = None
CATEGORIES_DICT = None
USER_INFO = None


def __initialize_meta_info__():
    """Download ml-1m and lazily parse movie/user metadata into the
    module-level caches.  Returns the local path of the archive."""
    fn = paddle.dataset.common.download(URL, "movielens", MD5)
    global MOVIE_INFO
    if MOVIE_INFO is None:
        # Titles look like "Toy Story (1995)"; group(1) strips the year.
        pattern = re.compile(r'^(.*)\((\d+)\)$')
        with zipfile.ZipFile(file=fn) as package:
            # Sanity check: every archive member is a real ZipInfo entry.
            for info in package.infolist():
                assert isinstance(info, zipfile.ZipInfo)
            MOVIE_INFO = dict()
            title_word_set = set()
            categories_set = set()
            with package.open('ml-1m/movies.dat') as movie_file:
                for i, line in enumerate(movie_file):
                    movie_id, title, categories = line.strip().split('::')
                    categories = categories.split('|')
                    for c in categories:
                        categories_set.add(c)
                    title = pattern.match(title).group(1)
                    MOVIE_INFO[int(movie_id)] = MovieInfo(
                        index=movie_id, categories=categories, title=title)
                    for w in title.split():
                        title_word_set.add(w.lower())

            global MOVIE_TITLE_DICT
            MOVIE_TITLE_DICT = dict()
            for i, w in enumerate(title_word_set):
                MOVIE_TITLE_DICT[w] = i

            global CATEGORIES_DICT
            CATEGORIES_DICT = dict()
            for i, c in enumerate(categories_set):
                CATEGORIES_DICT[c] = i

            global USER_INFO
            USER_INFO = dict()
            with package.open('ml-1m/users.dat') as user_file:
                for line in user_file:
                    uid, gender, age, job, _ = line.strip().split("::")
                    USER_INFO[int(uid)] = UserInfo(
                        index=uid, gender=gender, age=age, job_id=job)
    return fn


def __reader__(rand_seed=0, test_ratio=0.1, is_test=False):
    """Yield [user features] + [movie features] + [[rating]] samples.

    The train/test split is drawn pseudo-randomly with a fixed seed so it
    is reproducible across calls.
    """
    fn = __initialize_meta_info__()
    rand = random.Random(x=rand_seed)
    with zipfile.ZipFile(file=fn) as package:
        # BUG FIX (latent): the file handle used to be bound to ``rating``
        # and was immediately shadowed by the per-line rating value below;
        # renamed to keep the handle and the value distinct.
        with package.open('ml-1m/ratings.dat') as ratings_file:
            for line in ratings_file:
                if (rand.random() < test_ratio) == is_test:
                    uid, mov_id, rating, _ = line.strip().split("::")
                    uid = int(uid)
                    mov_id = int(mov_id)
                    # Rescale 1..5 stars linearly onto [-3.0, 5.0].
                    rating = float(rating) * 2 - 5.0

                    mov = MOVIE_INFO[mov_id]
                    usr = USER_INFO[uid]
                    yield usr.value() + mov.value() + [[rating]]


def __reader_creator__(**kwargs):
    return lambda: __reader__(**kwargs)


train = functools.partial(__reader_creator__, is_test=False)
test = functools.partial(__reader_creator__, is_test=True)


def get_movie_title_dict():
    """
    Get movie title dictionary.
    """
    __initialize_meta_info__()
    return MOVIE_TITLE_DICT


def __max_index_info__(a, b):
    # reduce() helper: keep the record with the larger index.
    if a.index > b.index:
        return a
    else:
        return b


def max_movie_id():
    """
    Get the maximum value of movie id.
    """
    __initialize_meta_info__()
    return reduce(__max_index_info__, MOVIE_INFO.viewvalues()).index


def max_user_id():
    """
    Get the maximum value of user id.
    """
    __initialize_meta_info__()
    return reduce(__max_index_info__, USER_INFO.viewvalues()).index


def __max_job_id_impl__(a, b):
    # reduce() helper: keep the record with the larger job id.
    if a.job_id > b.job_id:
        return a
    else:
        return b


def max_job_id():
    """
    Get the maximum value of job id.
    """
    __initialize_meta_info__()
    return reduce(__max_job_id_impl__, USER_INFO.viewvalues()).job_id


def movie_categories():
    """
    Get movie categoriges dictionary.
    """
    __initialize_meta_info__()
    return CATEGORIES_DICT


def user_info():
    """
    Get user info dictionary.
    """
    __initialize_meta_info__()
    return USER_INFO


def movie_info():
    """
    Get movie info dictionary.
    """
    __initialize_meta_info__()
    return MOVIE_INFO


def unittest():
    """Smoke test: count the train and test samples."""
    for train_count, _ in enumerate(train()()):
        pass
    for test_count, _ in enumerate(test()()):
        pass
    print("%s %s" % (train_count, test_count))


def fetch():
    paddle.dataset.common.download(URL, "movielens", MD5)


def convert(path):
    """
    Converts dataset to recordio format
    """
    paddle.dataset.common.convert(path, train(), 1000, "movielens_train")
    paddle.dataset.common.convert(path, test(), 1000, "movielens_test")


if __name__ == '__main__':
    unittest()
27.140684
79
py
Paddle
Paddle-master/python/paddle/dataset/cifar.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CIFAR dataset.

This module will download dataset from
https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into
paddle reader creators.

The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,
with 6000 images per class. There are 50000 training images and 10000 test
images.

The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes
containing 600 images each. There are 500 training images and 100 testing
images per class.

"""

import cPickle
import itertools
import numpy
import paddle.dataset.common
import tarfile

__all__ = ['train100', 'test100', 'train10', 'test10', 'convert']

URL_PREFIX = 'https://www.cs.toronto.edu/~kriz/'
CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz'
CIFAR10_MD5 = 'c58f30108f718f92721af3b95e74349a'
CIFAR100_URL = URL_PREFIX + 'cifar-100-python.tar.gz'
CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'


def reader_creator(filename, sub_name):
    """Build a reader over the CIFAR tarball *filename*, restricted to the
    archive members whose name contains *sub_name* (e.g. 'data_batch').

    Each yielded sample is a (float32 pixel vector scaled to [0, 1],
    int label) pair.
    """

    def read_batch(batch):
        # CIFAR-10 batches carry 'labels'; CIFAR-100 carries 'fine_labels'.
        samples = batch['data']
        labels = batch.get('labels', batch.get('fine_labels', None))
        assert labels is not None
        for sample, label in itertools.izip(samples, labels):
            # Scale the raw ubyte pixels into [0, 1].
            yield (sample / 255.0).astype(numpy.float32), int(label)

    def reader():
        with tarfile.open(filename, mode='r') as tar:
            for member in tar:
                if sub_name not in member.name:
                    continue
                batch = cPickle.load(tar.extractfile(member.name))
                for item in read_batch(batch):
                    yield item

    return reader


def train100():
    """
    CIFAR-100 training set creator.

    It returns a reader creator; each sample in the reader is image pixels
    in [0, 1] and label in [0, 99].

    :return: Training reader creator
    :rtype: callable
    """
    archive = paddle.dataset.common.download(CIFAR100_URL, 'cifar',
                                             CIFAR100_MD5)
    return reader_creator(archive, 'train')


def test100():
    """
    CIFAR-100 test set creator.

    It returns a reader creator; each sample in the reader is image pixels
    in [0, 1] and label in [0, 99].

    :return: Test reader creator.
    :rtype: callable
    """
    archive = paddle.dataset.common.download(CIFAR100_URL, 'cifar',
                                             CIFAR100_MD5)
    return reader_creator(archive, 'test')


def train10():
    """
    CIFAR-10 training set creator.

    It returns a reader creator; each sample in the reader is image pixels
    in [0, 1] and label in [0, 9].

    :return: Training reader creator
    :rtype: callable
    """
    archive = paddle.dataset.common.download(CIFAR10_URL, 'cifar',
                                             CIFAR10_MD5)
    return reader_creator(archive, 'data_batch')


def test10():
    """
    CIFAR-10 test set creator.

    It returns a reader creator; each sample in the reader is image pixels
    in [0, 1] and label in [0, 9].

    :return: Test reader creator.
    :rtype: callable
    """
    archive = paddle.dataset.common.download(CIFAR10_URL, 'cifar',
                                             CIFAR10_MD5)
    return reader_creator(archive, 'test_batch')


def fetch():
    """Pre-download both CIFAR archives into the local cache."""
    paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5)
    paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5)


def convert(path):
    """
    Converts dataset to recordio format
    """
    paddle.dataset.common.convert(path, train100(), 1000, "cifar_train100")
    paddle.dataset.common.convert(path, test100(), 1000, "cifar_test100")
    paddle.dataset.common.convert(path, train10(), 1000, "cifar_train10")
    paddle.dataset.common.convert(path, test10(), 1000, "cifar_test10")
4,163
28.742857
77
py
Paddle
Paddle-master/python/paddle/dataset/mq2007.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MQ2007 dataset

MQ2007 is a query set from Million Query track of TREC 2007. There are
about 1700 queries in it with labeled documents. In MQ2007, the 5-fold
cross validation strategy is adopted and the 5-fold partitions are included
in the package. In each fold, there are three subsets for learning:
training set, validation set and testing set.

MQ2007 dataset from website
http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ2007.rar
and parse training set and test set into paddle reader creators
"""

import os
import sys
import functools

import rarfile
import numpy as np

from common import download

# URL = "http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ2007.rar"
URL = "http://www.bigdatalab.ac.cn/benchmark/upload/download_source/7b6dbbe2-842c-11e4-a536-bcaec51b9163_MQ2007.rar"
MD5 = "7be1640ae95c6408dab0ae7207bdc706"


def __initialize_meta_info__():
    """
    download and extract the MQ2007 dataset
    """
    fn = fetch()
    rar = rarfile.RarFile(fn)
    dirpath = os.path.dirname(fn)
    rar.extractall(path=dirpath)
    return dirpath


class Query(object):
    """
    queries used for learning to rank algorithms.
It is created from relevance scores, query-document feature vectors Parameters: ---------- query_id : int query_id in dataset, mapping from query to relevance documents relevance_score : int relevance score of query and document pair feature_vector : array, dense feature feature in vector format description : string comment section in query doc pair data """ def __init__(self, query_id=-1, relevance_score=-1, feature_vector=None, description=""): self.query_id = query_id self.relevance_score = relevance_score if feature_vector is None: self.feature_vector = [] else: self.feature_vector = feature_vector self.description = description def __str__(self): string = "%s %s %s" % (str(self.relevance_score), str(self.query_id), " ".join(str(f) for f in self.feature_vector)) return string # @classmethod def _parse_(self, text): """ parse line into Query """ comment_position = text.find('#') line = text[:comment_position].strip() self.description = text[comment_position + 1:].strip() parts = line.split() if len(parts) != 48: sys.stdout.write("expect 48 space split parts, get %d" % (len(parts))) return None # format : 0 qid:10 1:0.000272 2:0.000000 .... 
self.relevance_score = int(parts[0]) self.query_id = int(parts[1].split(':')[1]) for p in parts[2:]: pair = p.split(':') self.feature_vector.append(float(pair[1])) return self class QueryList(object): """ group query into list, every item in list is a Query """ def __init__(self, querylist=None): self.query_id = -1 if querylist is None: self.querylist = [] else: self.querylist = querylist for query in self.querylist: if self.query_id == -1: self.query_id = query.query_id else: if self.query_id != query.query_id: raise ValueError("query in list must be same query_id") def __iter__(self): for query in self.querylist: yield query def __len__(self): return len(self.querylist) def __getitem__(self, i): return self.querylist[i] def _correct_ranking_(self): if self.querylist is None: return self.querylist.sort(key=lambda x: x.relevance_score, reverse=True) def _add_query(self, query): if self.query_id == -1: self.query_id = query.query_id else: if self.query_id != query.query_id: raise ValueError("query in list must be same query_id") self.querylist.append(query) def gen_plain_txt(querylist): """ gen plain text in list for other usage Paramters: -------- querylist : querylist, one query match many docment pairs in list, see QueryList return : ------ query_id : np.array, shape=(samples_num, ) label : np.array, shape=(samples_num, ) querylist : np.array, shape=(samples_num, feature_dimension) """ if not isinstance(querylist, QueryList): querylist = QueryList(querylist) querylist._correct_ranking_() for query in querylist: yield querylist.query_id, query.relevance_score, np.array( query.feature_vector) def gen_point(querylist): """ gen item in list for point-wise learning to rank algorithm Paramters: -------- querylist : querylist, one query match many docment pairs in list, see QueryList return : ------ label : np.array, shape=(samples_num, ) querylist : np.array, shape=(samples_num, feature_dimension) """ if not isinstance(querylist, QueryList): querylist = 
QueryList(querylist) querylist._correct_ranking_() for query in querylist: yield query.relevance_score, np.array(query.feature_vector) def gen_pair(querylist, partial_order="full"): """ gen pair for pair-wise learning to rank algorithm Paramters: -------- querylist : querylist, one query match many docment pairs in list, see QueryList pairtial_order : "full" or "neighbour" there is redudant in all possiable pair combinations, which can be simplifed gen pairs for neighbour items or the full partial order pairs return : ------ label : np.array, shape=(1) query_left : np.array, shape=(1, feature_dimension) query_right : same as left """ if not isinstance(querylist, QueryList): querylist = QueryList(querylist) querylist._correct_ranking_() labels = [] docpairs = [] # C(n,2) for i in range(len(querylist)): query_left = querylist[i] for j in range(i + 1, len(querylist)): query_right = querylist[j] if query_left.relevance_score > query_right.relevance_score: labels.append([1]) docpairs.append([ np.array(query_left.feature_vector), np.array(query_right.feature_vector) ]) elif query_left.relevance_score < query_right.relevance_score: labels.append([1]) docpairs.append([ np.array(query_right.feature_vector), np.array(query_left.feature_vector) ]) for label, pair in zip(labels, docpairs): yield np.array(label), pair[0], pair[1] def gen_list(querylist): """ gen item in list for list-wise learning to rank algorithm Paramters: -------- querylist : querylist, one query match many docment pairs in list, see QueryList return : ------ label : np.array, shape=(samples_num, ) querylist : np.array, shape=(samples_num, feature_dimension) """ if not isinstance(querylist, QueryList): querylist = QueryList(querylist) querylist._correct_ranking_() relevance_score_list = [[query.relevance_score] for query in querylist] feature_vector_list = [query.feature_vector for query in querylist] yield np.array(relevance_score_list), np.array(feature_vector_list) def query_filter(querylists): """ 
filter query get only document with label 0. label 0, 1, 2 means the relevance score document with query parameters : querylist : QueyList list return : querylist : QueyList list """ filter_query = [] for querylist in querylists: relevance_score_list = [query.relevance_score for query in querylist] if sum(relevance_score_list) != .0: filter_query.append(querylist) return filter_query def load_from_text(filepath, shuffle=False, fill_missing=-1): """ parse data file into querys """ prev_query_id = -1 querylists = [] querylist = None fn = __initialize_meta_info__() with open(os.path.join(fn, filepath)) as f: for line in f: query = Query() query = query._parse_(line) if query == None: continue if query.query_id != prev_query_id: if querylist is not None: querylists.append(querylist) querylist = QueryList() prev_query_id = query.query_id querylist._add_query(query) if querylist is not None: querylists.append(querylist) return querylists def __reader__(filepath, format="pairwise", shuffle=False, fill_missing=-1): """ Parameters -------- filename : string fill_missing : fill the missing value. 
default in MQ2007 is -1 Returns ------ yield label query_left, query_right # format = "pairwise" label querylist # format = "listwise" """ querylists = query_filter( load_from_text( filepath, shuffle=shuffle, fill_missing=fill_missing)) for querylist in querylists: if format == "plain_txt": yield next(gen_plain_txt(querylist)) elif format == "pointwise": yield next(gen_point(querylist)) elif format == "pairwise": for pair in gen_pair(querylist): yield pair elif format == "listwise": yield next(gen_list(querylist)) train = functools.partial(__reader__, filepath="MQ2007/MQ2007/Fold1/train.txt") test = functools.partial(__reader__, filepath="MQ2007/MQ2007/Fold1/test.txt") def fetch(): return download(URL, "MQ2007", MD5) if __name__ == "__main__": fetch() mytest = functools.partial( __reader__, filepath="MQ2007/MQ2007/Fold1/sample", format="listwise") for label, query in mytest(): print label, query
10,631
30.832335
151
py
Paddle
Paddle-master/python/paddle/dataset/sentiment.py
# /usr/bin/env python
# -*- coding:utf-8 -*-

# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The script fetch and preprocess movie_reviews data set that provided by NLTK

TODO(yuyang18): Complete dataset.
"""

import collections
from itertools import chain

import nltk
from nltk.corpus import movie_reviews

import paddle.dataset.common

__all__ = ['train', 'test', 'get_word_dict', 'convert']

# First 1600 interleaved samples are used for training, the rest for test.
NUM_TRAINING_INSTANCES = 1600
NUM_TOTAL_INSTANCES = 2000


def download_data_if_not_yet():
    """Fetch the NLTK movie_reviews corpus into DATA_HOME unless it is
    already reachable on the NLTK search path."""
    try:
        # Make sure nltk also searches our local cache directory.
        if paddle.dataset.common.DATA_HOME not in nltk.data.path:
            nltk.data.path.append(paddle.dataset.common.DATA_HOME)
        movie_reviews.categories()
    except LookupError:
        print("Downloading movie_reviews data set, please wait.....")
        nltk.download(
            'movie_reviews', download_dir=paddle.dataset.common.DATA_HOME)
        print("Download data set success.....")
        print("Path is " + nltk.data.find('corpora/movie_reviews').path)


def get_word_dict():
    """Return [(word, rank)] pairs, ordered by descending corpus frequency;
    rank is the word's position in that ordering."""
    download_data_if_not_yet()
    word_freq = collections.defaultdict(int)
    for category in movie_reviews.categories():
        for field in movie_reviews.fileids(category):
            for token in movie_reviews.words(field):
                word_freq[token] += 1
    # Stable descending sort by frequency (equivalent to the old cmp sort).
    ranked = sorted(word_freq.items(), key=lambda item: item[1], reverse=True)
    return [(word, rank) for rank, (word, _) in enumerate(ranked)]


def sort_files():
    """Interleave negative and positive review file ids so that classes
    alternate (neg, pos, neg, pos, ...)."""
    neg_file_list = movie_reviews.fileids('neg')
    pos_file_list = movie_reviews.fileids('pos')
    return list(chain.from_iterable(zip(neg_file_list, pos_file_list)))


def load_sentiment_data():
    """Load the corpus as [(word_id_list, label)] where label 0 is negative
    and 1 is positive."""
    download_data_if_not_yet()
    words_ids = dict(get_word_dict())
    data_set = []
    for sample_file in sort_files():
        category = 0 if 'neg' in sample_file else 1
        ids = [words_ids[w.lower()] for w in movie_reviews.words(sample_file)]
        data_set.append((ids, category))
    return data_set


def reader_creator(data):
    """Yield (word_id_list, label) items from *data*."""
    for sample in data:
        yield sample[0], sample[1]


def train():
    """
    Default training set reader creator
    """
    data_set = load_sentiment_data()
    return reader_creator(data_set[:NUM_TRAINING_INSTANCES])


def test():
    """
    Default test set reader creator
    """
    data_set = load_sentiment_data()
    return reader_creator(data_set[NUM_TRAINING_INSTANCES:])


def fetch():
    """Pre-download the NLTK corpus into the local cache."""
    nltk.download(
        'movie_reviews', download_dir=paddle.dataset.common.DATA_HOME)


def convert(path):
    """
    Converts dataset to recordio format
    """
    paddle.dataset.common.convert(path, train, 1000, "sentiment_train")
    paddle.dataset.common.convert(path, test, 1000, "sentiment_test")
4,032
27.602837
80
py
Paddle
Paddle-master/python/paddle/dataset/mnist.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MNIST dataset.

This module will download dataset from http://yann.lecun.com/exdb/mnist/ and
parse training set and test set into paddle reader creators.
"""
import paddle.dataset.common
import subprocess
import numpy
import platform

__all__ = ['train', 'test', 'convert']

URL_PREFIX = 'http://yann.lecun.com/exdb/mnist/'
TEST_IMAGE_URL = URL_PREFIX + 't10k-images-idx3-ubyte.gz'
TEST_IMAGE_MD5 = '9fb629c4189551a2d022fa330f9573f3'
TEST_LABEL_URL = URL_PREFIX + 't10k-labels-idx1-ubyte.gz'
TEST_LABEL_MD5 = 'ec29112dd5afa0611ce80d1b7f02629c'
TRAIN_IMAGE_URL = URL_PREFIX + 'train-images-idx3-ubyte.gz'
TRAIN_IMAGE_MD5 = 'f68b3c2dcbeaaa9fbdd348bbdeb94873'
TRAIN_LABEL_URL = URL_PREFIX + 'train-labels-idx1-ubyte.gz'
TRAIN_LABEL_MD5 = 'd53e105ee54ea40749a09fcbcd1e9432'


def reader_creator(image_filename, label_filename, buffer_size):
    """Create a reader over a gzipped IDX image/label file pair.

    Each yielded sample is (784 float32 pixels scaled to [-1, 1], int label).
    The files are decoded *buffer_size* samples at a time.

    :param image_filename: path of the gzipped idx3-ubyte image file
    :param label_filename: path of the gzipped idx1-ubyte label file
    :param buffer_size: number of samples decoded per batch
    :return: reader function yielding (image, label) pairs
    :raises NotImplementedError: on platforms without zcat/gzcat
    """

    def reader():
        # Pick the right gzip-to-stdout tool for the platform.
        if platform.system() == 'Darwin':
            zcat_cmd = 'gzcat'
        elif platform.system() == 'Linux':
            zcat_cmd = 'zcat'
        else:
            raise NotImplementedError()

        # According to http://stackoverflow.com/a/38061619/724872, we
        # cannot use standard package gzip here.
        m = subprocess.Popen([zcat_cmd, image_filename], stdout=subprocess.PIPE)
        m.stdout.read(16)  # skip the IDX magic bytes / header

        l = subprocess.Popen([zcat_cmd, label_filename], stdout=subprocess.PIPE)
        l.stdout.read(8)  # skip the IDX magic bytes / header

        try:  # reader could be break.
            while True:
                labels = numpy.fromfile(
                    l.stdout, 'ubyte', count=buffer_size).astype("int")

                if labels.size != buffer_size:
                    break  # numpy.fromfile returns empty slice after EOF.

                images = numpy.fromfile(
                    m.stdout, 'ubyte', count=buffer_size * 28 * 28).reshape(
                        (buffer_size, 28 * 28)).astype('float32')

                # Rescale raw ubyte pixels from [0, 255] to [-1, 1].
                images = images / 255.0 * 2.0 - 1.0

                for i in xrange(buffer_size):
                    yield images[i, :], int(labels[i])
        finally:
            # Always reap the decompression subprocesses.
            m.terminate()
            l.terminate()

    return reader


def train():
    """
    MNIST training set creator.

    It returns a reader creator, each sample in the reader is image pixels in
    [0, 1] and label in [0, 9].

    :return: Training reader creator
    :rtype: callable
    """
    return reader_creator(
        paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist',
                                       TRAIN_IMAGE_MD5),
        paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist',
                                       TRAIN_LABEL_MD5), 100)


def test():
    """
    MNIST test set creator.

    It returns a reader creator, each sample in the reader is image pixels in
    [0, 1] and label in [0, 9].

    :return: Test reader creator.
    :rtype: callable
    """
    return reader_creator(
        paddle.dataset.common.download(TEST_IMAGE_URL, 'mnist',
                                       TEST_IMAGE_MD5),
        paddle.dataset.common.download(TEST_LABEL_URL, 'mnist',
                                       TEST_LABEL_MD5), 100)


def fetch():
    """Pre-download all four MNIST files into the local cache."""
    paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5)
    paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5)
    paddle.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5)
    # BUG FIX: the test label file was verified against TRAIN_LABEL_MD5,
    # which can never match, so fetch() retried 3 times and then raised.
    paddle.dataset.common.download(TEST_LABEL_URL, 'mnist', TEST_LABEL_MD5)


def convert(path):
    """
    Converts dataset to recordio format
    """
    # BUG FIX: the output prefix was misspelled "minist_*".
    paddle.dataset.common.convert(path, train(), 1000, "mnist_train")
    paddle.dataset.common.convert(path, test(), 1000, "mnist_test")
33.609756
80
py
Paddle
Paddle-master/python/paddle/dataset/flowers.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module will download dataset from
http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html
and parse train/test set into paddle reader creators.

This set contains images of flowers belonging to 102 different categories.
The images were acquired by searching the web and taking pictures. There are a
minimum of 40 images for each category.

The database was used in:

Nilsback, M-E. and Zisserman, A. Automated flower classification over a large
number of classes. Proceedings of the Indian Conference on Computer Vision,
Graphics and Image Processing (2008)
http://www.robots.ox.ac.uk/~vgg/publications/papers/nilsback08.{pdf,ps.gz}.

"""
import cPickle
import itertools
import functools
from common import download
import tarfile
import scipy.io as scio
from paddle.dataset.image import *
from paddle.reader import *
import os
import numpy as np
from multiprocessing import cpu_count
__all__ = ['train', 'test', 'valid']

DATA_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz'
LABEL_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/imagelabels.mat'
SETID_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/setid.mat'
DATA_MD5 = '33bfc11892f1e405ca193ae9a9f2a118'
LABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d'
SETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c'
# In official 'readme', tstid is the flag of test data
# and trnid is the flag of train data. But test data is more than train data.
# So we exchange the train data and test data.
TRAIN_FLAG = 'tstid'
TEST_FLAG = 'trnid'
VALID_FLAG = 'valid'


def default_mapper(is_train, sample):
    '''
    map image bytes data to type needed by model input layer

    :param is_train: whether to apply training-time transforms (random crop)
    :param sample: a (raw image bytes, label) tuple
    :return: (flattened float32 pixel vector, label)
    '''
    img, label = sample
    img = load_image_bytes(img)
    # Resize shorter side to 256 then crop to 224x224, subtracting the
    # per-channel mean; crop is random when is_train is true.
    img = simple_transform(
        img, 256, 224, is_train, mean=[103.94, 116.78, 123.68])
    return img.flatten().astype('float32'), label


# Pre-bound mappers for the train (random crop) and eval (center crop) paths.
train_mapper = functools.partial(default_mapper, True)
test_mapper = functools.partial(default_mapper, False)


def reader_creator(data_file,
                   label_file,
                   setid_file,
                   dataset_name,
                   mapper,
                   buffered_size=1024,
                   use_xmap=True):
    '''
    1. read images from tar file and
        merge images into batch files in 102flowers.tgz_batch/
    2. get a reader to read sample
        from batch file

    :param data_file: downloaded data file
    :type data_file: string
    :param label_file: downloaded label file
    :type label_file: string
    :param setid_file: downloaded setid file containing information
                        about how to split dataset
    :type setid_file: string
    :param dataset_name: data set name (tstid|trnid|valid)
    :type dataset_name: string
    :param mapper: a function to map image bytes data to type
                    needed by model input layer
    :type mapper: callable
    :param buffered_size: the size of buffer used to process images
    :type buffered_size: int
    :return: data reader
    :rtype: callable
    '''
    labels = scio.loadmat(label_file)['labels'][0]
    indexes = scio.loadmat(setid_file)[dataset_name][0]
    img2label = {}
    for i in indexes:
        # Image indexes in setid.mat are 1-based file numbers.
        img = "jpg/image_%05d.jpg" % i
        img2label[img] = labels[i - 1]
    # Pre-extracts the tar into pickled batch files; returns the path of a
    # text file listing one batch file per line.
    file_list = batch_images_from_tar(data_file, dataset_name, img2label)

    def reader():
        for file in open(file_list):
            file = file.strip()
            batch = None
            with open(file, 'r') as f:
                batch = cPickle.load(f)
            data = batch['data']
            labels = batch['label']
            # NOTE(review): labels in the .mat file are 1-based; the reader
            # yields label - 1 (0-based), although the train/test/valid
            # docstrings say [1, 102] -- confirm which contract callers expect.
            for sample, label in itertools.izip(data, batch['label']):
                yield sample, int(label) - 1

    if use_xmap:
        # Apply the mapper in parallel worker threads, one per CPU.
        return xmap_readers(mapper, reader, cpu_count(), buffered_size)
    else:
        return map_readers(mapper, reader)


def train(mapper=train_mapper, buffered_size=1024, use_xmap=True):
    '''
    Create flowers training set reader.
    It returns a reader, each sample in the reader is
    image pixels in [0, 1] and label in [1, 102]
    translated from original color image by steps:
    1. resize to 256*256
    2. random crop to 224*224
    3. flatten
    :param mapper: a function to map sample.
    :type mapper: callable
    :param buffered_size: the size of buffer used to process images
    :type buffered_size: int
    :return: train data reader
    :rtype: callable
    '''
    return reader_creator(
        download(DATA_URL, 'flowers', DATA_MD5),
        download(LABEL_URL, 'flowers', LABEL_MD5),
        download(SETID_URL, 'flowers', SETID_MD5), TRAIN_FLAG, mapper,
        buffered_size, use_xmap)


def test(mapper=test_mapper, buffered_size=1024, use_xmap=True):
    '''
    Create flowers test set reader.
    It returns a reader, each sample in the reader is
    image pixels in [0, 1] and label in [1, 102]
    translated from original color image by steps:
    1. resize to 256*256
    2. random crop to 224*224
    3. flatten
    :param mapper: a function to map sample.
    :type mapper: callable
    :param buffered_size: the size of buffer used to process images
    :type buffered_size: int
    :return: test data reader
    :rtype: callable
    '''
    return reader_creator(
        download(DATA_URL, 'flowers', DATA_MD5),
        download(LABEL_URL, 'flowers', LABEL_MD5),
        download(SETID_URL, 'flowers', SETID_MD5), TEST_FLAG, mapper,
        buffered_size, use_xmap)


def valid(mapper=test_mapper, buffered_size=1024, use_xmap=True):
    '''
    Create flowers validation set reader.
    It returns a reader, each sample in the reader is
    image pixels in [0, 1] and label in [1, 102]
    translated from original color image by steps:
    1. resize to 256*256
    2. random crop to 224*224
    3. flatten
    :param mapper: a function to map sample.
    :type mapper: callable
    :param buffered_size: the size of buffer used to process images
    :type buffered_size: int
    :return: test data reader
    :rtype: callable
    '''
    return reader_creator(
        download(DATA_URL, 'flowers', DATA_MD5),
        download(LABEL_URL, 'flowers', LABEL_MD5),
        download(SETID_URL, 'flowers', SETID_MD5), VALID_FLAG, mapper,
        buffered_size, use_xmap)


def fetch():
    # Pre-download the three archives into the dataset cache.
    download(DATA_URL, 'flowers', DATA_MD5)
    download(LABEL_URL, 'flowers', LABEL_MD5)
    download(SETID_URL, 'flowers', SETID_MD5)
7,010
34.055
78
py
Paddle
Paddle-master/python/paddle/dataset/tests/flowers_test.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.flowers
import unittest


class TestFlowers(unittest.TestCase):
    def check_reader(self, reader):
        # Drain the reader; verify each sample is a flattened 224x224x3
        # image and report (number of samples, largest label observed).
        expected_size = 224 * 224 * 3
        count = 0
        max_label = 0
        for sample in reader():
            self.assertEqual(sample[0].size, expected_size)
            if sample[1] > max_label:
                max_label = sample[1]
            count += 1
        return count, max_label

    def test_train(self):
        count, top_label = self.check_reader(paddle.dataset.flowers.train())
        self.assertEqual(count, 6149)
        self.assertEqual(top_label, 102)

    def test_test(self):
        count, top_label = self.check_reader(paddle.dataset.flowers.test())
        self.assertEqual(count, 1020)
        self.assertEqual(top_label, 102)

    def test_valid(self):
        count, top_label = self.check_reader(paddle.dataset.flowers.valid())
        self.assertEqual(count, 1020)
        self.assertEqual(top_label, 102)


if __name__ == '__main__':
    unittest.main()
1,668
31.096154
74
py
Paddle
Paddle-master/python/paddle/dataset/tests/test_image.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.dataset.image as image


class Image(unittest.TestCase):
    def test_resize_flip_chw(self):
        """Exercise resize_short, left_right_flip and to_chw on cat.jpg."""
        # resize: shorter side becomes 256, channel count stays 3.
        im = image.load_image('cat.jpg')
        im = image.resize_short(im, 256)
        self.assertEqual(256, min(im.shape[:2]))
        self.assertEqual(3, im.shape[2])

        # flip: left_right_flip must equal a numpy axis-1 flip of the input.
        # BUG FIX: the old check compared `im.all()` with `im2.all()`, two
        # booleans that are almost always equal regardless of content, so it
        # never actually verified the flip. Compare full arrays instead.
        flipped = image.left_right_flip(im)
        self.assertTrue(np.array_equal(flipped, np.flip(im, 1)))
        im = flipped

        # to_chw: HWC layout becomes CHW.
        h, w, c = im.shape
        im = image.to_chw(im)
        self.assertEqual(c, im.shape[0])
        self.assertEqual(h, im.shape[1])
        self.assertEqual(w, im.shape[2])


if __name__ == '__main__':
    unittest.main()
1,322
29.068182
74
py
Paddle
Paddle-master/python/paddle/dataset/tests/test_sentiment.py
# /usr/bin/env python
# -*- coding:utf-8 -*-

# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import nltk
import paddle.dataset.sentiment as st
from nltk.corpus import movie_reviews


class TestSentimentMethods(unittest.TestCase):
    def test_get_word_dict(self):
        # The ten most frequent tokens and their indices are fixed for
        # the movie_reviews corpus.
        head = st.get_word_dict()[0:10]
        expected = [(u',', 0), (u'the', 1), (u'.', 2), (u'a', 3), (u'and', 4),
                    (u'of', 5), (u'to', 6), (u"'", 7), (u'is', 8), (u'in', 9)]
        for got, want in zip(head, expected):
            self.assertEqual(got, want)
        self.assertTrue("/root/.cache/paddle/dataset" in nltk.data.path)

    def test_sort_files(self):
        # Consecutive files must alternate between label directories.
        previous_label = ''
        for sample_path in st.sort_files():
            current_label = sample_path.split("/")[0]
            self.assertNotEqual(current_label, previous_label)
            previous_label = current_label

    def test_data_set(self):
        data_set = st.load_sentiment_data()
        self.assertEqual(len(data_set), st.NUM_TOTAL_INSTANCES)
        self.assertEqual(len(list(st.train())), st.NUM_TRAINING_INSTANCES)
        self.assertEqual(
            len(list(st.test())),
            (st.NUM_TOTAL_INSTANCES - st.NUM_TRAINING_INSTANCES))
        # Test samples must alternate labels as well.
        previous_label = -1
        for sample in st.test():
            self.assertNotEqual(sample[1], previous_label)
            previous_label = sample[1]


if __name__ == '__main__':
    unittest.main()
2,041
35.464286
74
py
Paddle
Paddle-master/python/paddle/dataset/tests/wmt16_test.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.wmt16
import unittest


class TestWMT16(unittest.TestCase):
    def checkout_one_sample(self, sample):
        # A sample has 3 fields: source word indices, target word
        # indices, and target next-word indices.
        self.assertEqual(len(sample), 3)

        # Source indices are wrapped in start (0) and end (1) marks.
        self.assertEqual(sample[0][0], 0)
        self.assertEqual(sample[0][-1], 1)

        # Target indices begin with the start mark.
        self.assertEqual(sample[1][0], 0)

        # Target next-word indices finish with the end mark.
        self.assertEqual(sample[2][-1], 1)

    def _check_first_ten(self, reader):
        # Validate only the first 10 samples of the reader.
        for position, sample in enumerate(reader()):
            if position == 10:
                break
            self.checkout_one_sample(sample)

    def test_train(self):
        self._check_first_ten(
            paddle.dataset.wmt16.train(
                src_dict_size=100000, trg_dict_size=100000))

    def test_test(self):
        self._check_first_ten(
            paddle.dataset.wmt16.test(
                src_dict_size=1000, trg_dict_size=1000))

    def test_val(self):
        self._check_first_ten(
            paddle.dataset.wmt16.validation(
                src_dict_size=1000, trg_dict_size=1000))

    def test_get_dict(self):
        dict_size = 1000
        word_dict = paddle.dataset.wmt16.get_dict("en", dict_size, True)
        self.assertEqual(len(word_dict), dict_size)
        # Reserved tokens occupy the first three slots.
        self.assertEqual(word_dict[0], "<s>")
        self.assertEqual(word_dict[1], "<e>")
        self.assertEqual(word_dict[2], "<unk>")


if __name__ == "__main__":
    unittest.main()
2,370
34.38806
74
py
Paddle
Paddle-master/python/paddle/dataset/tests/imikolov_test.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.imikolov
import unittest

WORD_DICT = paddle.dataset.imikolov.build_dict()


class TestMikolov(unittest.TestCase):
    def check_reader(self, reader, n):
        # Every n-gram sample must have exactly n elements.
        for sample in reader():
            self.assertEqual(len(sample), n)

    def _encode(self, sentence):
        # Map words to their dictionary ids; unknown words map to <unk>.
        unk = WORD_DICT['<unk>']
        return [WORD_DICT.get(word, unk) for word in sentence.split(' ')]

    def test_train(self):
        n = 5
        self.check_reader(paddle.dataset.imikolov.train(WORD_DICT, n), n)

        expected = self._encode(
            'aer banknote berlitz calloway centrust cluett fromstein '
            'gitano guterman hydro-quebec ipo kia memotec mlx nahb punts '
            'rake regatta rubens sim snack-food ssangyong swapo wachter')
        for sample in paddle.dataset.imikolov.train(
                WORD_DICT,
                n=-1,
                data_type=paddle.dataset.imikolov.DataType.SEQ)():
            read_line = sample[0][1:]
            break
        self.assertEqual(expected, read_line)

    def test_test(self):
        n = 5
        self.check_reader(paddle.dataset.imikolov.test(WORD_DICT, n), n)

        expected = self._encode(
            'consumers may want to move their telephones a little '
            'closer to the tv set')
        for sample in paddle.dataset.imikolov.test(
                WORD_DICT,
                n=-1,
                data_type=paddle.dataset.imikolov.DataType.SEQ)():
            read_line = sample[0][1:]
            break
        self.assertEqual(expected, read_line)

    def test_total(self):
        # Ids must be dense: the largest id is len(dict) - 1.
        _, idx = zip(*WORD_DICT.items())
        self.assertEqual(max(idx), len(WORD_DICT) - 1)


if __name__ == '__main__':
    unittest.main()
2,359
33.705882
80
py
Paddle
Paddle-master/python/paddle/dataset/tests/imdb_test.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.imdb
import unittest
import re

# Raw strings so the `\.` escape is a regex escape, not a (deprecated)
# string escape.
TRAIN_POS_PATTERN = re.compile(r"aclImdb/train/pos/.*\.txt$")
TRAIN_NEG_PATTERN = re.compile(r"aclImdb/train/neg/.*\.txt$")
TRAIN_PATTERN = re.compile(r"aclImdb/train/.*\.txt$")

TEST_POS_PATTERN = re.compile(r"aclImdb/test/pos/.*\.txt$")
TEST_NEG_PATTERN = re.compile(r"aclImdb/test/neg/.*\.txt$")
TEST_PATTERN = re.compile(r"aclImdb/test/.*\.txt$")


class TestIMDB(unittest.TestCase):
    # Word dictionary is built lazily and shared between test methods.
    word_idx = None

    def test_build_dict(self):
        # Use `is None`, not `== None`, for the sentinel check.
        if self.word_idx is None:
            self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150)

        self.assertEqual(len(self.word_idx), 7036)

    def check_dataset(self, dataset, expected_size):
        """Verify sample count and that labels alternate 0/1."""
        if self.word_idx is None:
            self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150)

        sum = 0
        for l in dataset(self.word_idx):
            self.assertEqual(l[1], sum % 2)
            sum += 1
        self.assertEqual(sum, expected_size)

    def test_train(self):
        self.check_dataset(paddle.dataset.imdb.train, 25000)

    def test_test(self):
        self.check_dataset(paddle.dataset.imdb.test, 25000)


if __name__ == '__main__':
    unittest.main()
1,821
31.535714
78
py
Paddle
Paddle-master/python/paddle/dataset/tests/mnist_test.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.mnist
import unittest


class TestMNIST(unittest.TestCase):
    def check_reader(self, reader):
        # Drain the reader; each sample must be a flattened 28x28 image.
        # Returns (sample count, largest label observed).
        count = 0
        max_label = 0
        for sample in reader():
            self.assertEqual(sample[0].size, 784)
            if sample[1] > max_label:
                max_label = sample[1]
            count += 1
        return count, max_label

    def test_train(self):
        count, top_label = self.check_reader(paddle.dataset.mnist.train())
        self.assertEqual(count, 60000)
        self.assertEqual(top_label, 9)

    def test_test(self):
        count, top_label = self.check_reader(paddle.dataset.mnist.test())
        self.assertEqual(count, 10000)
        self.assertEqual(top_label, 9)


if __name__ == '__main__':
    unittest.main()
1,412
30.4
74
py
Paddle
Paddle-master/python/paddle/dataset/tests/common_test.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.common
import unittest
import tempfile
import glob
# BUG FIX: test_convert calls recordio.reader() but the module was never
# imported, raising NameError at runtime.
import recordio


class TestCommon(unittest.TestCase):
    def test_md5file(self):
        _, temp_path = tempfile.mkstemp()
        with open(temp_path, 'w') as f:
            f.write("Hello\n")
        self.assertEqual('09f7e02f1290be211da707a266f153b3',
                         paddle.dataset.common.md5file(temp_path))

    def test_download(self):
        yi_avatar = 'https://avatars0.githubusercontent.com/u/1548775?v=3&s=460'
        self.assertEqual(
            paddle.dataset.common.DATA_HOME + '/test/1548775?v=3&s=460',
            paddle.dataset.common.download(yi_avatar, 'test',
                                           'f75287202d6622414c706c36c16f8e0d'))

    def test_split(self):
        def test_reader():
            def reader():
                for x in xrange(10):
                    yield x

            return reader

        # BUG FIX: mkstemp() creates a *file*; paths like temp_path + '/...'
        # need a directory, so use mkdtemp().
        temp_path = tempfile.mkdtemp()
        paddle.dataset.common.split(
            test_reader(), 4, suffix=temp_path + '/test-%05d.pickle')
        # BUG FIX: the glob pattern previously contained the literal text
        # '%05d' and could never match the generated files.
        files = glob.glob(temp_path + '/test-*.pickle')
        # 10 records split 4 per shard -> 3 shard files.
        self.assertEqual(len(files), 3)

    def test_cluster_file_reader(self):
        # BUG FIX: use a directory (mkdtemp) and open the fixture files for
        # *writing*; the old code opened them in the default read mode.
        temp_path = tempfile.mkdtemp()
        for x in xrange(5):
            with open(temp_path + '/%05d.test' % x, 'w') as f:
                f.write('%d\n' % x)
        reader = paddle.dataset.common.cluster_files_reader(
            temp_path + '/*.test', 5, 0)
        for idx, e in enumerate(reader()):
            self.assertEqual(e, str("0"))

    def test_convert(self):
        record_num = 10
        num_shards = 4

        def test_reader():
            def reader():
                for x in xrange(record_num):
                    yield x

            return reader

        path = tempfile.mkdtemp()
        paddle.dataset.common.convert(path,
                                      test_reader(), num_shards,
                                      'random_images')

        files = glob.glob(path + '/random_images-*')
        self.assertEqual(len(files), num_shards)

        recs = []
        for i in range(0, num_shards):
            n = "%s/random_images-%05d-of-%05d" % (path, i, num_shards - 1)
            r = recordio.reader(n)
            while True:
                d = r.read()
                if d is None:
                    break
                recs.append(d)

        recs.sort()
        # BUG FIX: the assertion referenced an undefined name `total`;
        # the intent is to check the number of records read back.
        self.assertEqual(len(recs), record_num)


if __name__ == '__main__':
    unittest.main()
3,111
31.757895
80
py
Paddle
Paddle-master/python/paddle/dataset/tests/voc2012_test.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.voc2012
import unittest


class TestVOC(unittest.TestCase):
    def check_reader(self, reader):
        # Each sample pairs a 3-channel image with a 1-channel label map
        # of the same spatial size; returns the sample count.
        sum = 0
        label = 0
        for l in reader():
            self.assertEqual(l[0].size, 3 * l[1].size)
            sum += 1
        return sum

    # BUG FIX: the tests below called paddle.dataset.voc_seg, which does
    # not exist; the imported module is paddle.dataset.voc2012.
    def test_train(self):
        count = self.check_reader(paddle.dataset.voc2012.train())
        self.assertEqual(count, 2913)

    def test_test(self):
        count = self.check_reader(paddle.dataset.voc2012.test())
        self.assertEqual(count, 1464)

    def test_val(self):
        count = self.check_reader(paddle.dataset.voc2012.val())
        self.assertEqual(count, 1449)


if __name__ == '__main__':
    unittest.main()
1,320
29.72093
74
py
Paddle
Paddle-master/python/paddle/dataset/tests/mq2007_test.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.mq2007
import unittest


class TestMQ2007(unittest.TestCase):
    def test_pairwise(self):
        # Pairwise samples carry two 46-dim feature vectors per label.
        reader = paddle.dataset.mq2007.test(format="pairwise")
        for label, left, right in reader:
            self.assertEqual(left.shape(), (46, ))
            self.assertEqual(right.shape(), (46, ))

    def test_listwise(self):
        # Listwise samples pair equal-length label and query arrays.
        reader = paddle.dataset.mq2007.test(format="listwise")
        for labels, queries in reader:
            self.assertEqual(len(labels), len(queries))


if __name__ == "__main__":
    unittest.main()
1,196
34.205882
74
py
Paddle
Paddle-master/python/paddle/dataset/tests/cifar_test.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.dataset.cifar
import unittest


class TestCIFAR(unittest.TestCase):
    def check_reader(self, reader):
        # Drain the reader; each sample must be a flattened 32x32x3 image.
        # Returns (sample count, largest label observed).
        count = 0
        max_label = 0
        for sample in reader():
            self.assertEqual(sample[0].size, 3072)
            if sample[1] > max_label:
                max_label = sample[1]
            count += 1
        return count, max_label

    def test_test10(self):
        count, top_label = self.check_reader(paddle.dataset.cifar.test10())
        self.assertEqual(count, 10000)
        self.assertEqual(top_label, 9)

    def test_train10(self):
        count, top_label = self.check_reader(paddle.dataset.cifar.train10())
        self.assertEqual(count, 50000)
        self.assertEqual(top_label, 9)

    def test_test100(self):
        count, top_label = self.check_reader(paddle.dataset.cifar.test100())
        self.assertEqual(count, 10000)
        self.assertEqual(top_label, 99)

    def test_train100(self):
        count, top_label = self.check_reader(paddle.dataset.cifar.train100())
        self.assertEqual(count, 50000)
        self.assertEqual(top_label, 99)


if __name__ == '__main__':
    unittest.main()
1,859
31.631579
74
py
Paddle
Paddle-master/python/paddle/fluid/unique_name.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import contextlib import sys __all__ = ['generate', 'switch', 'guard', 'UniqueNameGenerator'] class UniqueNameGenerator(object): """ Generate unique name with prefix. Args: prefix(str): The generated name prefix. All generated name will be started with this prefix. """ def __init__(self, prefix=None): self.ids = collections.defaultdict(int) if prefix is None: prefix = "" self.prefix = prefix def __call__(self, key): """ Generate unique names with prefix Args: key(str): The key of return string. Returns(str): A unique string with the prefix """ tmp = self.ids[key] self.ids[key] += 1 return self.prefix + "_".join([key, str(tmp)]) generator = UniqueNameGenerator() def generate(key): return generator(key) def switch(new_generator=None): global generator old = generator if new_generator is None: generator = UniqueNameGenerator() else: generator = new_generator return old @contextlib.contextmanager def guard(new_generator=None): if isinstance(new_generator, basestring): new_generator = UniqueNameGenerator(new_generator) old = switch(new_generator) yield switch(old)
1,953
25.053333
74
py
Paddle
Paddle-master/python/paddle/fluid/lod_tensor.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import core
import numpy as np

__all__ = ['create_lod_tensor', 'create_random_int_lodtensor']


def _validate_lod(lod, tensor_height=-1):
    """Check whether the input length-based lod info is valid.

    There are several things to check:
    1. lod should be a list of lists. Empty list is fine.
    2. The length of each sublist (a lod level) should be at least one.
    3. Each element in each lod level should be an integer greater than 0.
    4. The sum of one lod level should be equal to the length of the next
        lod level.
    5. The sum of the last lod level should be equal to the tensor height.
        Bypass this check if user does not provide tensor_height as input.

    Args:
        lod: the length-based lod info, e.g., [[2, 3], [2, 1, 2, 3, 4]].
        tensor_height: the outermost dimension of the tensor with which the
            input lod is associated with.

    Returns:
        A boolean indicating whether the input lod is valid or not.
    """
    assert isinstance(lod, list), "lod should be a list"
    # Empty lod is fine
    if len(lod) == 0:
        return True

    lod_sum = []
    for level in lod:
        assert isinstance(level, list), "each item in lod should be a list"
        # Each level of lod should have at least one length info
        if len(level) < 1:
            return False
        level_sum = 0
        for lod_len in level:
            # Each length in a level should be > 0
            if lod_len <= 0:
                return False
            level_sum += lod_len
        lod_sum.append(level_sum)

    for idx, val in enumerate(lod_sum[:-1]):
        # Each level's sum should be equal to
        # the number of items in the next level
        if val != len(lod[idx + 1]):
            return False

    if tensor_height == -1:
        return True
    else:
        # Last level's sum should be equal to the tensor height
        return lod_sum[-1] == tensor_height


def _convert_lod(lod):
    """Convert a length-based lod to a offset-based lod.

    If the length-based lod is [[2, 3], [2, 1, 2, 3, 4]],
    then the offset-based lod is [[0, 2, 5], [0, 2, 3, 5, 8, 12]].

    Args:
        lod: a length-based lod info.

    Returns:
        A list of lists as the offset-based lod converted to from the input
        lod.
    """
    new_lod = []
    for level in lod:
        # Running prefix sum: each level starts with offset 0.
        cur_len = 0
        new_level = [cur_len]
        for lod_len in level:
            cur_len += lod_len
            new_level.append(cur_len)
        new_lod.append(new_level)
    return new_lod


def create_lod_tensor(data, lod, place):
    """Create a lod tensor from a numpy array, a list, or an existing lod
    tensor.

    Create a lod tensor by doing the following:
    1. Check that the length-based input lod is valid.
    2. Convert the length-based lod to a offset-based LoD.
    3. Copy the data from a numpy array, a list or a existing lod tensor to
       CPU or GPU device (based on input place).
    4. Set the level of detail (LoD) using the offset-based LoD.

    Use example:
    Suppose we want LoDTensor to hold data for sequences of word, where each
    word is represented by an integer. If we want to create a LoDTensor to
    represent two sentences, one of 2 words, and one of 3 words.

    Then 'data' can be a numpy array of integers with shape (5, 1).
    'lod' will be [[2, 3]], indicating the length(# of words) in each
    sentence. This length-based input lod [[2, 3]] will be converted to
    offset-based lod [[0, 2, 5]] inside the function call.

    Please refer to
    github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/lod_tensor.md
    for more details regarding LoD.

    Args:
        data: a numpy array or a LoDTensor or a list holding the data to be
            copied.
        lod: a list of lists indicating the length-based LoD info specified
            by the user.
        place: CPU or GPU place indicating where the data in the new
            LoDTensor will be stored.

    Returns:
        A fluid LoDTensor object with tensor data and lod info.
    """
    if isinstance(data, core.LoDTensor):
        # Recurse with the tensor's contents as a plain numpy array.
        return create_lod_tensor(np.array(data), lod, place)
    elif isinstance(data, list):
        # When input data is a list, it only deal with the case where the base element
        # is an index of shape [1] and dtype int64 (e.g., word id). Hence, the generated
        # LoDTensor will be of shape [n, 1] and dtype int64, where `n` is the total number
        # of words or other indexes in the sequence.
        new_lod = []
        for seq in data:
            new_lod.append(len(seq))
        assert [new_lod] == lod, "data and lod do not match"
        flattened_data = np.concatenate(data, axis=0).astype("int64")
        flattened_data = flattened_data.reshape([len(flattened_data), 1])
        return create_lod_tensor(flattened_data, lod, place)
    elif isinstance(data, np.ndarray):
        assert _validate_lod(lod,
                             data.shape[0]), "the provided lod info is invalid"
        tensor = core.LoDTensor()
        tensor.set(data, place)
        # The C++ side stores the offset-based representation.
        tensor.set_lod(_convert_lod(lod))
        return tensor
    else:
        raise TypeError(
            "data should be either a LoDTensor, a Numpy array or a list")


def create_random_int_lodtensor(lod, base_shape, place, low, high):
    """Create a LoDTensor containing random integers.

    This function is frequently used in the book examples. So we revised it
    based on the new create_lod_tensor API and put it here in the lod_tensor
    module to simplify the code.

    The function does the following:
    1. Calculate the overall shape of the LoDTensor based on the length-based
       'lod' input and the shape of the basic element in 'base_shape'.
    2. Create a numpy array of this shape.
    3. Create the LoDTensor using create_lod_tensor API.

    Suppose we want LoDTensor to hold data for sequences of word, where each
    word is represented by an integer. If we want to create a LoDTensor to
    represent two sentences, one of 2 words, and one of 3 words. Then
    'base_shape' is [1], input length-based 'lod' is [[2, 3]]. Then the
    overall shape of the LoDTensor would be [5, 1], holding 5 words for two
    sentences.

    Args:
        data: a numpy array or a LoDTensor holding the data to be copied.
        lod: a list of lists indicating the length-based LoD info specified
            by the user.
        base_shape: the shape of the basic element to be held by the
            LoDTensor.
        place: CPU or GPU place indicating where the data in the new
            LoDTensor will be stored.
        low: the lower bound of the random integers.
        high: the upper bound of the random integers.

    Returns:
        A fluid LoDTensor object with tensor data and lod info.
    """
    assert isinstance(base_shape, list), "base_shape should be a list"
    converted_lod = _convert_lod(lod)
    # append the total number of basic elements to the front of its shape
    overall_shape = [converted_lod[-1][-1]] + base_shape
    # the range of integer data elements is [low, high]
    # NOTE(review): np.random.random_integers samples *inclusive* of both
    # bounds and is deprecated in newer numpy in favor of
    # randint(low, high + 1) -- confirm before upgrading numpy.
    data = np.random.random_integers(low, high, overall_shape).astype("int64")
    return create_lod_tensor(data, lod, place)
7,802
40.068421
94
py
Paddle
Paddle-master/python/paddle/fluid/concurrency.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from layers.control_flow import BlockGuard, equal
from .framework import Operator
from layer_helper import LayerHelper, unique_name
from layers import fill_constant
import core

__all__ = [
    'Go', 'make_channel', 'channel_send', 'channel_recv', 'channel_close',
    'Select'
]


class Go(BlockGuard):
    """Context manager that captures the ops built inside the `with` block
    and wraps them in a single `go` operator appended to the parent block
    (CSP-style goroutine, see the `Select`/channel helpers below)."""

    def __init__(self, name=None):
        self.helper = LayerHelper("go", name=name)
        super(Go, self).__init__(self.helper.main_program)

    def __enter__(self):
        super(Go, self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # Propagate the exception; do not emit a go op for a failed block.
            return False
        self.construct_go_op()
        return super(Go, self).__exit__(exc_type, exc_val, exc_tb)

    def construct_go_op(self):
        # Build the `go` op in the parent block, with the captured sub-block
        # attached as the `sub_block` attribute.
        main_program = self.helper.main_program
        go_block = main_program.current_block()
        parent_block = main_program.block(main_program.current_block()
                                          .parent_idx)

        inner_outputs = set()
        x_name_list = set()
        for op in go_block.ops:
            # Iterate over all operators, get all the inputs
            # and add as input to the Go operator.
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    # Only names produced outside the go block are true inputs.
                    if in_var_name not in inner_outputs:
                        x_name_list.add(in_var_name)

            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    inner_outputs.add(out_var_name)

        # Iterate over all operators , get all the outputs
        # add to the output list of Go operator only if
        # they exist in the parent block.
        # NOTE(review): `out_vars` is computed but never wired into the op
        # below (outputs={}) — looks like dead code; confirm before removing.
        out_vars = []
        for inner_out_name in inner_outputs:
            if inner_out_name in parent_block.vars:
                out_vars.append(parent_block.var(inner_out_name))

        parent_block.append_op(
            type='go',
            inputs={
                'X':
                [parent_block.var_recursive(x_name) for x_name in x_name_list]
            },
            outputs={},
            attrs={'sub_block': go_block})


class SelectCase(object):
    """One case of a `Select` statement: a default case, a channel send, or
    a channel receive. Used as a context manager; the ops built inside the
    `with` body form the case's sub-block."""

    # Case kinds, serialized into the select op's `cases` attribute.
    DEFAULT = 0
    SEND = 1
    RECEIVE = 2

    def __init__(self,
                 select,
                 case_idx,
                 case_to_execute,
                 channel_action_fn=None,
                 channel=None,
                 value=None,
                 is_copy=False):
        self.select = select
        self.helper = LayerHelper('conditional_block')
        self.main_program = self.helper.main_program
        self.is_scalar_condition = True

        self.case_to_execute = case_to_execute
        self.idx = case_idx

        # Since we aren't going to use the `channel_send` or `channel_recv`
        # functions directly, we just need to capture the name.
        self.action = (self.SEND
                       if channel_action_fn.__name__ == ('channel_send') else
                       self.RECEIVE) if channel_action_fn else self.DEFAULT

        X = value
        if self.action == self.SEND and is_copy:
            # We create a copy of the data we want to send, so the original
            # variable is still usable after the (moving) send.
            copied_X = self.select.parent_block.create_var(
                name=unique_name.generate(value.name + '_copy'),
                type=value.type,
                dtype=value.dtype,
                shape=value.shape,
                lod_level=value.lod_level,
                capacity=value.capacity
                if hasattr(value, 'capacity') else None, )

            self.select.parent_block.append_op(
                type="assign", inputs={"X": value}, outputs={"Out": copied_X})
            X = copied_X

        self.value = X
        self.channel = channel

    def __enter__(self):
        # Each case body is recorded into its own freshly created block.
        self.block = self.main_program.create_block()

    def construct_op(self):
        """Emit the conditional_block op guarding this case and return the
        case's serialized form: 'idx,action,channel_name,value_name'."""
        main_program = self.helper.main_program
        cases_block = main_program.current_block()

        inner_outputs = set()
        input_set = set()
        # NOTE(review): `params` is never populated, so `param_list` below is
        # always empty — confirm whether that is intentional.
        params = set()

        for op in self.block.ops:
            # Iterate over all operators, get all the inputs
            # and add as input to the SelectCase operator.
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in inner_outputs:
                        input_set.add(in_var_name)

            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    inner_outputs.add(out_var_name)

        param_list = [
            cases_block.var(each_name) for each_name in params
            if each_name not in input_set
        ]

        # Iterate over all operators, get all the outputs
        # add to the output list of SelectCase operator only if
        # they exist in the parent block.
        out_vars = []
        for inner_out_name in inner_outputs:
            if inner_out_name in cases_block.vars:
                out_vars.append(cases_block.var(inner_out_name))

        # First, create an op that will determine whether or not this is the
        # conditional variable to execute.
        should_execute_block = equal(
            fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.INT32, value=self.idx),
            self.case_to_execute)

        step_scope = cases_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)

        cases_block.append_op(
            type='conditional_block',
            inputs={'X': [should_execute_block],
                    'Params': param_list},
            outputs={'Out': out_vars,
                     'Scope': [step_scope]},
            attrs={
                'sub_block': self.block,
                'is_scalar_condition': self.is_scalar_condition
            })

        return '%s,%s,%s,%s' % (self.idx, self.action, self.channel.name
                                if self.channel else '', self.value.name
                                if self.value else '')

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Leave the case's block regardless of success.
        self.main_program.rollback()
        if exc_type is not None:
            return False  # re-raise exception
        return True


class Select(BlockGuard):
    """Go-style `select` statement: collects `case`/`default` sub-blocks and,
    on exit, emits a single `select` op that picks one ready case at runtime
    (communicated via the `case_to_execute` variable)."""

    def __init__(self, name=None):
        self.helper = LayerHelper('select', name=name)
        self.parent_block = self.helper.main_program.current_block()
        self.cases = []

        super(Select, self).__init__(self.helper.main_program)
        # Runtime slot the select op fills with the chosen case's index;
        # initialized to -1 (no case chosen yet).
        self.case_to_execute = fill_constant(
            shape=[1], dtype=core.VarDesc.VarType.INT32, value=-1)

    def __enter__(self):
        super(Select, self).__enter__()
        return self

    def case(self, channel_action_fn, channel, value, is_copy=False):
        """Create a new block for this condition.
        """
        select_case = SelectCase(self,
                                 len(self.cases), self.case_to_execute,
                                 channel_action_fn, channel, value, is_copy)

        self.cases.append(select_case)

        return select_case

    def default(self):
        """Create a default case block for this condition.
        """
        default_case = SelectCase(self, len(self.cases), self.case_to_execute)

        self.cases.append(default_case)

        return default_case

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            return False

        # Create a select op and another block to wrap its
        # case blocks.
        select_block = self.helper.main_program.current_block()
        parent_block = self.helper.main_program.block(select_block.parent_idx)

        # Construct each case op, inside the newly created select block.
        serialized_cases = []
        for case in self.cases:
            serialized_cases.append(case.construct_op())

        intermediate = set()
        params = set()

        # Gather names referenced inside every case's sub-block: names read
        # before being produced become `params`; produced names are
        # `intermediate`.
        for case_block in select_block.ops:
            if case_block.attrs and 'sub_block' in case_block.attrs:
                for each_op in case_block.attrs['sub_block'].ops:
                    assert isinstance(each_op, Operator)
                    for iname in each_op.input_names:
                        for in_var_name in each_op.input(iname):
                            if in_var_name not in intermediate:
                                params.add(in_var_name)

                    for oname in each_op.output_names:
                        for out_var_name in each_op.output(oname):
                            intermediate.add(out_var_name)

        out_list = [
            parent_block.var(var_name) for var_name in parent_block.vars
            if var_name in intermediate
        ]

        X = [select_block.var_recursive(x_name) for x_name in params]

        # Needs to be used by `equal` inside the cases block.
        X.append(self.case_to_execute)

        # Construct the select op.
        parent_block.append_op(
            type='select',
            inputs={'X': X,
                    'case_to_execute': self.case_to_execute},
            attrs={'sub_block': select_block,
                   'cases': serialized_cases},
            outputs={'Out': out_list})

        return super(Select, self).__exit__(exc_type, exc_val, exc_tb)


def make_channel(dtype, capacity=0):
    """Create a "channel" of a defined data type for concurrent programs.

    Unbuffered channels (capacity 0) block on send until the value is
    received; buffered channels do not block on send until full. Use with
    `channel_send`, `channel_recv`, `channel_close`, and `Go`.

    Args:
        dtype (ParamAttr|string): Data type of the data sent in the channel.
            This data type should be the string name of a numpy data type.
        capacity (ParamAttr|int): Size of the channel. Defaults to 0 for
            an unbuffered channel.

    Returns:
        Variable: The channel variable that can be used to send and receive
            data of the defined dtype.

    Examples:
        .. code-block:: python

          ch = fluid.make_channel(dtype='int32', capacity=10)
          fluid.channel_send(ch, 100)
          fluid.channel_close(ch)
    """
    helper = LayerHelper('channel_create', **locals())
    main_program = helper.main_program
    make_channel_block = main_program.current_block()

    # Make a channel variable (using the channel data type) and make sure it
    # persists into the global scope.
    channel = helper.create_variable(
        name=unique_name.generate('channel'),
        type=core.VarDesc.VarType.CHANNEL,
        persistable=True)

    create_channel_op = make_channel_block.append_op(
        type="channel_create",
        outputs={"Out": channel},
        attrs={"data_type": dtype,
               "capacity": capacity})

    return channel


def channel_send(channel, value, is_copy=False):
    """Send a value through a channel variable created by `make_channel`.

    Args:
        channel (Variable|Channel): Channel variable created using
            `make_channel`.
        value (Variable): Value to send to the channel.
        is_copy (bool): Copy data while channel send. If False, then data
            is moved. The input cannot be used after move. (default False)

    Examples:
        .. code-block:: python

          ch = fluid.make_channel(dtype='int32', capacity=10)
          fluid.channel_send(ch, 100)
    """
    helper = LayerHelper('channel_send', **locals())
    main_program = helper.main_program
    channel_send_block = main_program.current_block()

    X = value

    if is_copy:
        # Copy into a fresh variable so the caller keeps ownership of `value`.
        copied_X = helper.create_variable(
            name=unique_name.generate(value.name + '_copy'),
            type=value.type,
            dtype=value.dtype,
            shape=value.shape,
            lod_level=value.lod_level,
            capacity=value.capacity if hasattr(value, 'capacity') else None)

        assign_op = channel_send_block.append_op(
            type="assign", inputs={"X": value}, outputs={"Out": copied_X})
        X = copied_X

    channel_send_block.append_op(
        type="channel_send", inputs={
            "Channel": channel,
            "X": X,
        })


def channel_recv(channel, return_value):
    """Receive a value through a channel variable.

    Args:
        channel (Variable|Channel): Channel variable created using
            `make_channel`.
        return_value (Variable): Variable to set as a result of running the
            channel_recv_op.

    Returns:
        Variable: The received value from the channel.
        Variable: The boolean status on whether or not the channel
            successfully received the passed value.

    Examples:
        .. code-block:: python

          ch = fluid.make_channel(dtype='int32', capacity=10)
          with fluid.Go():
            returned_value, return_status = fluid.channel_recv(ch, 'int32')
    """
    helper = LayerHelper('channel_recv', **locals())
    main_program = helper.main_program
    channel_recv_block = main_program.current_block()

    status = helper.create_variable(
        name=unique_name.generate('status'),
        type=core.VarDesc.VarType.LOD_TENSOR,
        dtype=core.VarDesc.VarType.BOOL)
    channel_recv_op = channel_recv_block.append_op(
        type="channel_recv",
        inputs={"Channel": channel},
        outputs={"Out": return_value,
                 "Status": status})

    return return_value, status


def channel_close(channel):
    """Close a channel created using `make_channel`.

    Args:
        channel (Variable|Channel): Channel variable created using
            `make_channel`.

    Examples:
        .. code-block:: python

          ch = fluid.make_channel(dtype='int32', capacity=10)
          ...
          fluid.channel_close(ch)
    """
    helper = LayerHelper('channel_close', **locals())
    main_program = helper.main_program
    channel_close_block = main_program.current_block()

    channel_close_op = channel_close_block.append_op(
        type="channel_close", inputs={"Channel": channel})
15,874
34.121681
87
py
Paddle
Paddle-master/python/paddle/fluid/op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid.core as core
import paddle.fluid.proto.framework_pb2 as framework_pb2


def get_all_op_protos():
    """
    Get all registered op proto from PaddlePaddle C++ end.
    :return: A list of registered OpProto.
    """
    protostrs = core.get_all_op_protos()
    ret_values = []
    for pbstr in protostrs:
        op_proto = framework_pb2.OpProto.FromString(str(pbstr))
        ret_values.append(op_proto)
    return ret_values


def is_str(s):
    # True for both byte and text strings.
    # NOTE(review): `unicode` is Python-2-only; this module predates py3.
    return isinstance(s, str) or isinstance(s, unicode)


class OpDescCreationMethod(object):
    """
    Convert the user's input(only keyword arguments are supported) to OpDesc
    based on the OpProto.

    :param op_proto: The OpProto object.
    :type op_proto: op_proto_pb2.OpProto
    """

    def __init__(self, op_proto):
        if not isinstance(op_proto, framework_pb2.OpProto):
            raise TypeError(
                "Type of op_proto should be OpProto in PaddlePaddle.")
        self.__op_proto__ = op_proto

    def __call__(self, *args, **kwargs):
        """
        Convert user's input to OpDesc. Only keyword arguments are supported.
        :return: The OpDesc based on user input.
        :rtype: op_desc_pb2.OpDesc
        """
        if len(args) != 0:
            raise ValueError("Only keyword arguments are supported.")
        op_desc = framework_pb2.OpDesc()
        # Inputs: a bare string is promoted to a one-element list; a
        # non-duplicable slot accepts at most one argument.
        for input_parameter in self.__op_proto__.inputs:
            input_arguments = kwargs.get(input_parameter.name, [])
            if is_str(input_arguments):
                input_arguments = [input_arguments]

            if not input_parameter.duplicable and len(input_arguments) > 1:
                raise ValueError(
                    "Input %s expects only one input, but %d are given." %
                    (input_parameter.name, len(input_arguments)))

            ipt = op_desc.inputs.add()
            ipt.parameter = input_parameter.name
            ipt.arguments.extend(input_arguments)

        # Outputs: same promotion/arity rules as inputs.
        for output_parameter in self.__op_proto__.outputs:
            output_arguments = kwargs.get(output_parameter.name, [])
            if is_str(output_arguments):
                output_arguments = [output_arguments]

            if not output_parameter.duplicable and len(output_arguments) > 1:
                raise ValueError(
                    "Output %s expects only one output, but %d are given." %
                    (output_parameter.name, len(output_arguments)))

            out = op_desc.outputs.add()
            out.parameter = output_parameter.name
            out.arguments.extend(output_arguments)

        # Types
        op_desc.type = self.__op_proto__.type

        # Attrs: copy each user-supplied attribute into the proto field that
        # matches its declared type; generated attributes are skipped.
        for attr in self.__op_proto__.attrs:
            if attr.generated:
                continue
            user_defined_attr = kwargs.get(attr.name, None)
            if user_defined_attr is not None:
                new_attr = op_desc.attrs.add()
                new_attr.name = attr.name
                new_attr.type = attr.type
                if attr.type == framework_pb2.INT:
                    new_attr.i = user_defined_attr
                elif attr.type == framework_pb2.FLOAT:
                    new_attr.f = user_defined_attr
                elif attr.type == framework_pb2.STRING:
                    new_attr.s = user_defined_attr
                elif attr.type == framework_pb2.BOOLEAN:
                    new_attr.b = user_defined_attr
                elif attr.type == framework_pb2.INTS:
                    new_attr.ints.extend(user_defined_attr)
                elif attr.type == framework_pb2.FLOATS:
                    new_attr.floats.extend(user_defined_attr)
                elif attr.type == framework_pb2.STRINGS:
                    new_attr.strings.extend(user_defined_attr)
                elif attr.type == framework_pb2.BOOLEANS:
                    new_attr.bools.extend(user_defined_attr)
                elif attr.type == framework_pb2.INT_PAIRS:
                    for p in user_defined_attr:
                        pair = new_attr.int_pairs.add()
                        pair.first = p[0]
                        pair.second = p[1]
                else:
                    raise NotImplementedError(
                        "A not supported attribute type: %s." %
                        (str(attr.type)))

        return op_desc

    @staticmethod
    def any_is_true(generator):
        """
        Reduce a boolean array to a single boolean parameter. If any element
        in the array is True, this function will return True, otherwise False.
        """
        for flag in generator:
            if flag:
                return True
        return False


class OpInfo(object):
    # Plain record describing one registered op: its creation callable plus
    # (name, duplicable) pairs for inputs/outputs and attribute names.
    def __init__(self, name, method, inputs, outputs, attrs):
        self.name = name
        self.method = method
        self.inputs = inputs
        self.outputs = outputs
        self.attrs = attrs


def create_op_creation_method(op_proto):
    """
    Generate op creation method for an OpProto.
    """
    method = OpDescCreationMethod(op_proto)

    def __impl__(*args, **kwargs):
        # Build the OpDesc from keyword args, then instantiate the C++ op.
        opdesc = method(*args, **kwargs)
        return core.Operator.create(opdesc.SerializeToString())

    return OpInfo(
        method=__impl__,
        name=op_proto.type,
        inputs=[(var.name, var.duplicable) for var in op_proto.inputs],
        outputs=[(var.name, var.duplicable) for var in op_proto.outputs],
        attrs=[attr.name for attr in op_proto.attrs])


class OperatorFactory(object):
    """Factory mapping op type names to creation methods for every op
    registered on the C++ side. Call it as `factory(type, **kwargs)` or
    `factory(type=..., **kwargs)`."""

    def __init__(self):
        self.op_methods = dict()

        for op_proto in get_all_op_protos():
            method = create_op_creation_method(op_proto)
            self.op_methods[method.name] = method

    def __call__(self, *args, **kwargs):
        # The op type may be given either as the single positional argument
        # or as the `type` keyword — never both.
        if "type" in kwargs:
            if len(args) != 0:
                raise ValueError(
                    "Except the argument \"type\","
                    "all of the other arguments should be keyword arguments.")
            t = kwargs.pop("type")
        else:
            if len(args) != 1:
                raise ValueError(
                    "Except the argument \"type\","
                    "all of the other arguments should be keyword arguments.")
            t = args[0]

        return self.get_op_info(t).method(**kwargs)

    def types(self):
        return self.op_methods.keys()

    def get_op_info(self, t):
        if t not in self.op_methods:
            raise ValueError("The operator: %s is not registered." % t)
        return self.op_methods.get(t)

    def get_op_input_names(self, type):
        # NOTE(review): py2 `map` returns a list; under py3 this would be an
        # iterator.
        return map(lambda x: x[0], self.get_op_info(type).inputs)

    def get_op_inputs(self, type):
        return self.get_op_info(type).inputs

    def get_op_output_names(self, type):
        return map(lambda x: x[0], self.get_op_info(type).outputs)

    def get_op_outputs(self, type):
        return self.get_op_info(type).outputs

    def get_op_attr_names(self, type):
        return self.get_op_info(type).attrs


class __RecurrentOp__(object):
    """Callable wrapper that builds a C++ RecurrentOp from its cached proto."""
    __proto__ = None
    type = "recurrent"

    def __init__(self):
        # cache recurrent_op's proto
        if self.__proto__ is None:
            for op_proto in get_all_op_protos():
                if op_proto.type == self.type:
                    self.__proto__ = op_proto

    def __call__(self, *args, **kwargs):
        if self.type not in args and "type" not in kwargs:
            kwargs["type"] = self.type
        # create proto
        create_method = OpDescCreationMethod(self.__proto__)
        proto = create_method(*args, **kwargs)
        # create rnnop
        return core.RecurrentOp.create(proto.SerializeToString())


class __DynamicRecurrentOp__(object):
    """Callable wrapper building a C++ DynamicRecurrentOp from its proto."""
    __proto__ = None
    type = "dynamic_recurrent"

    def __init__(self):
        # cache recurrent_op's proto
        if self.__proto__ is None:
            for op_proto in get_all_op_protos():
                if op_proto.type == self.type:
                    self.__proto__ = op_proto

    def __call__(self, *args, **kwargs):
        if self.type not in args and "type" not in kwargs:
            kwargs["type"] = self.type
        # create proto
        create_method = OpDescCreationMethod(self.__proto__)
        proto = create_method(*args, **kwargs)
        # create rnnop
        return core.DynamicRecurrentOp.create(proto.SerializeToString())


class __CondOp__(object):
    """Callable wrapper building a C++ CondOp from its cached proto."""
    __proto__ = None
    type = "cond"

    def __init__(self):
        # cache recurrent_op's proto
        if self.__proto__ is None:
            for op_proto in get_all_op_protos():
                if op_proto.type == self.type:
                    self.__proto__ = op_proto

    def __call__(self, *args, **kwargs):
        if self.type not in args and "type" not in kwargs:
            kwargs["type"] = self.type
        # create proto
        create_method = OpDescCreationMethod(self.__proto__)
        proto = create_method(*args, **kwargs)
        # create condop
        return core.CondOp.create(proto.SerializeToString())


Operator = OperatorFactory()  # The default global factory
RecurrentOp = __RecurrentOp__()
DynamicRecurrentOp = __DynamicRecurrentOp__()
CondOp = __CondOp__()
9,820
33.826241
79
py
Paddle
Paddle-master/python/paddle/fluid/graphviz.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
import subprocess
import logging


def crepr(v):
    # Quote strings for DOT output; everything else via str().
    # NOTE(review): `unicode` is Python-2-only.
    if type(v) is str or type(v) is unicode:
        return '"%s"' % v
    return str(v)


class Rank(object):
    """A DOT `rank` group: nodes forced onto the same rank in the layout."""

    def __init__(self, kind, name, priority):
        '''
        kind: str
        name: str
        priority: int
        '''
        self.kind = kind
        self.name = name
        self.priority = priority
        self.nodes = []

    def __str__(self):
        if not self.nodes:
            return ''

        return '{' + 'rank={};'.format(self.kind) + \
               ','.join([node.name for node in self.nodes]) + '}'


class Graph(object):
    """In-memory DOT graph: collects nodes, edges and rank groups, renders
    to DOT text via __str__, and compiles to PDF with the `dot` binary."""

    # Class-wide counter used to give every rank group a unique name.
    rank_counter = 0

    def __init__(self, title, **attrs):
        self.title = title
        self.attrs = attrs
        self.nodes = []
        self.edges = []
        self.rank_groups = {}

    def code(self):
        return self.__str__()

    def rank_group(self, kind, priority):
        # Register a new rank group and return its generated name; nodes
        # opt in by passing rank=<name> to node().
        name = "rankgroup-%d" % Graph.rank_counter
        Graph.rank_counter += 1
        rank = Rank(kind, name, priority)
        self.rank_groups[name] = rank
        return name

    def node(self, label, prefix, description="", **attrs):
        node = Node(label, prefix, description, **attrs)

        if 'rank' in attrs:
            # 'rank' routes the node into a rank group; it is not a DOT attr.
            rank = self.rank_groups[attrs['rank']]
            del attrs['rank']
            rank.nodes.append(node)
        self.nodes.append(node)
        return node

    def edge(self, source, target, **attrs):
        edge = Edge(source, target, **attrs)
        self.edges.append(edge)
        return edge

    def compile(self, dot_path):
        # Write the DOT source and invoke `dot` to render a PDF next to it.
        # NOTE(review): the file handle is never closed, and Popen is fired
        # without waiting — rendering is best-effort/asynchronous.
        file = open(dot_path, 'w')
        file.write(self.__str__())
        image_path = os.path.join(
            os.path.dirname(dot_path), dot_path[:-3] + "pdf")

        cmd = ["dot", "-Tpdf", dot_path, "-o", image_path]
        subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        logging.warning("write block debug graph to {}".format(image_path))
        return image_path

    def show(self, dot_path):
        # Compile, then open the PDF with the macOS `open` command.
        image = self.compile(dot_path)
        cmd = ["open", image]
        subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

    def _rank_repr(self):
        # NOTE(review): `cmp=` is Python-2-only, and this comparator returns
        # a bool rather than -1/0/1 — the resulting order is unreliable.
        ranks = sorted(
            self.rank_groups.items(),
            cmp=lambda a, b: a[1].priority > b[1].priority)
        repr = []
        for x in ranks:
            repr.append(str(x[1]))
        return '\n'.join(repr) + '\n'

    def __str__(self):
        reprs = [
            'digraph G {',
            'title = {}'.format(crepr(self.title)),
        ]

        for attr in self.attrs:
            reprs.append("{key}={value};".format(
                key=attr, value=crepr(self.attrs[attr])))

        reprs.append(self._rank_repr())

        # NOTE(review): shuffling mutates self.nodes and makes the emitted
        # DOT non-deterministic between calls — confirm this is intended.
        random.shuffle(self.nodes)
        reprs += [str(node) for node in self.nodes]

        for x in self.edges:
            reprs.append(str(x))

        reprs.append('}')

        return '\n'.join(reprs)


class Node(object):
    """A DOT node; names are auto-generated as '<prefix>_<counter>'."""

    # Class-wide counter shared by all Graph instances.
    counter = 1

    def __init__(self, label, prefix, description="", **attrs):
        self.label = label
        self.name = "%s_%d" % (prefix, Node.counter)
        self.description = description
        self.attrs = attrs
        Node.counter += 1

    def __str__(self):
        reprs = '{name} [label={label} {extra} ];'.format(
            name=self.name,
            label=self.label,
            extra=',' + ','.join("%s=%s" % (key, crepr(value))
                                 for key, value in self.attrs.items())
            if self.attrs else "")
        return reprs


class Edge(object):
    def __init__(self, source, target, **attrs):
        '''
        Link source to target.
        :param source: Node
        :param target: Node
        :param graph: Graph
        :param attrs: dic
        '''
        self.source = source
        self.target = target
        self.attrs = attrs

    def __str__(self):
        repr = "{source} -> {target} {extra}".format(
            source=self.source.name,
            target=self.target.name,
            extra="" if not self.attrs else
            "[" + ','.join("{}={}".format(attr[0], crepr(attr[1]))
                           for attr in self.attrs.items()) + "]")
        return repr


class GraphPreviewGenerator(object):
    '''
    Generate a graph image for ONNX proto.
    '''

    def __init__(self, title):
        # init graphviz graph
        self.graph = Graph(
            title,
            layout="dot",
            concentrate="true",
            rankdir="TB", )

        # Three rank groups: ops, parameters, and plain arguments.
        self.op_rank = self.graph.rank_group('same', 2)
        self.param_rank = self.graph.rank_group('same', 1)
        self.arg_rank = self.graph.rank_group('same', 0)

    def __call__(self, path='temp.dot', show=False):
        if not show:
            self.graph.compile(path)
        else:
            self.graph.show(path)

    def add_param(self, name, data_type, highlight=False):
        # HTML-like DOT label rendering the name and dtype in a small table.
        label = '\n'.join([
            '<<table cellpadding="5">',
            '  <tr>',
            '    <td bgcolor="#2b787e">',
            '    <b>',
            name,
            '    </b>',
            '    </td>',
            '  </tr>',
            '  <tr>',
            '    <td>',
            str(data_type),
            # NOTE(review): missing comma after '</td>' — the next string is
            # silently concatenated onto it. Likely unintended; preserved
            # here to keep behavior identical.
            '    </td>'
            '  </tr>',
            '</table>>',
        ])
        return self.graph.node(
            label,
            prefix="param",
            description=name,
            shape="none",
            style="rounded,filled,bold",
            width="1.3",
            color="#148b97" if not highlight else "orange",
            fontcolor="#ffffff",
            fontname="Arial")

    def add_op(self, opType, **kwargs):
        highlight = False
        if 'highlight' in kwargs:
            highlight = kwargs['highlight']
            del kwargs['highlight']
        return self.graph.node(
            "<<B>%s</B>>" % opType,
            prefix="op",
            description=opType,
            shape="box",
            style="rounded, filled, bold",
            color="#303A3A" if not highlight else "orange",
            fontname="Arial",
            fontcolor="#ffffff",
            width="1.3",
            height="0.84", )

    def add_arg(self, name, highlight=False):
        return self.graph.node(
            crepr(name),
            prefix="arg",
            description=name,
            shape="box",
            style="rounded,filled,bold",
            fontname="Arial",
            fontcolor="#999999",
            color="#dddddd" if not highlight else "orange")

    def add_edge(self, source, target, **kwargs):
        highlight = False
        if 'highlight' in kwargs:
            highlight = kwargs['highlight']
            del kwargs['highlight']
        # NOTE(review): "#00000" is a 5-digit color — probably meant
        # "#000000"; preserved as-is.
        return self.graph.edge(
            source,
            target,
            color="#00000" if not highlight else "orange",
            **kwargs)
7,660
27.585821
75
py
Paddle
Paddle-master/python/paddle/fluid/average.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import warnings
"""
Pure-Python running averages. These helpers do not touch Paddle's Program
or the NN model configuration in any way; they are plain wrappers around
ordinary Python arithmetic.
"""

__all__ = ["WeightedAverage"]


def _is_number_(var):
    # A "number" is a Python int/float, or a numpy array with exactly one
    # element (shape (1,)).
    if isinstance(var, (int, float)):
        return True
    return isinstance(var, np.ndarray) and var.shape == (1, )


def _is_number_or_matrix_(var):
    # Any numpy ndarray counts as a matrix; otherwise fall back to the
    # scalar check above.
    if isinstance(var, np.ndarray):
        return True
    return _is_number_(var)


class WeightedAverage(object):
    """Accumulate a weighted running average of scalars or numpy arrays.

    Deprecated in favor of fluid.metrics.Accuracy (a warning is emitted on
    construction). Call `add(value, weight)` repeatedly, then `eval()` to
    obtain sum(value * weight) / sum(weight). `reset()` clears the state.
    """

    def __init__(self):
        warnings.warn(
            "The %s is deprecated, please use fluid.metrics.Accuracy instead."
            % (self.__class__.__name__), Warning)
        self.reset()

    def reset(self):
        # Both accumulators start as None; the first add() initializes them.
        self.numerator = None
        self.denominator = None

    def add(self, value, weight):
        """Fold one (value, weight) observation into the running average.

        Raises:
            ValueError: if `value` is not a number or numpy ndarray, or
                `weight` is not a number.
        """
        if not _is_number_or_matrix_(value):
            raise ValueError(
                "The 'value' must be a number(int, float) or a numpy ndarray.")
        if not _is_number_(weight):
            raise ValueError("The 'weight' must be a number(int, float).")

        weighted = value * weight
        if self.numerator is None or self.denominator is None:
            # First observation: initialize both accumulators.
            self.numerator, self.denominator = weighted, weight
        else:
            self.numerator += weighted
            self.denominator += weight

    def eval(self):
        """Return the current weighted average.

        Raises:
            ValueError: if no observations have been added yet.
        """
        if self.numerator is None or self.denominator is None:
            raise ValueError(
                "There is no data to be averaged in WeightedAverage.")
        return self.numerator / self.denominator
2,263
32.294118
80
py
Paddle
Paddle-master/python/paddle/fluid/debugger.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import re
from graphviz import GraphPreviewGenerator
import proto.framework_pb2 as framework_pb2
from google.protobuf import text_format

# Index-to-name tables mirroring the VarType / data-type enums in the
# framework proto; indexed by the proto's integer type value.
_vartype2str_ = [
    "UNK",
    "LoDTensor",
    "SelectedRows",
    "FeedMinibatch",
    "FetchList",
    "StepScopes",
    "LodRankTable",
    "LoDTensorArray",
    "PlaceList",
]
_dtype2str_ = [
    "bool",
    "int16",
    "int32",
    "int64",
    "float16",
    "float32",
    "float64",
]


def repr_data_type(type):
    # Map a proto data-type index to its human-readable name.
    return _dtype2str_[type]


def repr_tensor(proto):
    # One-line summary of a TensorDesc proto: dtype name + dims.
    return "tensor(type={}, shape={})".format(_dtype2str_[int(
        proto.data_type)], str(proto.dims))


# Shared template for all variable reprs: "<type> <name> (<details>)".
reprtpl = "{ttype} {name} ({reprs})"


def repr_lodtensor(proto):
    # Returns None (falling through to the next handler) unless the var
    # is a LOD_TENSOR.
    if proto.type.type != framework_pb2.VarType.LOD_TENSOR:
        return

    level = proto.type.lod_tensor.lod_level
    reprs = repr_tensor(proto.type.lod_tensor.tensor)
    return reprtpl.format(
        ttype="LoDTensor" if level > 0 else "Tensor",
        name=proto.name,
        reprs="level=%d, %s" % (level, reprs) if level > 0 else reprs)


def repr_selected_rows(proto):
    if proto.type.type != framework_pb2.VarType.SELECTED_ROWS:
        return

    return reprtpl.format(
        ttype="SelectedRows",
        name=proto.name,
        reprs=repr_tensor(proto.type.selected_rows))


def repr_tensor_array(proto):
    if proto.type.type != framework_pb2.VarType.LOD_TENSOR_ARRAY:
        return

    # NOTE(review): reads proto.type.lod_tensor here (not tensor_array) for
    # the inner tensor — looks suspicious but is preserved as-is.
    return reprtpl.format(
        ttype="TensorArray",
        name=proto.name,
        reprs="level=%d, %s" % (proto.type.tensor_array.lod_level,
                                repr_tensor(proto.type.lod_tensor.tensor)))


# Handler chain tried in order by repr_var; each returns None on mismatch.
type_handlers = [
    repr_lodtensor,
    repr_selected_rows,
    repr_tensor_array,
]


def repr_var(vardesc):
    # Returns the first non-empty handler result, or None if no handler
    # recognizes the variable's type.
    for handler in type_handlers:
        res = handler(vardesc)
        if res:
            return res


def pprint_program_codes(program_desc):
    # Pretty-print every block of a ProgramDesc, joined by newlines.
    reprs = []
    for block_idx in range(program_desc.desc.num_blocks()):
        block_desc = program_desc.block(block_idx)
        block_repr = pprint_block_codes(block_desc)

        reprs.append(block_repr)

    return '\n'.join(reprs)


def pprint_block_codes(block_desc, show_backward=False):
    # Render one block as pseudo-code: a variables section followed by an
    # operators section. Backward (gradient) vars/ops are hidden unless
    # show_backward is True.
    def is_op_backward(op_desc):
        # An op is "backward" if its type ends in _grad or any of its
        # inputs/outputs mention @GRAD.
        if op_desc.type.endswith('_grad'):
            return True

        def is_var_backward(var):
            if "@GRAD" in var.parameter:
                return True
            for arg in var.arguments:
                if "@GRAD" in arg:
                    return True

        for var in op_desc.inputs:
            if is_var_backward(var):
                return True
        for var in op_desc.outputs:
            if is_var_backward(var):
                return True
        return False

    def is_var_backward(var_desc):
        return "@GRAD" in var_desc.name

    # Accept either a raw proto BlockDesc or a framework Block wrapper.
    if type(block_desc) is not framework_pb2.BlockDesc:
        block_desc = framework_pb2.BlockDesc.FromString(
            block_desc.desc.serialize_to_string())
    var_reprs = []
    op_reprs = []
    for var in block_desc.vars:
        if not show_backward and is_var_backward(var):
            continue
        var_reprs.append(repr_var(var))

    for op in block_desc.ops:
        if not show_backward and is_op_backward(op):
            continue
        op_reprs.append(repr_op(op))

    tpl = "// block-{idx}  parent-{pidx}\n// variables\n{vars}\n\n// operators\n{ops}\n"
    return tpl.format(
        idx=block_desc.idx,
        pidx=block_desc.parent_idx,
        vars='\n'.join(var_reprs),
        ops='\n'.join(op_reprs), )


def repr_attr(desc):
    # Render one attribute as "key=value" and also return the (key, value)
    # pair. The getter list is indexed by the proto attr type enum.
    tpl = "{key}={value}"
    valgetter = [
        lambda attr: attr.i,
        lambda attr: attr.f,
        lambda attr: attr.s,
        lambda attr: attr.ints,
        lambda attr: attr.floats,
        lambda attr: attr.strings,
        lambda attr: attr.b,
        lambda attr: attr.bools,
        lambda attr: attr.block_idx,
        lambda attr: attr.l,
    ]
    key = desc.name
    value = valgetter[desc.type](desc)
    if key == "dtype":
        # dtype attrs are shown by name rather than raw enum value.
        value = repr_data_type(value)
    return tpl.format(key=key, value=str(value)), (key, value)


def _repr_op_fill_constant(optype, inputs, outputs, attrs):
    # Special-case renderer for fill_constant; returns None for other ops.
    if optype == "fill_constant":
        return "{output} = {data} [shape={shape}]".format(
            output=','.join(outputs),
            data=attrs['value'],
            shape=str(attrs['shape']))


# Op-specific renderers tried before the generic template in repr_op.
op_repr_handlers = [
    _repr_op_fill_constant,
]


def repr_op(opdesc):
    # Render one op as "outputs = optype(inputs) [attrs]".
    optype = None
    attrs = []
    attr_dict = {}
    is_target = None
    inputs = []
    outputs = []

    tpl = "{outputs} = {optype}({inputs}{is_target}) [{attrs}]"
    # Single-argument slots render bare; multi-argument slots as a list.
    args2value = lambda args: args[0] if len(args) == 1 else str(list(args))
    for var in opdesc.inputs:
        key = var.parameter
        value = args2value(var.arguments)
        inputs.append("%s=%s" % (key, value))
    for var in opdesc.outputs:
        value = args2value(var.arguments)
        outputs.append(value)
    for attr in opdesc.attrs:
        attr_repr, attr_pair = repr_attr(attr)
        attrs.append(attr_repr)
        attr_dict[attr_pair[0]] = attr_pair[1]

    is_target = opdesc.is_target

    # Give op-specific handlers first crack at rendering.
    for handler in op_repr_handlers:
        res = handler(opdesc.type, inputs, outputs, attr_dict)
        if res:
            return res

    return tpl.format(
        outputs=', '.join(outputs),
        optype=opdesc.type,
        inputs=', '.join(inputs),
        attrs="{%s}" % ','.join(attrs),
        is_target=", is_target" if is_target else "")


def draw_block_graphviz(block, highlights=None, path="./temp.dot"):
    '''
    Generate a debug graph for block.
    Args:
        block(Block): a block.
        highlights: optional list of regex patterns; any op/var whose name
            matches is drawn highlighted (orange).
        path: where to write the DOT source (a PDF is rendered next to it).
    '''
    graph = GraphPreviewGenerator("some graph")
    # collect parameters and args
    protostr = block.desc.serialize_to_string()
    # NOTE(review): str(...) on proto bytes is a py2 idiom; under py3 this
    # would corrupt the payload.
    desc = framework_pb2.BlockDesc.FromString(str(protostr))

    def need_highlight(name):
        if highlights is None: return False
        for pattern in highlights:
            assert type(pattern) is str
            if re.match(pattern, name):
                return True
        return False

    # draw parameters and args
    vars = {}
    for var in desc.vars:
        # TODO(gongwb): format the var.type
        # create var: persistable vars are drawn as parameters, the rest as
        # plain arguments.
        if var.persistable:
            varn = graph.add_param(
                var.name,
                str(var.type).replace("\n", "<br />", 1),
                highlight=need_highlight(var.name))
        else:
            varn = graph.add_arg(var.name, highlight=need_highlight(var.name))
        vars[var.name] = varn

    def add_op_link_var(op, var, op2var=False):
        # Draw an edge between an op node and each argument of `var`;
        # direction is op->var when op2var is True, var->op otherwise.
        for arg in var.arguments:
            if arg not in vars:
                # add missing variables as argument
                vars[arg] = graph.add_arg(arg, highlight=need_highlight(arg))
            varn = vars[arg]
            highlight = need_highlight(op.description) or need_highlight(
                varn.description)
            if op2var:
                graph.add_edge(op, varn, highlight=highlight)
            else:
                graph.add_edge(varn, op, highlight=highlight)

    for op in desc.ops:
        opn = graph.add_op(op.type, highlight=need_highlight(op.type))
        for var in op.inputs:
            add_op_link_var(opn, var, False)
        for var in op.outputs:
            add_op_link_var(opn, var, True)

    # Render to disk (compile only; show=False skips opening the viewer).
    graph(path, show=False)
7,919
28.010989
88
py
Paddle
Paddle-master/python/paddle/fluid/clip.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import functools import layers import framework from . import core __all__ = [ 'ErrorClipByValue', 'GradientClipByValue', 'GradientClipByNorm', 'GradientClipByGlobalNorm', 'append_gradient_clip_ops', 'error_clip_callback', ] class BaseErrorClipAttr(object): def __str__(self): raise NotImplementedError() def append_clip_op(self, block, grad_name): raise NotImplementedError() class ErrorClipByValue(BaseErrorClipAttr): def __init__(self, max, min=None): max = float(max) if min is None: min = -max else: min = float(min) self.max = max self.min = min def __str__(self): return "ByValue, min=%f, max=%f" % (self.min, self.max) def append_clip_op(self, block, grad_name): clip_op_desc = block.desc.append_op() clip_op_desc.set_type("clip") clip_op_desc.set_input("X", [grad_name]) clip_op_desc.set_output("Out", [grad_name]) clip_op_desc.set_attr("min", self.min) clip_op_desc.set_attr("max", self.max) def error_clip_callback(block, context): # the context is a grad_to_var map grad_to_var = context op_desc = block.desc.op(block.desc.op_size() - 1) for grad_n in filter(lambda n: grad_to_var.has_key(n), op_desc.output_arg_names()): fwd_var = block.var_recursive(grad_to_var[grad_n]) error_clip = getattr(fwd_var, "error_clip", None) if not (error_clip is None or isinstance(error_clip, BaseErrorClipAttr)): raise TypeError( "Variable's error_clip should be an instance of 
BaseErrorClipAttr or None." ) if error_clip is not None: error_clip.append_clip_op(block, grad_n) class BaseGradientClipAttr(object): def __str__(self): raise NotImplementedError() def process_context(self, context, param, grad): raise NotImplementedError() def create_operators(self, param, grad): raise NotImplementedError() class NullGradientClipAttr(BaseGradientClipAttr): def __str__(self): return "Null" def process_context(self, context, param, grad): pass def create_operators(self, param, grad): return param, grad class GradientClipByValue(BaseGradientClipAttr): def __init__(self, max, min=None): max = float(max) if min is None: min = -max else: min = float(min) self.max = max self.min = min def __str__(self): return "ByValue, min=%f, max=%f" % (self.min, self.max) def process_context(self, context, param, grad): pass def create_operators(self, param, grad): new_grad = layers.clip(x=grad, min=self.min, max=self.max) return param, new_grad class GradientClipByNorm(BaseGradientClipAttr): def __init__(self, clip_norm): self.clip_norm = clip_norm def __str__(self): return "ByNorm, clip_norm=%f" % self.clip_norm def process_context(self, context, param, grad): pass def create_operators(self, param, grad): new_grad = layers.clip_by_norm(x=grad, max_norm=self.clip_norm) return param, new_grad class GradientClipByGlobalNorm(BaseGradientClipAttr): def __init__(self, clip_norm, group_name="default_group"): if not isinstance(group_name, basestring): raise TypeError("'group_name' must be a basestring.") self.clip_norm = clip_norm self.group_name = group_name def __str__(self): return "ByGlobalNorm, group_name=%s, clip_norm=%f" % (self.group_name, self.clip_norm) def process_context(self, context, param, grad): if self.group_name not in context: context[self.group_name] = [] context[self.group_name + "_clip_value"] = self.clip_norm context[self.group_name + "_clip"] = layers.fill_constant( shape=[1], dtype="float32", value=self.clip_norm) else: if not self.clip_norm == 
context[self.group_name + "_clip_value"]: raise ValueError( "All parameters' 'clip_norm' of a same group should be the same" ) local_norm_var = layers.reduce_sum(input=layers.pow(x=grad, factor=2.0)) context[self.group_name].append(local_norm_var) self.context = context def create_operators(self, param, grad): group_scale_name = self.group_name + "_scale" if group_scale_name not in self.context: group_norm_var = layers.sums(input=self.context[self.group_name]) layers.sqrt(x=group_norm_var, out=group_norm_var) clip_var = self.context[self.group_name + "_clip"] group_scale_var = layers.elementwise_div( x=clip_var, y=layers.elementwise_max( x=clip_var, y=group_norm_var)) assert group_scale_var.shape == (1L, ) self.context[group_scale_name] = group_scale_var new_grad = layers.elementwise_mul( x=grad, y=self.context[group_scale_name]) return param, new_grad def set_gradient_clip(clip, param_list=None, program=None): """ To specify parameters that require gradient clip. Args: clip(BaseGradientClipAttr): An instance of some derived class of BaseGradientClipAttr, which describes the type and detailed attributes of required gradient clip. param_list(list, None by default): Parameters that require gradient clip. It can be a list of parameter or a list of parameter's name. When it's None, all parameters in the program will be included. program(Program, None by default): The program where parameters are. Will be the default main program when assigned with None. 
""" if not isinstance(clip, BaseGradientClipAttr): raise TypeError( "'clip' should be an instance of BaseGradientClipAttr's derived class" ) if program is None: program = framework.default_main_program() if param_list is None: param_list = program.block(0).all_parameters() if all(isinstance(elem, basestring) for elem in param_list): param_list = [program.block(0).var(elem) for elem in param_list] if not all(isinstance(elem, framework.Parameter) for elem in param_list): raise TypeError( "'param_list' should be a list of Parameter or basestring(parameter's name)." ) for param in param_list: param.gradient_clip_attr = copy.deepcopy(clip) def append_gradient_clip_ops(param_grad): context = dict() for p, g in param_grad: with p.block.program.optimized_guard(p): clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr()) if clip_attr is None: clip_attr = NullGradientClipAttr() if not isinstance(clip_attr, BaseGradientClipAttr): raise TypeError( "clip attribute should be an instance of BaseGradientClipAttr" ) clip_attr.process_context(context=context, param=p, grad=g) res = [] for p, g in param_grad: with p.block.program.optimized_guard(p): res.append(clip_attr.create_operators(param=p, grad=g)) return res ClipByValue = GradientClipByValue ClipByNorm = GradientClipByNorm ClipByGlobalNorm = GradientClipByGlobalNorm
8,315
33.65
99
py
Paddle
Paddle-master/python/paddle/fluid/parallel_executor.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import core
import multiprocessing
import framework
import executor
import warnings
import sys

__all__ = ['ParallelExecutor', 'ExecutionStrategy', 'BuildStrategy']

# Re-export the pybind strategy types so users can configure them in Python.
ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy


class ParallelExecutor(object):
    def __init__(self,
                 use_cuda,
                 loss_name=None,
                 main_program=None,
                 share_vars_from=None,
                 exec_strategy=None,
                 build_strategy=None,
                 num_trainers=1,
                 trainer_id=0,
                 **kwargs):
        """
        ParallelExecutor can run program in parallel.

        Args:
            use_cuda(bool): Whether to use CUDA or not.
            loss_name(str, default None): The loss name must set in training.
            main_program(Program, default None): The program that need to run,
                if not provided, then default_main_program will be used.
            share_vars_from(ParallelExecutor, default None): If provied,
                it will share variables from the specified ParallelExecutor.
            num_trainers(int, default 1): If greater than 1, NCCL will be
                initialized with multpile rank of nodes, each node should have
                same number of GPUs. Distributed training will be enabled then.
            trainer_id(int, default 0): Must use together with num_trainers.
                trainer_id is the "rank" of current node starts from 0.

        Returns:
            A ParallelExecutor object.

        Raises:
            TypeError: If share_vars_from is provided, but not ParallelExecutor
                object.

        Examples:
            .. code-block:: python

              train_exe = fluid.ParallelExecutor(
                  use_cuda=True, loss_name=loss.name)
              test_exe = fluid.ParallelExecutor(
                  use_cuda=True,
                  main_program=test_program,
                  share_vars_from=train_exe)

              train_loss, = train_exe.run([loss.name], feed=feed_dict)
              test_loss, = test_exe.run([loss.name], feed=feed_dict)
        """
        # Legacy keyword arguments are rejected with a pointer to the
        # strategy object that now owns each setting.
        if len(kwargs) != 0:
            err_msg = ""
            for key in kwargs:
                if key in dir(ExecutionStrategy):
                    err_msg += \
                        "Setting {0} by constructor is deprecated. Use " \
                        "strategy=ExecutionStrategy(); strategy.{0}=xxx; " \
                        "pe=ParallelExecutor(exec_strategy=strategy) " \
                        "instead.\n ".format(key)
                elif key in dir(BuildStrategy):
                    err_msg += \
                        "Setting {0} by constructor is deprecated. Use " \
                        "strategy=BuildStrategy(); See help(" \
                        "paddle.fluid.ParallelExecutor.BuildStrategy) \n".format(
                            key)
                else:
                    err_msg += "Setting {0} by constructor is deprecated. Use strategy.\n".format(
                        key)
            raise ValueError(err_msg)

        # One place (and matching "act place") per device: every visible GPU
        # when use_cuda, otherwise one CPU place per core.
        self._places = []
        self._act_places = []
        if use_cuda:
            for i in xrange(core.get_cuda_device_count()):
                p = core.Place()
                self._act_places.append(core.CUDAPlace(i))
                p.set_place(self._act_places[-1])
                self._places.append(p)
        else:
            for i in xrange(multiprocessing.cpu_count()):
                p = core.Place()
                self._act_places.append(core.CPUPlace())
                p.set_place(self._act_places[-1])
                self._places.append(p)
        assert self._places, "no place for execution"

        if exec_strategy is None:
            exec_strategy = ExecutionStrategy()
            if use_cuda:
                exec_strategy.use_event = True
            else:
                exec_strategy.use_event = False

        if exec_strategy.num_threads == 0:
            if use_cuda:
                # Experiments on se-resnext shows that too many threads hurt
                # performance. Worth tunning for other models in the future.
                exec_strategy.num_threads = len(self._places) * 2
            else:
                exec_strategy.num_threads = min(
                    len(self._places) * 2, multiprocessing.cpu_count())

        if build_strategy is None:
            build_strategy = BuildStrategy()

        main = main_program
        main = main if main else framework.default_main_program()
        scope = executor.global_scope()

        if share_vars_from and not isinstance(share_vars_from,
                                              ParallelExecutor):
            raise TypeError("share_vars_from must be ParallelExecutor.")

        # Reuse the donor executor's per-device scopes so variables are shared.
        local_scopes = share_vars_from.executor.local_scopes(
        ) if share_vars_from else []

        # Persistable (non-RAW) variables must live in the shared scope.
        self.persistable_vars = [
            v.name
            for v in filter(
                lambda var: var.persistable and var.type != core.VarDesc.VarType.RAW,
                main.list_vars())
        ]

        self.executor = core.ParallelExecutor(
            self._places,
            set([
                p.name for p in main.global_block().iter_parameters()
                if not p.stop_gradient
            ]),
            set(self.persistable_vars), main.desc, loss_name
            if loss_name else '', scope, local_scopes, exec_strategy,
            build_strategy, num_trainers, trainer_id)
        self.scope = scope

    def run(self, fetch_list, feed=None, feed_dict=None):
        """
        Run a parallel executor with fetch_list.

        The feed parameter can be a dict or a list. If feed is a dict, the
        feed data will be split into multiple devices. If feed is a list, we
        assume the data has been splitted into multiple devices, the each
        element in the list will be copied to each device directly.

        For example, if the feed is a dict:
        >>> exe = ParallelExecutor()
        >>> # the image will be splitted into devices. If there is two devices
        >>> # each device will process an image with shape (24, 1, 28, 28)
        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})

        For example, if the feed is a list:
        >>> exe = ParallelExecutor()
        >>> # each device will process each element in the list.
        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
        >>> #
        >>> # you can use exe.device_count to get the device number.
        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
        >>>               ])

        Args:
            fetch_list(list): The fetched variable names
            feed(list|dict|None): The feed variables. If the feed is a dict,
                tensors in that dict will be splitted into each devices. If
                the feed is a list, each element of the list will be copied
                to each device.
            feed_dict: Alias for feed parameter, for backward compatibility.
                This parameter is deprecated.

        Returns: fetched result list.

        """
        if feed is None and feed_dict is not None:
            feed = feed_dict
            print >> sys.stderr, "`feed_dict` is deprecated. Please use `feed=`"

        if isinstance(feed, dict):
            # Dict feed: convert each value to a LoDTensor on CPU and let the
            # C++ executor split it across the local scopes.
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor need to be splitted
                    # it is fast in CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            self.executor.feed_and_split_tensor_into_local_scopes(
                feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
            # List feed: one dict per device, copied to that device directly.
            if len(feed) != len(self._act_places):
                raise ValueError(
                    "Feed a list of tensor, the list should be the same size as places"
                )

            res = list()
            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, self._act_places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            self.executor.feed_tensors_into_local_scopes(res)

        # Fetched values are gathered into one LoDTensorArray variable.
        fetch_var_name = '@FETCHED_VAR_NAME@'
        self.executor.run(fetch_list, fetch_var_name)
        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
        return [arr[i] for i in range(len(arr))]

    def bcast_params(self):
        # Broadcast persistable variables from device 0 to all other devices.
        self.executor.bcast_params(set(self.persistable_vars))

    @property
    def device_count(self):
        # Number of devices this executor runs on.
        return len(self._act_places)
10,155
39.951613
98
py
Paddle
Paddle-master/python/paddle/fluid/recordio_writer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import core
import contextlib

__all__ = ['convert_reader_to_recordio_file']


@contextlib.contextmanager
def create_recordio_writer(filename,
                           compressor=core.RecordIOWriter.Compressor.Snappy,
                           max_num_records=1000):
    """Context manager that yields an open core.RecordIOWriter.

    Args:
        filename(str): path of the recordio file to create.
        compressor: compression scheme for the written chunks.
        max_num_records(int): max records per chunk.

    Yields:
        core.RecordIOWriter: the open writer.
    """
    writer = core.RecordIOWriter(filename, compressor, max_num_records)
    # BUG FIX: close the writer even when the with-body raises; previously an
    # exception in the body skipped writer.close() and leaked the file handle.
    try:
        yield writer
    finally:
        writer.close()


def convert_reader_to_recordio_file(
        filename,
        reader_creator,
        feeder,
        compressor=core.RecordIOWriter.Compressor.Snappy,
        max_num_records=1000,
        feed_order=None):
    """Drain a reader through a DataFeeder and dump it to a recordio file.

    Args:
        filename(str): output recordio file path.
        reader_creator(callable): returns an iterable of mini-batches.
        feeder(DataFeeder): converts a batch into named tensors.
        compressor: compression scheme for the written chunks.
        max_num_records(int): max records per chunk.
        feed_order(list|None): tensor names in write order; defaults to
            feeder.feed_names.

    Returns:
        int: number of batches written.
    """
    if feed_order is None:
        feed_order = feeder.feed_names
    counter = 0
    with create_recordio_writer(filename, compressor,
                                max_num_records) as writer:
        for batch in reader_creator():
            res = feeder.feed(batch)
            for each in feed_order:
                writer.append_tensor(res[each])
            writer.complete_append_tensor()
            counter += 1
    return counter
1,646
32.612245
76
py
Paddle
Paddle-master/python/paddle/fluid/default_scope_funcs.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Default scope function.

`Paddle` manages Scope as programming language's scope. It just a
thread-local stack of Scope. Top of that stack is current scope, the bottom
of that stack is all scopes' parent.

Invoking `var/find_var` can `new/find` variable in current scope.
Invoking `enter_local_scope/leave_local_scope` can create or destroy local
scope.

A `scoped_function` will take a `function` as input. That function will be
invoked in a new local scope.
"""

import paddle.fluid.core
import threading

# Per-thread scope stack; each thread gets its own lazily-created stack.
__tl_scope__ = threading.local()

__all__ = [
    'get_cur_scope',
    'enter_local_scope',
    'leave_local_scope',
    'var',
    'find_var',
    'scoped_function',
]


def get_cur_scope():
    """
    Get current scope.

    Lazily creates this thread's stack and its root Scope on first use.

    :rtype: paddle.fluid.core.Scope
    """
    cur_scope_stack = getattr(__tl_scope__, 'cur_scope', None)
    if cur_scope_stack is None:
        __tl_scope__.cur_scope = list()
    if len(__tl_scope__.cur_scope) == 0:
        __tl_scope__.cur_scope.append(paddle.fluid.core.Scope())
    return __tl_scope__.cur_scope[-1]


def enter_local_scope():
    """
    Enter a new local scope (pushed on this thread's stack).
    """
    cur_scope = get_cur_scope()
    new_scope = cur_scope.new_scope()
    __tl_scope__.cur_scope.append(new_scope)


def leave_local_scope():
    """
    Leave local scope: pop it and drop its children from the parent.
    """
    __tl_scope__.cur_scope.pop()
    get_cur_scope().drop_kids()


def var(name):
    """
    create variable in current scope.
    """
    return get_cur_scope().var(name)


def find_var(name):
    """
    get variable in current scope.
    """
    return get_cur_scope().find_var(name)


def scoped_function(func):
    """
    invoke `func` in new scope.

    :param func: a callable function that will be run in new scope.
    :type func: callable
    """
    enter_local_scope()
    # try/finally guarantees the scope is left even when func raises;
    # the previous `except: raise` clause was a redundant no-op.
    try:
        func()
    finally:
        leave_local_scope()
2,496
23.480392
75
py
Paddle
Paddle-master/python/paddle/fluid/inferencer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib

import core

import executor
import framework
import io
import parallel_executor
import unique_name
from trainer import check_and_get_place

__all__ = ['Inferencer', ]


class Inferencer(object):
    # High-level inference wrapper: builds the inference program from
    # infer_func, loads parameters from param_path into a private scope, and
    # runs either a plain Executor or a ParallelExecutor.

    def __init__(self, infer_func, param_path, place=None, parallel=False):
        """
        :param infer_func: a function that will return predict Variable
        :param param_path: the path where the inference model is saved by
            fluid.io.save_params
        :param place: place to do the inference
        :param parallel: use parallel_executor to run the inference, it will use multi CPU/GPU.
        """
        self.param_path = param_path
        # Private scope so loaded parameters do not pollute the global scope.
        self.scope = core.Scope()
        self.parallel = parallel
        self.place = check_and_get_place(place)

        # Build the inference program by tracing infer_func inside a fresh
        # Program; unique_name.guard keeps generated names deterministic.
        self.inference_program = framework.Program()
        with framework.program_guard(self.inference_program):
            with unique_name.guard():
                self.predict_var = infer_func()

        with self._prog_and_scope_guard():
            # load params from param_path into scope
            io.load_params(executor.Executor(self.place), param_path)

        if parallel:
            with self._prog_and_scope_guard():
                self.exe = parallel_executor.ParallelExecutor(
                    use_cuda=isinstance(self.place, core.CUDAPlace),
                    loss_name=self.predict_var.name)
        else:
            self.exe = executor.Executor(self.place)

    def infer(self, inputs, return_numpy=True):
        """
        :param inputs: a map of {"input_name": input_var} that will be feed into the
            inference program to get the predict value
        :return: the predict value of the inference model
        """
        if not isinstance(inputs, dict):
            raise ValueError(
                "inputs should be a map of {'input_name': input_var}")

        # Run inside the private scope so lookups hit the loaded parameters.
        with executor.scope_guard(self.scope):
            results = self.exe.run(self.inference_program,
                                   feed=inputs,
                                   fetch_list=[self.predict_var],
                                   return_numpy=return_numpy)

        return results

    @contextlib.contextmanager
    def _prog_and_scope_guard(self):
        # Temporarily install both the inference program and the private
        # scope as the defaults for the enclosed block.
        with framework.program_guard(main_program=self.inference_program):
            with executor.scope_guard(self.scope):
                yield
3,014
35.768293
102
py
Paddle
Paddle-master/python/paddle/fluid/executor.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import contextlib
from framework import Program, default_main_program, Variable
from . import core

__all__ = [
    'Executor', 'global_scope', 'scope_guard', 'switch_scope', 'fetch_var'
]

# Process-wide default scope used when callers do not supply one.
g_scope = core.Scope()


def global_scope():
    # Return the process-wide default scope.
    return g_scope


def switch_scope(scope):
    # Replace the global scope and return the previous one.
    global g_scope
    ex = g_scope
    g_scope = scope
    return ex


@contextlib.contextmanager
def scope_guard(scope):
    # Temporarily install `scope` as the global scope for the with-block.
    ex = switch_scope(scope)
    yield
    switch_scope(ex)


def as_numpy(tensor):
    # Convert a LoDTensor (or a list of them, recursively) to numpy;
    # tensors that carry LoD information cannot be converted and raise.
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
    lod = tensor.lod()
    if len(lod) > 0:
        raise RuntimeError("Some of your fetched tensors hold LoD information. \
            They can not be completely cast to Python ndarray. \
            Please set the parameter 'return_numpy' as 'False' to \
            return LoDTensor itself directly.")
    return np.array(tensor)


def has_feed_operators(block, feed_targets, feed_holder_name):
    """ Check whether the block already has feed operators.

    Return false if the block does not have any feed operators.
    If some feed operators have been prepended to the block, check that
    the info contained in these feed operators matches the feed_targets
    and feed_holder_name. Raise exception when any mismatch is found.
    Return true when the block has feed operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        feed_targets: a dictionary of {feed_target_name: feed_target_data}
        feed_holder_name: the name of the variable that holds the data of
            all feed targets. The type of this feed_holder variable is
            FEED_MINIBATCH, which is essentially vector<LoDTensor>.

    Returns:
        A boolean value that indicates whether a block has feed operators
        that match the info contained in feed_targets and feed_holder_name.
    """

    feed_count = 0
    for op in block.ops:
        if op.desc.type() == 'feed':
            feed_count += 1
            assert op.desc.input('X')[0] == feed_holder_name
            feed_target_name = op.desc.output('Out')[0]
            if feed_target_name not in feed_targets:
                raise Exception("'feed_targets' does not have {} variable".
                                format(feed_target_name))
        else:
            # Feed ops are always at the head of the block; stop at the
            # first non-feed op.
            break
    if feed_count > 0 and feed_count != len(feed_targets):
        raise Exception(
            "Feed operators in program desc do not match 'feed_targets'")
    return feed_count > 0


def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """ Check whether the block already has fetch operators.

    Return false if the block does not have any fetch operators.
    If some fetch operators have been appended to the block, check that
    the info contained in these fetch operators matches the fetch_targets
    and fetch_holder_name. Raise exception when any mismatch is found.
    Return true when the block has fetch operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
        fetch_holder_name: the name of the variable that holds the data of
            all fetch targets. The type of this fetch_holder variable is
            FETCH_LIST, which is essentially vector<LoDTensor>.

    Return:
        A boolean value that indicates whether a block has fetch operators
        that match the info contained in fetch_targets and fetch_holder_name.
    """

    fetch_count = 0
    for op in block.ops:
        if op.desc.type() == 'fetch':
            fetch_count += 1
            assert op.desc.output('Out')[0] == fetch_holder_name
            fetch_target_name = op.desc.input('X')[0]
            if fetch_target_name not in [
                    var.desc.name() for var in fetch_targets
            ]:
                raise Exception("'fetch_targets' does not have {} variable".
                                format(fetch_target_name))
            # The 'col' attribute records the slot index in the fetch list.
            idx = op.desc.attr('col')
            assert fetch_target_name == fetch_targets[idx].desc.name()
    if fetch_count > 0 and fetch_count != len(fetch_targets):
        raise Exception(
            "Fetch operators in program desc do not match 'fetch_targets'")
    return fetch_count > 0


def fetch_var(name, scope=None, return_numpy=True):
    """
    Fetch the value of the variable with the given name from the given scope
    Args:
        name(str): name of the variable. Typically, only persistable variables
            can be found in the scope used for running the program.
        scope(core.Scope|None): scope object. It should be the scope where
            you pass to Executor.run() when running your program.
            If None, global_scope() will be used.
        return_numpy(bool): whether convert the tensor to numpy.ndarray
    Returns:
       LodTensor|numpy.ndarray
    """
    assert isinstance(name, str)
    if scope is None:
        scope = global_scope()
    assert isinstance(scope, core.Scope)

    var = scope.find_var(name)
    assert var is not None, (
        "Cannot find " + name + " in scope. Perhaps you need to make the"
        " variable persistable by using var.persistable = True in your"
        " program.")
    tensor = var.get_tensor()
    if return_numpy:
        tensor = as_numpy(tensor)
    return tensor


def get_program_cache_key(feed, fetch_list):
    # Build a cache key from the feed names plus the fetch variable names;
    # programs with identical feed/fetch signatures share a cache entry.
    feed_var_names = feed.keys()

    def to_name_str(var):
        if isinstance(var, Variable):
            return var.desc.name()
        elif isinstance(var, str):
            return var
        else:
            raise TypeError(str(var) + " should be Variable or str")

    fetch_var_names = map(to_name_str, fetch_list)

    return str(feed_var_names + fetch_var_names)


class Executor(object):
    # Single-device executor: prepares a program with feed/fetch operators
    # and runs it through the C++ core.Executor.

    def __init__(self, place):
        self.place = place
        p = core.Place()
        p.set_place(place)
        self.executor = core.Executor(p)
        # Maps get_program_cache_key(...) -> prepared program clone.
        self.program_caches = dict()

    def as_lodtensor(self, data):
        # Wrap a numpy array into a LoDTensor on this executor's place.
        # Lists are rejected: LoD layout cannot be inferred from them.
        if isinstance(data, list):
            raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
                ndarray to LoDTensor. Please convert data to LoDTensor \
                directly before feeding the data.\
                ")
        # single tensor case
        tensor = core.LoDTensor()
        tensor.set(data, self.place)
        return tensor

    def _get_program_cache(self, program_cache_key):
        return self.program_caches.get(program_cache_key, None)

    def _add_program_cache(self, program_cache_key, program):
        self.program_caches[program_cache_key] = program

    def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
                            fetch_var_name):
        # Clone the program and splice in feed ops (at the head) and fetch
        # ops (at the tail) wired to the holder variables; the original
        # program is left untouched.
        tmp_program = program.clone()

        global_block = tmp_program.global_block()

        if feed_var_name in global_block.vars:
            feed_var = global_block.var(feed_var_name)
        else:
            feed_var = global_block.create_var(
                name=feed_var_name,
                type=core.VarDesc.VarType.FEED_MINIBATCH,
                persistable=True)

        if fetch_var_name in global_block.vars:
            fetch_var = global_block.var(fetch_var_name)
        else:
            fetch_var = global_block.create_var(
                name=fetch_var_name,
                type=core.VarDesc.VarType.FETCH_LIST,
                persistable=True)

        # prepend feed operators
        if not has_feed_operators(global_block, feed, feed_var_name):
            for i, name in enumerate(feed):
                out = global_block.var(name)
                global_block.prepend_op(
                    type='feed',
                    inputs={'X': [feed_var]},
                    outputs={'Out': [out]},
                    attrs={'col': i})

        # append fetch_operators
        if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
            for i, var in enumerate(fetch_list):
                assert isinstance(var, Variable) or isinstance(var, str), (
                    "Wrong type for fetch_list[%s]: %s" % (i, type(var)))
                global_block.append_op(
                    type='fetch',
                    inputs={'X': [var]},
                    outputs={'Out': [fetch_var]},
                    attrs={'col': i})

        return tmp_program

    def _feed_data(self, program, feed, feed_var_name, scope):
        # feed var to framework
        # Walk the leading feed ops and push the matching user data into the
        # feed holder variable at each op's 'col' slot.
        for op in program.global_block().ops:
            if op.desc.type() == 'feed':
                feed_target_name = op.desc.output('Out')[0]
                cur_feed = feed[feed_target_name]
                if not isinstance(cur_feed, core.LoDTensor):
                    cur_feed = self.as_lodtensor(cur_feed)
                idx = op.desc.attr('col')
                core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
            else:
                break

    def _fetch_data(self, fetch_list, fetch_var_name, scope):
        # Pull each fetched tensor out of the fetch holder variable.
        outs = [
            core.get_fetch_variable(scope, fetch_var_name, i)
            for i in xrange(len(fetch_list))
        ]
        return outs

    def run(self,
            program=None,
            feed=None,
            fetch_list=None,
            feed_var_name='feed',
            fetch_var_name='fetch',
            scope=None,
            return_numpy=True,
            use_program_cache=False):
        """ Run program by this Executor. Feed data by feed map, fetch result by fetch_list.

        Python executor takes a program, add feed operators and fetch operators to this program according
        to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
        the variables(or names) that user want to get after program run. Note: the executor will run all
        operators in the program but not only the operators dependent by the fetch_list

        :param program: the program that need to run, if not provied, then default_main_program will be used.
        :param feed: feed variable map, e.g. {"image": ImageData, "label": LableData}
        :param fetch_list: a list of variable or variable names that user want to get, run will return them according
        to this list.
        :param feed_var_name: the name for the input variable of feed Operator.
        :param fetch_var_name: the name for the output variable of feed Operator.
        :param scope: the scope used to run this program, you can switch it to different scope. default is global_scope
        :param return_numpy: if convert the fetched tensor to numpy
        :param use_program_cache: set use_program_cache to true if program not changed compare to the last step.
        :return: result according to fetch_list.
        """
        if feed is None:
            feed = {}
        if not isinstance(feed, dict):
            raise TypeError(
                "feed requires dict as its Parameter. But you passed in %s" %
                (type(feed)))
        if fetch_list is None:
            fetch_list = []
        if program is None:
            program = default_main_program()

        if not isinstance(program, Program):
            raise TypeError(
                "Executor requires Program as its Parameter. But you passed in %s"
                % (type(program)))

        if scope is None:
            scope = global_scope()

        # Reuse (or invalidate) the prepared feed/fetch program keyed by the
        # feed/fetch signature.
        cache_key = get_program_cache_key(feed, fetch_list)
        if use_program_cache:
            cached_program = self._get_program_cache(cache_key)
            if cached_program is None:
                cached_program = self._add_feed_fetch_ops(
                    program=program,
                    feed=feed,
                    fetch_list=fetch_list,
                    feed_var_name=feed_var_name,
                    fetch_var_name=fetch_var_name)
                self._add_program_cache(cache_key, cached_program)
            program = cached_program
        else:
            self.program_caches.pop(cache_key, None)
            program = self._add_feed_fetch_ops(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name)

        self._feed_data(program, feed, feed_var_name, scope)
        self.executor.run(program.desc, scope, 0, True, True)
        outs = self._fetch_data(fetch_list, fetch_var_name, scope)
        if return_numpy:
            outs = as_numpy(outs)
        return outs
13,360
37.727536
119
py
Paddle
Paddle-master/python/paddle/fluid/evaluator.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings import numpy as np import layers from framework import Program, Variable, program_guard import unique_name from layer_helper import LayerHelper from initializer import Constant __all__ = [ 'ChunkEvaluator', 'EditDistance', 'DetectionMAP', ] def _clone_var_(block, var): assert isinstance(var, Variable) return block.create_var( name=var.name, shape=var.shape, dtype=var.dtype, type=var.type, lod_level=var.lod_level, persistable=True) class Evaluator(object): """ Base Class for all evaluators Args: name(str): The name of evaluator. such as, "accuracy". Used for generate temporary variable name. main_program(Program, optional): The evaluator should be added to this main_program. Default default_main_program() startup_program(Program, optional):The parameter should be added to this startup_program. Default default_startup_program() Attributes: states(list): The list of state variables. states will be reset to zero when `reset` is invoked. metrics(list): The list of metrics variables. They will be calculate every mini-batch """ def __init__(self, name, **kwargs): warnings.warn( "The %s is deprecated, because maintain a modified program inside evaluator cause bug easily, please use fluid.metrics.%s instead." 
% (self.__class__.__name__, self.__class__.__name__), Warning) self.states = [] self.metrics = [] self.helper = LayerHelper(name, **kwargs) def reset(self, executor, reset_program=None): """ reset metric states at the begin of each pass/user specified batch """ if reset_program is None: reset_program = Program() with program_guard(main_program=reset_program): for var in self.states: assert isinstance(var, Variable) g_var = _clone_var_(reset_program.current_block(), var) layers.fill_constant( shape=g_var.shape, value=0.0, dtype=g_var.dtype, out=g_var) executor.run(reset_program) def eval(self, executor, eval_program=None): """ Evaluate the statistics merged by multiple mini-batches. """ raise NotImplementedError() def create_state(self, suffix, dtype, shape): """ Create state variable. NOTE: It is not a public API. Args: suffix(str): the state suffix. dtype(str|core.VarDesc.VarType): the state data type shape(tuple|list): the shape of state Returns: State variable """ state = self.helper.create_variable( name="_".join([unique_name.generate(self.helper.name), suffix]), persistable=True, dtype=dtype, shape=shape) self.states.append(state) return state class ChunkEvaluator(Evaluator): """ Accumulate counter numbers output by chunk_eval from mini-batches and compute the precision recall and F1-score using the accumulated counter numbers. 
""" def __init__( self, input, label, chunk_scheme, num_chunk_types, excluded_chunk_types=None, ): super(ChunkEvaluator, self).__init__("chunk_eval") main_program = self.helper.main_program if main_program.current_block().idx != 0: raise ValueError("You can only invoke Evaluator in root block") self.num_infer_chunks = self.create_state( dtype='int64', shape=[1], suffix='num_infer_chunks') self.num_label_chunks = self.create_state( dtype='int64', shape=[1], suffix='num_label_chunks') self.num_correct_chunks = self.create_state( dtype='int64', shape=[1], suffix='num_correct_chunks') precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks = layers.chunk_eval( input=input, label=label, chunk_scheme=chunk_scheme, num_chunk_types=num_chunk_types, excluded_chunk_types=excluded_chunk_types, ) layers.sums( input=[self.num_infer_chunks, num_infer_chunks], out=self.num_infer_chunks) layers.sums( input=[self.num_label_chunks, num_label_chunks], out=self.num_label_chunks) layers.sums( input=[self.num_correct_chunks, num_correct_chunks], out=self.num_correct_chunks) self.metrics.extend([precision, recall, f1_score]) def eval(self, executor, eval_program=None): if eval_program is None: eval_program = Program() block = eval_program.current_block() num_infer_chunks, num_label_chunks, num_correct_chunks = executor.run( eval_program, fetch_list=[_clone_var_(block, state) for state in self.states]) num_infer_chunks = num_infer_chunks[0] num_label_chunks = num_label_chunks[0] num_correct_chunks = num_correct_chunks[0] precision = float( num_correct_chunks) / num_infer_chunks if num_infer_chunks else 0 recall = float( num_correct_chunks) / num_label_chunks if num_label_chunks else 0 f1_score = float(2 * precision * recall) / ( precision + recall) if num_correct_chunks else 0 return np.array( [precision], dtype='float32'), np.array( [recall], dtype='float32'), np.array( [f1_score], dtype='float32') class EditDistance(Evaluator): """ Accumulate edit distance 
sum and sequence number from mini-batches and compute the average edit_distance and instance error of all batches. Args: input: the sequences predicted by network. label: the target sequences which must has same sequence count with input. ignored_tokens(list of int): Tokens that should be removed before calculating edit distance. Example: exe = fluid.executor(place) distance_evaluator = fluid.Evaluator.EditDistance(input, label) for epoch in PASS_NUM: distance_evaluator.reset(exe) for data in batches: loss = exe.run(fetch_list=[cost]) distance, instance_error = distance_evaluator.eval(exe) In the above example: 'distance' is the average of the edit distance in a pass. 'instance_error' is the instance error rate in a pass. """ def __init__(self, input, label, ignored_tokens=None, **kwargs): super(EditDistance, self).__init__("edit_distance", **kwargs) main_program = self.helper.main_program if main_program.current_block().idx != 0: raise ValueError("You can only invoke Evaluator in root block") self.total_distance = self.create_state( dtype='float32', shape=[1], suffix='total_distance') self.seq_num = self.create_state( dtype='int64', shape=[1], suffix='seq_num') self.instance_error = self.create_state( dtype='int64', shape=[1], suffix='instance_error') distances, seq_num = layers.edit_distance( input=input, label=label, ignored_tokens=ignored_tokens) zero = layers.fill_constant(shape=[1], value=0.0, dtype='float32') compare_result = layers.equal(distances, zero) compare_result_int = layers.cast(x=compare_result, dtype='int') seq_right_count = layers.reduce_sum(compare_result_int) instance_error_count = layers.elementwise_sub( x=seq_num, y=seq_right_count) total_distance = layers.reduce_sum(distances) layers.sums( input=[self.total_distance, total_distance], out=self.total_distance) layers.sums(input=[self.seq_num, seq_num], out=self.seq_num) layers.sums( input=[self.instance_error, instance_error_count], out=self.instance_error) self.metrics.append(total_distance) 
self.metrics.append(instance_error_count) def eval(self, executor, eval_program=None): if eval_program is None: eval_program = Program() block = eval_program.current_block() with program_guard(main_program=eval_program): total_distance = _clone_var_(block, self.total_distance) seq_num = _clone_var_(block, self.seq_num) instance_error = _clone_var_(block, self.instance_error) seq_num = layers.cast(x=seq_num, dtype='float32') instance_error = layers.cast(x=instance_error, dtype='float32') avg_distance = layers.elementwise_div(x=total_distance, y=seq_num) avg_instance_error = layers.elementwise_div( x=instance_error, y=seq_num) result = executor.run( eval_program, fetch_list=[avg_distance, avg_instance_error]) return np.array(result[0]), np.array(result[1]) class DetectionMAP(Evaluator): """ Calculate the detection mean average precision (mAP). TODO (Dang Qingqing): update the following doc. The general steps are as follows: 1. calculate the true positive and false positive according to the input of detection and labels. 2. calculate mAP value, support two versions: '11 point' and 'integral'. Please get more information from the following articles: https://sanchom.wordpress.com/tag/average-precision/ https://arxiv.org/abs/1512.02325 Args: input (Variable): The detection results, which is a LoDTensor with shape [M, 6]. The layout is [label, confidence, xmin, ymin, xmax, ymax]. gt_label (Variable): The ground truth label index, which is a LoDTensor with shape [N, 1]. gt_box (Variable): The ground truth bounding box (bbox), which is a LoDTensor with shape [N, 6]. The layout is [xmin, ymin, xmax, ymax]. gt_difficult (Variable|None): Whether this ground truth is a difficult bounding bbox, which can be a LoDTensor [N, 1] or not set. If None, it means all the ground truth labels are not difficult bbox. class_num (int): The class number. background_label (int): The index of background label, the background label will be ignored. 
If set to -1, then all categories will be considered, 0 by defalut. overlap_threshold (float): The threshold for deciding true/false positive, 0.5 by defalut. evaluate_difficult (bool): Whether to consider difficult ground truth for evaluation, True by defalut. This argument does not work when gt_difficult is None. ap_version (string): The average precision calculation ways, it must be 'integral' or '11point'. Please check https://sanchom.wordpress.com/tag/average-precision/ for details. - 11point: the 11-point interpolated average precision. - integral: the natural integral of the precision-recall curve. Example: exe = fluid.executor(place) map_evaluator = fluid.Evaluator.DetectionMAP(input, gt_label, gt_box, gt_difficult) cur_map, accum_map = map_evaluator.get_map_var() fetch = [cost, cur_map, accum_map] for epoch in PASS_NUM: map_evaluator.reset(exe) for data in batches: loss, cur_map_v, accum_map_v = exe.run(fetch_list=fetch) In the above example: 'cur_map_v' is the mAP of current mini-batch. 'accum_map_v' is the accumulative mAP of one pass. 
""" def __init__(self, input, gt_label, gt_box, gt_difficult=None, class_num=None, background_label=0, overlap_threshold=0.5, evaluate_difficult=True, ap_version='integral'): super(DetectionMAP, self).__init__("map_eval") gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype) if gt_difficult: gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype) label = layers.concat([gt_label, gt_difficult, gt_box], axis=1) else: label = layers.concat([gt_label, gt_box], axis=1) # calculate mean average precision (mAP) of current mini-batch map = layers.detection_map( input, label, class_num, background_label, overlap_threshold=overlap_threshold, evaluate_difficult=evaluate_difficult, ap_version=ap_version) self.create_state(dtype='int32', shape=None, suffix='accum_pos_count') self.create_state(dtype='float32', shape=None, suffix='accum_true_pos') self.create_state(dtype='float32', shape=None, suffix='accum_false_pos') self.has_state = None var = self.helper.create_variable( persistable=True, dtype='int32', shape=[1]) self.helper.set_variable_initializer( var, initializer=Constant(value=int(0))) self.has_state = var # calculate accumulative mAP accum_map = layers.detection_map( input, label, class_num, background_label, overlap_threshold=overlap_threshold, evaluate_difficult=evaluate_difficult, has_state=self.has_state, input_states=self.states, out_states=self.states, ap_version=ap_version) layers.fill_constant( shape=self.has_state.shape, value=1, dtype=self.has_state.dtype, out=self.has_state) self.cur_map = map self.accum_map = accum_map def get_map_var(self): return self.cur_map, self.accum_map def reset(self, executor, reset_program=None): if reset_program is None: reset_program = Program() with program_guard(main_program=reset_program): var = _clone_var_(reset_program.current_block(), self.has_state) layers.fill_constant( shape=var.shape, value=0, dtype=var.dtype, out=var) executor.run(reset_program)
15,088
37.989664
143
py
Paddle
Paddle-master/python/paddle/fluid/framework.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import contextlib import re import numpy as np import proto.framework_pb2 as framework_pb2 from . import core import unique_name __all__ = [ 'Block', 'Variable', 'Program', 'Operator', 'default_startup_program', 'default_main_program', 'program_guard', 'switch_startup_program', 'switch_main_program', 'get_var', ] EMPTY_VAR_NAME = core.kEmptyVarName() TEMP_VAR_NAME = core.kTempVarName() GRAD_VAR_SUFFIX = core.kGradVarSuffix() ZERO_VAR_SUFFIX = core.kZeroVarSuffix() def grad_var_name(var_name): """ return gradient name for a certain var name """ return var_name + GRAD_VAR_SUFFIX def convert_np_dtype_to_dtype_(np_dtype): """ Convert the data type in numpy to the data type in Paddle Args: np_dtype(np.dtype): the data type in numpy Returns(core.VarDesc.VarType): the data type in Paddle """ dtype = np.dtype(np_dtype) if dtype == np.float32: return core.VarDesc.VarType.FP32 elif dtype == np.float64: return core.VarDesc.VarType.FP64 elif dtype == np.float16: return core.VarDesc.VarType.FP16 elif dtype == np.int32: return core.VarDesc.VarType.INT32 elif dtype == np.int16: return core.VarDesc.VarType.INT16 elif dtype == np.int64: return core.VarDesc.VarType.INT64 elif dtype == np.bool: return core.VarDesc.VarType.BOOL elif dtype == np.uint8: return core.VarDesc.VarType.UINT8 else: raise ValueError("Not supported numpy dtype " + str(dtype)) def 
dtype_is_floating(dtype): """ Check the data type is floating or not. Args: dtype(np.dtype|core.VarDesc.VarType): data type. Could be numpy format or Paddle format Returns(bool): True if data type is a float value """ if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) return dtype in [ core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64 ] def _debug_string_(proto, throw_on_error=True): """ Get the debug string of a protobuf message. The message could be not initialized. Args: proto(google.protobuf.message.Message): The protobuf message throw_on_error(bool): True if raise an error when the protobuf message is not initialized. Returns(str): The debug string of the protobuf message """ error_fields = list() if not proto.IsInitialized(error_fields) and throw_on_error: raise ValueError("{0} are not initialized.\nThe message is {1}:\n". format(error_fields, proto)) return proto.__str__() class Variable(object): """ Python variable. Every input and output of an operator is a variable. Every variable belongs to a block. The variable has a name and two variables in different blocks could have the same name. There are many kinds of variables. Please reference the framework.proto for details. Notes: The constructor of Variable should not be invoked directly. Please use `Block.create_var` to create a variable. >>> cur_program = Program() >>> cur_block = cur_program.current_block() >>> new_variable = cur_block.create_var( >>> name="X", shape=[-1, 23, 48], dtype='float32') Args: block(Block): The associated block. It will be passed by `Block.create_var` automatically. type(core.VarDesc.VarType): Variable type. Please reference the framework.proto for details. shape(tuple|list|None): The shape of variable. -1 means the batch size. Some kinds of variable do not contain shape, just set it to None. dtype(np.dtype|core.VarDesc.VarType|str): The data type of variable. lod_level(int): The level of lod tensor. 
0 means it is not a time series data. capacity(int): The capacity of Channel variable. Ignored for other types. persistable(bool): True if the variable should be saved as check point. Defaults to False. stop_gradient(bool): True if the variable will stop to calculate gradients when backward. Defaults to False. """ def __init__(self, block, type=core.VarDesc.VarType.LOD_TENSOR, name=None, shape=None, dtype=None, lod_level=None, capacity=None, persistable=None, error_clip=None, stop_gradient=False, is_data=False, **kwargs): self.block = block self.error_clip = error_clip if name is None: name = unique_name.generate('_generated_var') is_new_var = False self.desc = self.block.desc.find_var(name) if self.desc is None: self.desc = self.block.desc.var(name) is_new_var = True if is_new_var: self.desc.set_type(type) elif self.desc.type() != type: raise ValueError("Variable {0} has been created before. The " "previous type is {1}; the new type is {2}. They" " are not matched".format(self.name, self.desc.type(), type)) if shape is not None: if is_new_var: self.desc.set_shape(shape) else: old_shape = self.shape shape = tuple(shape) if shape != old_shape: raise ValueError( "Variable {0} has been created before. the previous " "shape is {1}; the new shape is {2}. They are not " "matched.".format(self.name, old_shape, shape)) if dtype is not None: if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) if is_new_var: self.desc.set_dtype(dtype) else: old_dtype = self.dtype if dtype != old_dtype: raise ValueError("Variable {0} has been created before. " "The previous data type is {1}; the new " "data type is {2}. They are not " "matched.".format(self.name, old_dtype, dtype)) if lod_level is not None: if is_new_var: self.desc.set_lod_level(lod_level) else: if lod_level != self.lod_level: raise ValueError("Variable {0} has been created before. " "The previous lod_level is {1}; the new " "lod_level is {2}. 
They are not " "matched".format(self.name, self.lod_level, lod_level)) if persistable is not None: if is_new_var: self.desc.set_persistable(persistable) else: if persistable != self.persistable: raise ValueError( "Variable {0} has been created before." "The previous persistable is {1}; the new " "persistable is {2}. They are not matched".format( self.name, self.persistable, persistable)) if capacity is not None: if is_new_var: self.desc.set_capacity(capacity) else: # TODO(abhinavarora) : Compare with set capacity once, # get_capacity is implemented pass self.block.vars[name] = self self.op = None self.stop_gradient = stop_gradient self.is_data = is_data def __str__(self): return self.to_string(True) def to_string(self, throw_on_error, with_details=False): """ Get debug string. Args: throw_on_error(bool): True if raise an exception when self is not intialized. with_details(bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True Returns(str): The debug string. """ assert isinstance(throw_on_error, bool) and isinstance(with_details, bool) protostr = self.desc.serialize_to_string() proto = framework_pb2.VarDesc.FromString(str(protostr)) res_str = _debug_string_(proto, throw_on_error) if with_details: additional_attr = ("error_clip", "stop_gradient") for attr_name in additional_attr: res_str += "%s: %s\n" % (attr_name, str(getattr(self, attr_name))) return res_str __repr__ = __str__ def set_desc(self, input): self.desc = input @property def persistable(self): return self.desc.persistable() @persistable.setter def persistable(self, p): self.desc.set_persistable(p) @property def name(self): return self.desc.name() @name.setter def name(self, new_name): self.desc.set_name(new_name) @property def shape(self): # convert to tuple, make it as same as numpy API. 
return tuple(self.desc.shape()) @property def dtype(self): return self.desc.dtype() @property def lod_level(self): return self.desc.lod_level() @property def type(self): return self.desc.type() def set_error_clip(self, error_clip): self.error_clip = error_clip def get_all_op_protos(): """ Get all registered op proto from PaddlePaddle C++ end. Returns(list): list of OpProto """ protostrs = core.get_all_op_protos() ret_values = [] for pbstr in protostrs: op_proto = framework_pb2.OpProto.FromString(str(pbstr)) ret_values.append(op_proto) return ret_values class OpProtoHolder(object): """ A global variable to hold all OpProtos from C++ as a map """ @classmethod def instance(cls): if not hasattr(cls, '_instance'): cls._instance = cls() return cls._instance def __init__(self): assert not hasattr( self.__class__, '_instance'), 'Please use `instance()` to get OpProtoHolder object!' op_protos = get_all_op_protos() self.op_proto_map = {} for proto in op_protos: self.op_proto_map[proto.type] = proto def get_op_proto(self, type): """ Get OpProto by a type string. Args: type(str): The type that operator registered in C++ side. Returns(framework_pb2.OpProto): The OpProto """ if type not in self.op_proto_map: raise ValueError("Operator \"%s\" has not been registered." % type) return self.op_proto_map[type] class Operator(object): """ Python Operator class. The operator represents the build in instructions in a Block. Users can use the build in instructions to describe their neural network. """ def __init__(self, block, desc, type=None, inputs=None, outputs=None, attrs=None): """ Constructor. Notes: The constructor of operator should not be invoked directly. Use Block.append_op or Block.prepend_op instead. >>> cur_program = Program() >>> cur_block = cur_program.current_block() >>> # var1 += var2 + var3 >>> cur_block.append_op(type="sum", >>> inputs={"X": [var1, var2, var3]}, >>> outputs={"Out": [var1]}) Args: block(Block): The block has the current operator. 
desc(core.OpDesc): The protobuf description. type(str): The type of operator. inputs(dict): The input dictionary. Key is the input parameter name. Value is a list of variables. outputs(dict): The output dictionary which has the same format with inputs. attrs(dict): The attributes dictionary. Key is attribute name. Value is the attribute value. The attribute type should be as same as the type registered in C++ """ self.block = block self.desc = desc self.attrs = attrs if self.attrs is None: self.attrs = dict() del attrs op_maker = core.op_proto_and_checker_maker if op_maker.kOpRoleAttrName() not in self.attrs: self.attrs[op_maker.kOpRoleAttrName()] = self.block.program.op_role role_var_name = op_maker.kOpRoleVarAttrName() if len(self.block.program. op_role_var) != 0 and role_var_name not in self.attrs: self.attrs[role_var_name] = self.block.program.op_role_var if role_var_name in self.attrs and len(self.attrs[role_var_name]) == 0: del self.attrs[role_var_name] if len(self.desc.type()) != 0: return if type is None: raise ValueError( "`type` to initilized an Operator can not be None.") self.desc.set_type(type) proto = OpProtoHolder.instance().get_op_proto(type) def find_name(var_list, name): for var_name in var_list: if var_list[var_name] is not None and var_name == name: return True return False if inputs is not None: for in_proto in proto.inputs: found = find_name(inputs, in_proto.name) assert found or in_proto.dispensable, "Input {} not found".format( in_proto.name) if found: in_args = inputs[in_proto.name] if not isinstance(in_args, list): in_args = [in_args] if not in_proto.duplicable and len(in_args) > 1: raise ValueError( "Input %s expects only one input, but %d are given." 
% (in_proto.name, len(in_args))) in_arg_names = [] for arg in in_args: if isinstance(arg, basestring): in_arg_names.append(arg) else: in_arg_names.append(arg.name) self.desc.set_input(in_proto.name, in_arg_names) else: self.desc.set_input(in_proto.name, []) if outputs is not None: given = set() need = set() for n in outputs: given.add(n) for m in proto.outputs: need.add(m.name) if not given == need: raise ValueError(("Incorrect setting for output(s) of " "operator \"%s\". Need: [%s] Given: [%s]") % (type, ", ".join(str(e) for e in need), ", ".join(str(e) for e in given))) for out_proto in proto.outputs: out_args = outputs[out_proto.name] if not isinstance(out_args, list): out_args = [out_args] if not out_proto.duplicable and len(out_args) > 1: raise ValueError( "Output %s expects only one output, but %d are given." % (out_proto.name, len(out_args))) out_arg_names = [] for arg in out_args: out_arg_names.append(arg.name) arg.op = self self.desc.set_output(out_proto.name, out_arg_names) if self.attrs is not None: if not isinstance(self.attrs, dict): raise TypeError("'attrs' should be a dict.") for attr in proto.attrs: attr_name = attr.name if (attr_name not in self.attrs) or ( self.attrs[attr_name] is None): continue if isinstance(self.attrs[attr_name], Block): self.desc.set_block_attr(attr_name, self.attrs[attr_name].desc) elif isinstance(self.attrs[attr_name], core.BlockDesc) or \ isinstance(self.attrs[attr_name], core.ProgramDesc): self.desc.set_serialized_attr( attr_name, self.attrs[attr_name].serialize_to_string()) else: self.desc.set_attr(attr_name, self.attrs[attr_name]) self.desc.check_attrs() no_kernel_op_set = { 'feed', 'fetch', 'save', 'load', 'recurrent', 'go', 'rnn_memory_helper_grad', 'conditional_block', 'while', 'send', 'recv', 'listen_and_serv', 'parallel_do', 'save_combine', 'load_combine', 'ncclInit', 'channel_create', 'channel_close', 'channel_send', 'channel_recv', 'select', 'gen_nccl_id' } if type not in no_kernel_op_set: 
self.desc.infer_var_type(self.block.desc) self.desc.infer_shape(self.block.desc) def to_string(self, throw_on_error): """ To debug string. Args: throw_on_error(bool): raise exception when self is not initialized when throw_on_error is True Returns(str): The debug string. """ protostr = self.desc.serialize_to_string() proto = framework_pb2.OpDesc.FromString(str(protostr)) return _debug_string_(proto, throw_on_error) def __str__(self): return self.to_string(True) __repr__ = __str__ @property def type(self): return self.desc.type() def input(self, name): """ Get input arguments by the input parameter name Args: name(str): The input parameter name Returns(list): return the list of argument names associated with the specific parameter name. """ return self.desc.input(name) def rename_input(self, old_name, new_name): self.desc.rename_input(old_name, new_name) def rename_output(self, old_name, new_name): self.desc.rename_output(old_name, new_name) @property def input_names(self): """ Get all input parameter names Returns(list): return a list of input parameter names """ return self.desc.input_names() @property def input_arg_names(self): return self.desc.input_arg_names() @property def output_arg_names(self): return self.desc.output_arg_names() def output(self, name): """ Get output arguments by the output parameter name Args: name(str): The output parameter name Returns(list): return the list of argument names associated with the specific parameter name. """ return self.desc.output(name) @property def output_names(self): """ Get all output parameter names Returns(list): return a list of output parameter names """ return self.desc.output_names() @property def idx(self): """ Return the array index of current operator. Returns(int): The array index in block.ops array Raises: ValueError: when the operator is not found. """ for i, op in enumerate(self.block.ops): if op == self: return i raise ValueError( "Can't find op itself in it's block. 
It could be a bug of Paddle.") def has_attr(self, name): """ operator has the attribute with name or not. Args: name(str): the attribute name Returns(bool): True if has this attribute. """ return self.desc.has_attr(name) def attr_type(self, name): """ Get the type of attribute by attribute name Args: name(str): the attribute name Returns(core.AttrType): the attribute type """ return self.desc.attr_type(name) def set_attr(self, name, val): self.attrs[name] = val self.desc.set_attr(name, val) @property def attr_names(self): """ Get all attribute names Returns(list): The list of attribute name """ return self.desc.attr_names() def attr(self, name): """ Get attribute by name Args: name(str): the attribute name Returns(bool|int|str|float|list): The attribute value. The return value can be any valid attribute type. """ return self.desc.attr(name) def block_attr(self, name): """ Get the block attribute by name Args: name(str): the attribute name Returns(int): the block index """ return self.desc.block_attr(name) def all_attrs(self): """ Get the attribute dict Returns(dict): The Operator's attribute dict """ attr_names = self.attr_names attr_map = {} for n in attr_names: if n == 'sub_block': attr_map[n] = self.block_attr(n) else: attr_map[n] = self.attr(n) return attr_map class Block(object): def __init__(self, program, idx): self.desc = program.desc.block(idx) self.vars = collections.OrderedDict() # var_name --> var self.ops = list() # operator list self.program = program self.removed_vars = collections.OrderedDict() def __str__(self): return self.to_string(True) def to_string(self, throw_on_error, with_details=False): """ To debug string. Args: throw_on_error(bool): raise exception when self is not initialized when throw_on_error is True with_details(bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True Returns(str): The debug string. 
""" assert isinstance(throw_on_error, bool) and isinstance(with_details, bool) if with_details: re_add_indent = re.compile(r"\n(.)") res_str = "blocks {\n idx: %d\n parent_idx: %d" % ( self.idx, self.parent_idx) for var in self.vars.itervalues(): res_str += "\n vars {\n %s }" % re_add_indent.sub( r"\n \1", var.to_string(throw_on_error, with_details)) for op in self.ops: res_str += "\n ops {\n %s }" % re_add_indent.sub( r"\n \1", op.to_string(throw_on_error)) res_str += "\n}" else: protostr = self.desc.serialize_to_string() proto = framework_pb2.BlockDesc.FromString(str(protostr)) res_str = _debug_string_(proto, throw_on_error) return res_str __repr__ = __str__ @property def parent_idx(self): return self.desc.parent @property def forward_block_idx(self): return self.desc.get_forward_block_idx() def set_forward_block_idx(self, idx): self.desc.set_forward_block_idx(idx) @property def idx(self): return self.desc.id def var(self, name): if not isinstance(name, basestring): raise TypeError() v = self.vars.get(name, None) if v is None: raise ValueError("var %s not in this block" % name) return v def var_recursive(self, name): frontier = list() visited = set() frontier.append(self) prog = self.program while len(frontier) != 0: # BFS cur = frontier[0] frontier = frontier[1:] if id(cur) in visited: continue if cur.has_var(name): return cur.var(name) if cur.parent_idx != -1: frontier.append(prog.block(cur.parent_idx)) if cur.forward_block_idx != -1: frontier.append(prog.block(cur.forward_block_idx)) visited.add(id(cur)) raise ValueError("Var {0} is not found recursively".format(name)) def all_parameters(self): return list(self.iter_parameters()) def iter_parameters(self): return (item[1] for item in self.vars.iteritems() if isinstance(item[1], Parameter)) def create_var(self, *args, **kwargs): var = Variable(block=self, *args, **kwargs) if 'initializer' in kwargs: kwargs['initializer'](var, self) return var def has_var(self, name): return name in self.vars def 
rename_var(self, name, new_name): """ Rename variable in vars and ops' inputs and outputs """ if not self.has_var(name): raise ValueError("var %s is not in current block" % name) v = self.var(name) if type(v) == Parameter: var_type = "Parameter" stop_gradient = v.stop_gradient trainable = v.trainable optimize_attr = v.optimize_attr regularizer = v.regularizer gradient_clip_attr = v.gradient_clip_attr error_clip = v.error_clip elif type(v) == Variable: var_type = "Variable" error_clip = v.error_clip stop_gradient = v.stop_gradient else: raise ValueError("unsupported var type: %s", type(v)) orig_var_type = v.type self.desc.rename_var(name, new_name) # NOTE: v is destroyed by C++ after calling rename_var. d = self.desc.find_var(new_name) if var_type == "Parameter": var = Parameter( self, d.shape(), d.dtype(), type=orig_var_type, name=new_name, stop_gradient=stop_gradient, trainable=trainable, optimize_attr=optimize_attr, regularizer=regularizer, gradient_clip_attr=gradient_clip_attr, error_clip=error_clip) elif var_type == "Variable": var = Variable( self, type=orig_var_type, name=new_name, error_clip=error_clip, stop_gradient=stop_gradient) # rename the python side, sync_with_cpp will only add # new vars/ops to python side. 
self.vars[new_name] = var del self.vars[name] self.sync_with_cpp() return var def remove_var(self, name): self.sync_with_cpp() self.desc.remove_var(name) del self.vars[name] def create_parameter(self, *args, **kwargs): global_block = self.program.global_block() param = Parameter(global_block, *args, **kwargs) if 'initializer' in kwargs: kwargs['initializer'](param, self) return param def append_op(self, *args, **kwargs): op_desc = self.desc.append_op() op = Operator(block=self, desc=op_desc, *args, **kwargs) self.ops.append(op) return op def insert_op(self, index, *args, **kwargs): self.sync_with_cpp() op_desc = self.desc.insert_op(index) op = Operator(block=self, desc=op_desc, *args, **kwargs) self.ops.insert(index, op) return op def remove_op(self, index): self.sync_with_cpp() self.desc.remove_op(index, index + 1) del self.ops[index] def slice_ops(self, start, end): return self.ops[start:end] def prepend_op(self, *args, **kwargs): op_desc = self.desc.prepend_op() op = Operator(self, op_desc, *args, **kwargs) self.ops.insert(0, op) return op def sync_with_cpp(self): """ Sync from the desc on the c++ end. This method is used to synchronize the c++ desc instance generated by backward. 
""" # sync variables from cpp for var in self.desc.all_vars(): if not self.has_var(var.name()): self.create_var(name=var.name(), desc=var, type=var.type()) # sync variables removed from c++ end for var in self.vars.keys(): if not self.desc.find_var(var): self.vars.pop(var) # sync operators from cpp ops_in_cpp = [] for op_idx in range(0, self.desc.op_size()): ops_in_cpp.append(self.desc.op(op_idx)) if len(self.ops) != 0: first_op_in_python = self.ops[0].desc last_op_in_python = self.ops[len(self.ops) - 1].desc start_index = None end_index = None for index in range(len(ops_in_cpp)): if first_op_in_python == ops_in_cpp[index]: start_index = index if last_op_in_python == ops_in_cpp[index]: end_index = index assert start_index is not None assert end_index is not None assert start_index <= end_index else: start_index = 0 end_index = -1 # sync ops append to the head of cpp_ops for index in range((start_index - 1 - 1), -1, -1): op_desc = ops_in_cpp[index] op = Operator(self, op_desc) self.ops.insert(0, op) # sync ops append to the end of cpp_ops for index in range((end_index + 1), len(ops_in_cpp)): op_desc = ops_in_cpp[index] op = Operator(self, op_desc) self.ops.append(op) # sync ops removed from c++ end if end_index != -1 and end_index < len(self.ops): ops_in_cpp_index = 0 ops_in_python_index = 0 while ops_in_python_index < len( self.ops) and ops_in_cpp_index < len(ops_in_cpp): if self.ops[ops_in_python_index].desc != ops_in_cpp[ ops_in_cpp_index]: del self.ops[ops_in_python_index] else: ops_in_cpp_index += 1 ops_in_python_index += 1 assert len(self.ops) == len(ops_in_cpp) for index in range(len(self.ops)): assert self.ops[index].desc == ops_in_cpp[index] def copy_param_info_from(self, other): """ Copy the information of parameters from the other block Args: other(Block): the other block Returns: None """ if not isinstance(other, Block): raise TypeError("copy_param_info_from should be invoked with Block") for p in other.iter_parameters(): assert isinstance(p, Parameter) 
v = self.vars.get(p.name, None) if v is None: raise ValueError("copy_param_info_from should be invoked with " "same topology") assert isinstance(v, Variable) new_p = Parameter( block=self, shape=v.shape, dtype=v.dtype, type=v.type, lod_level=v.lod_level, stop_gradient=p.stop_gradient, trainable=p.trainable, optimize_attr=p.optimize_attr, regularizer=p.regularizer, gradient_clip_attr=p.gradient_clip_attr, error_clip=p.error_clip, name=v.name) self.vars[new_p.name] = new_p def clone_variable(self, var): """ Clone a variable into current block. Args: var: the variable to be cloned. Returns: The new variable cloned from 'var' in current block. """ assert isinstance(var, Variable) ret_var = None # make STEP_SCOPES var can be safely cloned. if var.type == core.VarDesc.VarType.STEP_SCOPES: ret_var = self.create_var( name=var.name, persistable=var.persistable, type=var.type) elif var.type == core.VarDesc.VarType.SELECTED_ROWS: ret_var = self.create_var( name=var.name, shape=var.shape, dtype=var.dtype, type=var.type, persistable=True, is_data=var.is_data) else: ret_var = self.create_var( name=var.name, shape=var.shape, dtype=var.dtype, type=var.type, lod_level=var.lod_level, persistable=True, is_data=var.is_data) return ret_var class Program(object): def __init__(self): self.desc = core.ProgramDesc() self.blocks = [Block(self, 0)] self.current_block_idx = 0 self._seed = 0 self._current_role = core.op_proto_and_checker_maker.OpRole.Forward self._op_role_var = [] @property def op_role(self): return self._current_role @op_role.setter def set_op_role(self, role): self._current_role = role @property def op_role_var(self): return self._op_role_var @op_role_var.setter def set_op_role_var(self, var_name): self._op_role_var = [var_name] @contextlib.contextmanager def optimized_guard(self, var): OpRole = core.op_proto_and_checker_maker.OpRole self._current_role = OpRole.Optimize self._op_role_var = [var.name if isinstance(var, Variable) else var] yield self._op_role_var = [] 
self._current_role = OpRole.Forward def __str__(self): return self.to_string(True) def to_string(self, throw_on_error, with_details=False): """ To debug string. Args: throw_on_error(bool): raise exception when self is not initialized when throw_on_error is True with_details(bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True Returns(str): The debug string. """ assert isinstance(throw_on_error, bool) and isinstance(with_details, bool) if with_details: res_str = "" for block in self.blocks: res_str += block.to_string(throw_on_error, with_details) else: protostr = self.desc.serialize_to_string() proto = framework_pb2.ProgramDesc.FromString(str(protostr)) res_str = _debug_string_(proto, throw_on_error) return res_str def get_desc(self): return self.desc def clone(self, for_test=False): """Clone the Program object Set for_test to False when we want to clone the program for training. Set for_test to True when we want to clone the program for testing. Args: for_test(bool): Some operators, such as batch_norm and drop_out ops, behave differently in training and testing. If for_test is True, the is_test attributes in these operators will be set to True for testing purposes, otherwise, they remain unchanged. Returns(Program): The cloned Program object. """ if for_test: p = self.inference_optimize() else: p = Program() p.desc = core.ProgramDesc(self.desc) p.blocks = [Block(p, i) for i in xrange(self.desc.num_blocks())] p.sync_with_cpp() p.copy_param_info_from(self) p.copy_data_info_from(self) return p def prune(self, targets): if not isinstance(targets, list): targets = [targets] targets_idx = [] for t in targets: if not isinstance(t, Operator): if isinstance(t, Variable): # After transpiler processing, the op that output this # variable maybe has been changed, so t.op is not reliable # and we need to find the current op that generate this # variable here. 
t.op = None global_block = self.global_block() for idx, op in enumerate(global_block.ops): if t.name in op.output_arg_names: t.op = op break t = t.op if t is None: raise ValueError( "The target variable must have an " "associated operator that generates it.") else: raise ValueError("All targets of prune() can only be " "Variable or Operator.") targets_idx.append([t.block.idx, t.idx]) res = Program() res.desc = core.prune(self.desc, targets_idx) res.blocks = [Block(res, i) for i in xrange(res.desc.num_blocks())] res.sync_with_cpp() return res def inference_optimize(self): # this is an alternative implement before # core.inference_optimize being fixed. res = Program() res.desc = core.ProgramDesc(self.desc) for i in xrange(res.desc.num_blocks()): block = res.desc.block(i) for j in xrange(block.op_size()): op = block.op(j) if op.has_attr('is_test'): op.set_attr('is_test', True) res.blocks = [Block(res, i) for i in xrange(res.desc.num_blocks())] res.sync_with_cpp() return res @staticmethod def parse_from_string(binary_str): p = Program() p.desc = core.ProgramDesc(binary_str) p.blocks = [Block(p, i) for i in xrange(p.desc.num_blocks())] p.sync_with_cpp() return p @property def random_seed(self): return self._seed @property def num_blocks(self): return self.desc.num_blocks() @random_seed.setter def random_seed(self, seed): if not isinstance(seed, int): raise ValueError("Seed must be a integer.") self._seed = seed def __repr__(self): return str(self) def global_block(self): return self.blocks[0] def block(self, index): return self.blocks[index] def current_block(self): return self.blocks[self.current_block_idx] def create_block(self, parent_idx=None): new_block_idx = len(self.blocks) parent = self.current_block() if parent_idx is None else self.block( parent_idx) self.desc.append_block(parent.desc) self.current_block_idx = new_block_idx self.blocks.append(Block(self, self.current_block_idx)) return self.current_block() def rollback(self): self.current_block_idx = 
self.current_block().parent_idx def sync_with_cpp(self): for block_idx in range(len(self.blocks), self.desc.num_blocks()): self.blocks.append(Block(self, block_idx)) for block in self.blocks: block.sync_with_cpp() def copy_param_info_from(self, other): """ Copy the information of parameters from other program. Args: other(Program): Other program Returns: None """ if not isinstance(other, Program): raise TypeError("copy_param_info_from should be invoked with " "Program") if len(self.blocks) != len(other.blocks): raise ValueError("copy_param_info_from should be invoked with two " "program, with represent the same topology") self.global_block().copy_param_info_from(other.global_block()) def copy_data_info_from(self, other): """ Copy the information of data variables from other program. Args: other(Program): Other program Returns: None """ if not isinstance(other, Program): raise TypeError("copy_param_info_from should be invoked with " "Program") if len(self.blocks) != len(other.blocks): raise ValueError("copy_param_info_from should be invoked with two " "program, with represent the same topology") for var in other.global_block().vars.itervalues(): if var.is_data: self.global_block().var(var.name).is_data = True def list_vars(self): for each_block in self.blocks: for each_var in each_block.vars.itervalues(): yield each_var class Parameter(Variable): def __init__(self, block, shape, dtype, **kwargs): if shape is None or dtype is None: raise ValueError("Parameter must set shape and dtype") if len(shape) == 0: raise ValueError("Parameter shape cannot be empty") for each in shape: if each < 0: raise ValueError("Parameter shape should not be related with " "batch-size") Variable.__init__( self, block, persistable=True, shape=shape, dtype=dtype, **kwargs) self.trainable = kwargs.get('trainable', True) self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0}) self.regularizer = kwargs.get('regularizer', None) self.gradient_clip_attr = 
kwargs.get('gradient_clip_attr', None) self.do_model_average = kwargs.get('do_model_average', None) def __str__(self): return self.to_string(True) def to_string(self, throw_on_error, with_details=False): """ To debug string. Args: throw_on_error(bool): raise exception when self is not initialized when throw_on_error is True with_details(bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True Returns(str): The debug string. """ assert isinstance(throw_on_error, bool) and isinstance(with_details, bool) if with_details: res_str = Variable.to_string(self, throw_on_error, True) additional_attr = ("trainable", "optimize_attr", "regularizer", "gradient_clip_attr", "do_model_average") for attr_name in additional_attr: res_str += "%s: %s\n" % (attr_name, str(getattr(self, attr_name))) else: res_str = Variable.to_string(self, throw_on_error, False) return res_str __repr__ = __str__ # program is a global instance. _main_program_ = Program() _startup_program_ = Program() def default_startup_program(): """ Get default startup program. In startup program, Paddle will initialize parameters, initialize nccl handle, etc. Returns: Program: startup program """ return _startup_program_ def default_main_program(): """ Get default main program. The main program is used for training or testing. Returns: Program: main program """ return _main_program_ def switch_main_program(program): """ Switch the main program to a new program. 
Args: program(Program): The new main program Returns: Program: The previous main program """ global _main_program_ prev_program = _main_program_ _main_program_ = program return prev_program def switch_startup_program(program): """ Switch the startup program to a new program Args: program(Program): The new startup program Returns: Program: The previous startup program """ global _startup_program_ prev_program = _startup_program_ _startup_program_ = program return prev_program @contextlib.contextmanager def program_guard(main_program, startup_program=None): """ Switch program with `with` statement Examples: >>> with program_guard(Program()): >>> data = fluid.layers.data(...) >>> hidden = fluid.layers.fc(...) Args: main_program(Program): New main program inside `with` statement startup_program(Program): New startup program inside `with` statement. None means do not change startup program. Returns: None """ if not isinstance(main_program, Program): raise TypeError("main_program should be Program") main_program = switch_main_program(main_program) if startup_program is not None: if not isinstance(startup_program, Program): raise TypeError("startup_program should be Program") startup_program = switch_startup_program(startup_program) yield switch_main_program(main_program) if startup_program is not None: switch_startup_program(startup_program) def get_var(name, program=None): """ Get a variable by name from the global block of a program Args: name(str): name of the variable program(Program|None): program object. If None, default_global_program() will be used. Returns: Variable """ if program is None: program = default_main_program() assert isinstance(name, str) assert isinstance(program, Program) return program.global_block().var(name)
47,188
32.278561
94
py
Paddle
Paddle-master/python/paddle/fluid/backward.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from paddle.fluid import framework as framework from . import core import collections import copy import unique_name __all__ = [ 'append_backward', 'calc_gradient', ] def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None): """ Traverse all ops in op_descs[begin_idx : end_idx], if any op has inputs/outputs named "old_name", rename it as 'new_name' """ if begin_idx is None: begin_idx = 0 if end_idx is None: end_idx = len(op_descs) for i in range(begin_idx, end_idx): op_desc = op_descs[i] if isinstance(op_desc, tuple): op_desc = op_desc[0] op_desc.rename_input(old_name, new_name) op_desc.rename_output(old_name, new_name) def _create_op_desc_(op_type, inputs, outputs, attrs): """ Create a C++ OpDesc object with specified inputs, outputs and attributes. 
""" op_desc = core.OpDesc() op_desc.set_type(op_type) for para, args in inputs.iteritems(): op_desc.set_input(para, args) for para, args in outputs.iteritems(): op_desc.set_output(para, args) op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() if op_role_attr_name not in attrs: attrs[ op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward for name, val in attrs.iteritems(): if isinstance(val, framework.Block): op_desc.set_block_attr(name, val.desc) else: op_desc.set_attr(name, val) return op_desc def _infer_var_data_type_(grad_var_name, block): """ Infer the data type of given grad variable """ grad_var = block.desc.find_var(grad_var_name.encode("ascii")) fwd_name = _strip_grad_suffix_(grad_var_name.encode("ascii")) if block.desc.has_var_recursive(fwd_name): fwd_var = block.desc.find_var_recursive(fwd_name.encode("ascii")) grad_var.set_dtype(fwd_var.dtype()) else: grad_var.set_dtype(core.VarDesc.VarType.FP32) def _all_in_set_(cands, s): """ Test if all elements of 'cands' are in set 's' """ if len(cands) == 0: return False for c in cands: if not c in s: return False return True def _some_in_set_(cands, s): """ Test if some elements of 'cands' are in set 's' """ if len(cands) == 0: return False for c in cands: if c in s: return True return False def _strip_grad_suffix_(name): """ Strip the grad suffix from the given varibale name e.g. x@GRAD ==> x y@GRAD@RENAME@1 ==> y """ pos = name.find(core.grad_var_suffix()) return name[:pos] if pos != -1 else name def _append_grad_suffix_(name): """ Append grad suffix to the given variable name e.g. x ==> x@GRAD """ return name + core.grad_var_suffix() def _addup_repetitive_outputs_(op_descs): """ In backward part, an variable may be the output of more than one ops. In this case, the variable should be the accumulation of all the outputs. `sum_op`s are added to implement the accumulate. 
""" pending_sum_ops = [] var_rename_count = collections.defaultdict(int) renamed_vars = collections.defaultdict(list) for idx, op_desc in enumerate(op_descs): for var_name in op_desc.input_arg_names(): if len(renamed_vars[var_name]) > 1: pending_sum_ops.append( (_create_op_desc_("sum", {"X": renamed_vars[var_name]}, {"Out": [var_name]}, {}), idx)) renamed_vars[var_name] = [var_name] for var_name in op_desc.output_arg_names(): if var_name == core.empty_var_name( ) or var_name in op_desc.input_arg_names(): # empty variable or inplace op continue if len(renamed_vars[var_name]) == 0: # it's the first time we get the variable renamed_vars[var_name] = [var_name] else: if len(renamed_vars[var_name]) == 1: new_name = var_name + "@RENAME@" + \ str(var_rename_count[var_name]) var_rename_count[var_name] += 1 # rename original var_name renamed_vars[var_name][0] = new_name _rename_arg_(op_descs, var_name, new_name, 0, idx) _rename_arg_(pending_sum_ops, var_name, new_name) new_name = var_name + "@RENAME@" + \ str(var_rename_count[var_name]) var_rename_count[var_name] += 1 op_desc.rename_output(var_name, new_name) renamed_vars[var_name].append(new_name) for var_name, inputs in renamed_vars.iteritems(): if len(inputs) > 1: pending_sum_ops.append((_create_op_desc_( "sum", {"X": inputs}, {"Out": [var_name]}, {}), len(op_descs))) # sum_op descs are sorted according to their insert position for p in reversed(pending_sum_ops): op_descs.insert(p[1], p[0]) return op_descs def _remove_no_grad_branch_(op_descs, no_grad_set): """ Remove unnecessary grad ops A grad op can be removed in two cases: 1. all outputs of the grad op are in 'no_grad_set' 2. 
all grad inputs of the grad op are in 'no_grad_set' """ def _op_can_be_removed_(op_desc, no_grad_set): out_arg_names = op_desc.output_arg_names() if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set): return True if _all_in_set_( filter(lambda name: name.find(core.grad_var_suffix()) != -1, op_desc.input_arg_names()), no_grad_set): no_grad_set.update(out_arg_names) return True return False # Remove ops whose outputs are all in no_grad_dict op_descs = filter( lambda op_desc: not _op_can_be_removed_(op_desc, no_grad_set), op_descs) # Insert fill_zeros_like_op to_insert = [] for idx, op_desc in enumerate(op_descs): for arg in op_desc.input_arg_names(): if core.grad_var_suffix() in arg and arg in no_grad_set: to_insert.append((_create_op_desc_("fill_zeros_like", { "X": [_strip_grad_suffix_(arg)] }, {"Out": [arg]}, {}), idx)) map(lambda p: op_descs.insert(p[1], p[0]), reversed(to_insert)) return op_descs import proto.framework_pb2 as framework_pb2 def serialize_op_decs(op_desc): protostr = op_desc.serialize_to_string() proto = framework_pb2.OpDesc.FromString(str(protostr)) return proto.__str__() def _callback_lookup_(op): """ Only used in _append_backward_ops_ Build and returns a callback function for certain op. 
For example parallel_do: AllReduce :param op: :return: callback function """ if op.type == 'parallel_do' and op.attr('use_nccl'): all_vars = op.block.vars param_names = set(op.input('parameters')) param_names = filter(lambda name: all_vars[name].stop_gradient is False, param_names) param_grad_names = [n + "@GRAD" for n in param_names] class ParallelDoCallBack(object): def __init__(self, param_grad_names, parallel_scopes_name): self.has_inserted_nccl_init = False self.param_grad_names = param_grad_names self.parallel_scopes_name = parallel_scopes_name def __call__(self, block, context): if not self.has_inserted_nccl_init: op_desc = _create_op_desc_( "ncclInit", {"parallel_scopes": self.parallel_scopes_name}, {"Communicator": ['nccl_com__do_not_change_']}, {}) block.program.global_block().desc.append_op().copy_from( op_desc) self.has_inserted_nccl_init = True current_op_desc = context["__current_op_desc__"] for o_param in current_op_desc.output_names(): for o_argu in current_op_desc.output(o_param): if o_argu in self.param_grad_names: allreduce_out_name = o_argu + "__nccl_all_reduce__" op_desc = _create_op_desc_( "ncclReduce", { "X": [o_argu], "Communicator": ['nccl_com__do_not_change_'] }, {"Out": [allreduce_out_name]}, {"reduction": "ncclSum", "root": 0}, ) block.desc.append_op().copy_from(op_desc) op_desc = _create_op_desc_( "assign", {"X": [allreduce_out_name]}, {"Out": [o_argu]}, {}) block.desc.append_op().copy_from(op_desc) return ParallelDoCallBack(param_grad_names, op.output("parallel_scopes")) else: return None def _append_backward_ops_(block, ops, target_block, no_grad_dict, grad_to_var, callbacks=None): """ Create all grad ops, and insert them into given block Args: block(Block): the block where forward ops are ops(Op): the forward operators whose backward ops need to be added target_block(Block): the block which is going to hold new generated grad ops no_grad_dict(dict): key(int) block index val(set) a set of varibale names. 
These varibales have no gradient grad_to_var(dict)(output argument): key(str): grad variable name val(str): corresponding forward variable name callback(callable object): a callable object used to decorate new generated grad ops """ if callbacks is not None: assert (isinstance(callbacks, list)) for cb in callbacks: if not hasattr(cb, '__call__'): raise ValueError("'callback' must be a callable object.") # grad_op_descs holds created grad_op, and will be appended to target_block grad_op_descs = [] program = block.program for op in reversed(ops): grad_sub_block_list = [] # If the op has its own sub-block, deal with the sub-block first if op.has_attr("sub_block"): sub_block = program.block(op.block_attr("sub_block")) grad_sub_block = program.create_block() grad_sub_block.set_forward_block_idx(sub_block.idx) cb = _callback_lookup_(op) if cb is not None: if callbacks is None: new_callbacks = [cb] else: new_callbacks = callbacks + [_callback_lookup_(op)] _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block, no_grad_dict, grad_to_var, new_callbacks) else: _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block, no_grad_dict, grad_to_var, callbacks) program.rollback() grad_sub_block_list.append(grad_sub_block.desc) # Getting op's corresponding grad_op grad_op_desc, op_grad_to_var = core.get_grad_op_desc( op.desc, no_grad_dict[block.idx], grad_sub_block_list) grad_op_descs.extend(grad_op_desc) grad_to_var.update(op_grad_to_var) grad_op_descs = _addup_repetitive_outputs_(grad_op_descs) grad_op_descs = _remove_no_grad_branch_(grad_op_descs, no_grad_dict[block.idx]) # append op_desc in grad_op_descs to target_block op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() backward = core.op_proto_and_checker_maker.OpRole.Backward for op_desc in grad_op_descs: new_op_desc = target_block.desc.append_op() new_op_desc.copy_from(op_desc) new_op_desc.set_attr(op_role_attr_name, backward) grad_to_var["__current_op_desc__"] = new_op_desc if callbacks is 
not None: assert (isinstance(callbacks, list)) for cb in callbacks: cb(block=target_block, context=grad_to_var) def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): """ Create new variables required by backward pass. Args: block(Block): the block where new variables will be created start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created grad_to_var(dict): key(str): grad variable name val(str): corresponding forward variable name In most cases, this dict is generated by _append_backward_ops_() grad_info_map(dict)(output argument): key(str): forward variable name val(tuple): a tuple of (str, Block), str is the corresponding grad name, Block is the block containing grad variable """ for op_idx in range(start_op_idx, block.desc.op_size()): op_desc = block.desc.op(op_idx) if op_desc.has_attr("sub_block"): sub_block = block.program.block(op_desc.block_attr("sub_block")) _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map) new_vars = set() # create new gradient variables for grad_var_name in op_desc.output_arg_names(): grad_var_name = grad_var_name.encode("ascii") if block.desc.has_var_recursive( grad_var_name) or grad_var_name == core.empty_var_name(): continue block.desc.var(grad_var_name) new_vars.add(grad_var_name) if not grad_to_var.has_key(grad_var_name): continue grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block) # infer_shape and infer_type op_desc.infer_var_type(block.desc) op_desc.infer_shape(block.desc) # ncclInit dones't need to set data_type if op_desc.type() == 'ncclInit': continue for arg in op_desc.output_arg_names(): if arg in new_vars: _infer_var_data_type_(arg, block) def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map): var_map = copy.copy(target_grad_map) for op_idx in range(start_op_idx, block.desc.op_size()): op_desc = block.desc.op(op_idx) for name in op_desc.input_arg_names(): if name in var_map: op_desc.rename_input(name, var_map[name]) for 
name in op_desc.output_arg_names(): if block.desc.find_var(name.encode("ascii")): new_name = unique_name.generate(name) op_desc.rename_output(name, new_name) var_map[name] = new_name for g, ng in var_map.iteritems(): if g in grad_to_var: grad_to_var[ng] = grad_to_var[g] grad_to_var.pop(g) def _get_stop_gradients_(program): no_grad_dict = dict() assert isinstance(program, framework.Program) for block in program.blocks: assert isinstance(block, framework.Block) block_no_grad_set = set() for var in block.vars.itervalues(): assert isinstance(var, framework.Variable) if var.stop_gradient: block_no_grad_set.add(_append_grad_suffix_(var.name)) no_grad_dict[block.idx] = block_no_grad_set return no_grad_dict def append_backward(loss, parameter_list=None, no_grad_set=None, callbacks=None): """ Append backward part to main_program Args: loss(Variable): The variable generated by cost function. parameter_list(list[string]): Parameters that need to be updated by optimizer. If None, it means all parameters need to be updated. no_grad_set(set): Variables that have no gradients in Block 0. All variables with `step_gradient=True` from all blocks will be automatically added. Return: (list[(Variable,Variable)]): list of (parameter, gradient) pair. """ assert isinstance(loss, framework.Variable) if loss.op is None: # the loss is from a cloned program. Find loss op manually. for op in reversed(loss.block.ops): assert isinstance(op, framework.Operator) if len(op.output_arg_names) == 1 and op.output_arg_names[ 0] == loss.name: loss.op = op break if loss.op is None: raise ValueError("loss.op is None. 
Should not happend") loss.op.set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(), int(core.op_proto_and_checker_maker.OpRole.Forward) | int(core.op_proto_and_checker_maker.OpRole.Loss)) if callbacks is not None: isinstance(callbacks, list) program = loss.block.program if no_grad_set is None: no_grad_set = set() no_grad_set = copy.copy(no_grad_set) no_grad_dict = _get_stop_gradients_(program) no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set)) grad_info_map = dict() root_block = program.block(0) fwd_op_num = root_block.desc.op_size() current_block_idx = program.current_block_idx grad_to_var = dict() op_desc = _create_op_desc_( "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, { "shape": [1], "value": 1.0, "dtype": loss.dtype, "force_cpu": False, core.op_proto_and_checker_maker.kOpRoleAttrName(): int(core.op_proto_and_checker_maker.OpRole.Backward) | int(core.op_proto_and_checker_maker.OpRole.Loss), }) root_block.desc.append_op().copy_from(op_desc) block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0])) op_path = _find_op_path_(root_block, [loss], [], block_no_grad_set) no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set)) _append_backward_ops_(root_block, op_path, root_block, no_grad_dict, grad_to_var, callbacks) # Because calc_gradient may be called multiple times, # we need rename the internal gradient variables so that they have # different names. _rename_grad_(root_block, fwd_op_num, grad_to_var, {}) _append_backward_vars_(root_block, fwd_op_num, grad_to_var, grad_info_map) program.current_block_idx = current_block_idx program.sync_with_cpp() # FIXME(zcd): prevent loss.grad optimized by mem_opt. 
loss.block.var(_append_grad_suffix_(loss.name)).persistable = True if parameter_list is not None: parameters = parameter_list else: params = program.global_block().all_parameters() parameters = [param.name for param in params] params_and_grads = [] for param in parameters: if param not in grad_info_map: continue grad_info = grad_info_map[param] grad_block = grad_info[1] if not grad_block.has_var(grad_info[0]): raise ValueError("grad block[{0}] did not have grad var {1}".format( grad_info[1], grad_info[0])) # Get the param var from the global block param_var = program.global_block().var(param) grad_var = grad_block.var(grad_info[0]) if loss.block.has_var(grad_info[0]): params_and_grads.append((param_var, grad_var)) else: params_and_grads.append((param_var, None)) op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName() for p, g in params_and_grads: if g is None: continue for op in reversed(program.global_block().ops): assert isinstance(op, framework.Operator) if g.name in op.output_arg_names: g.op = op break if g.op is None: raise ValueError("Unexpected branch") attr_val = [p.name, g.name] if g.op.has_attr(op_role_var_attr_name): attr_val.extend(g.op.attr(op_role_var_attr_name)) g.op.set_attr(op_role_var_attr_name, attr_val) return params_and_grads def _as_list(x): if x is None: return [] return list(x) if isinstance(x, collections.Sequence) else [x] def _find_op_path_(block, outputs, inputs, no_grad_set): """ no_grad_set will also be changed """ input_names = set([inp.name for inp in inputs]) output_names = set([out.name for out in outputs]) relevant_op_flags = [True] * len(block.ops) # All the inputs of the block are used if inputs is empty, if inputs: for i, op in enumerate(block.ops): if _some_in_set_(op.desc.input_arg_names(), input_names): for name in op.desc.output_arg_names(): if name not in no_grad_set: input_names.add(name) else: relevant_op_flags[i] = False for i, op in reversed(list(enumerate(block.ops))): if 
_some_in_set_(op.desc.output_arg_names(), output_names): for name in op.desc.input_arg_names(): if name not in no_grad_set: output_names.add(name) else: relevant_op_flags[i] = False op_path = [ block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i] ] if inputs: for op in op_path: for name in op.desc.input_arg_names(): if name not in input_names: no_grad_set.add(name) return op_path def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None): """ Backpropagate the graidents of targets to inputs. Args: targets(Variable|list[Variable]): The target variables inputs(Variable|list[Variable]): The input variables no_grad_set(set[string]): The names of variables that have no gradients in Block 0. All variables with `stop_gradient=True` from all blocks will be automatically added. Return: (list[Variable]): list of gradients for inputs If an input does not affect targets, the corresponding gradient variable will be None """ targets = _as_list(targets) inputs = _as_list(inputs) target_gradients = _as_list(target_gradients) block = targets[0].block prog = block.program block_idx = block.idx if not target_gradients: target_gradients = [None] * len(targets) if len(targets) != len(target_gradients): raise ValueError( "Should have the same number of target_gradients as targets") if no_grad_set is None: no_grad_set = set() no_grad_set = copy.copy(no_grad_set) no_grad_dict = _get_stop_gradients_(prog) no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set)) fwd_op_num = block.desc.op_size() target_grad_map = {} for i, grad in enumerate(target_gradients): target = targets[i] if grad is None: grad_name = _append_grad_suffix_(target.name) op_desc = _create_op_desc_("fill_constant_batch_size_like", {"Input": [target.name]}, {"Out": [grad_name]}, { "shape": target.shape, "value": 1.0, "dtype": target.dtype, 'input_dim_idx': 0, 'output_dim_idx': 0 }) block.desc.append_op().copy_from(op_desc) else: if target.block.idx != block_idx or target.block.program 
!= prog: raise ValueError("all targets must be in the same block") if target.shape != grad.shape: raise ValueError( "The shapes of target and grad are different: %s %s" % ( target.name, grad.name)) target_grad_map[_append_grad_suffix_(target.name)] = grad.name for input in inputs: if input.block.program != prog: raise "input must be in the same program as targets" block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0])) op_path = _find_op_path_(block, targets, inputs, block_no_grad_set) no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set)) grad_to_var = dict() grad_info_map = dict() _append_backward_ops_(block, op_path, block, no_grad_dict, grad_to_var) # Because calc_gradient may be called multiple times, # we need rename the internal gradient variables so that they have # different names. _rename_grad_(block, fwd_op_num, grad_to_var, target_grad_map) _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map) prog.sync_with_cpp() grad_vars = [] for input_var in inputs: if input_var.name not in grad_info_map: grad_vars.append(None) else: grad_info = grad_info_map[input_var.name] grad_block = grad_info[1] grad_var = grad_block.var(grad_info[0]) grad_vars.append(grad_var) if len(grad_vars) == 1: return grad_vars[0] else: return grad_vars
26,531
36.902857
128
py
Paddle
Paddle-master/python/paddle/fluid/layer_helper.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import itertools

from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
import unique_name
from paddle.fluid.initializer import Constant, Xavier
from param_attr import ParamAttr, WeightNormParamAttr
import core


class LayerHelper(object):
    """Helper used by layer functions to create parameters/variables and to
    append operators to the default main and startup programs.

    ``kwargs`` carries the keyword arguments of the layer function (e.g.
    ``name``, ``param_attr``, ``bias_attr``, ``act``).
    """

    def __init__(self, layer_type, **kwargs):
        self.kwargs = kwargs
        self.layer_type = layer_type
        name = self.kwargs.get('name', None)
        if name is None:
            # Generate a unique per-layer name such as "fc_0".
            self.kwargs['name'] = unique_name.generate(self.layer_type)

    @property
    def name(self):
        """The (unique) name of this layer instance."""
        return self.kwargs['name']

    @property
    def main_program(self):
        return default_main_program()

    @property
    def startup_program(self):
        return default_startup_program()

    def append_op(self, *args, **kwargs):
        """Append an operator to the current block of the main program."""
        return self.main_program.current_block().append_op(*args, **kwargs)

    def multiple_input(self, input_param_name='input'):
        """Return the layer's input(s) as a list of Variables.

        Accepts a single Variable or a list/tuple of Variables; raises
        TypeError for anything else.
        """
        inputs = self.kwargs.get(input_param_name, [])
        type_error = TypeError(
            "Input of {0} layer should be Variable or sequence of Variable".
            format(self.layer_type))
        if isinstance(inputs, Variable):
            inputs = [inputs]
        elif not isinstance(inputs, list) and not isinstance(inputs, tuple):
            raise type_error
        else:
            for each in inputs:
                if not isinstance(each, Variable):
                    raise type_error
        return inputs

    def input(self, input_param_name='input'):
        """Return the single input Variable of the layer."""
        inputs = self.multiple_input(input_param_name)
        if len(inputs) != 1:
            # BUG FIX: previously this raised a plain string, which is itself
            # a TypeError at raise-time (strings are not exceptions).
            raise ValueError("{0} layer only takes one input".format(
                self.layer_type))
        return inputs[0]

    @property
    def param_attr(self):
        return ParamAttr.to_attr(self.kwargs.get('param_attr', None))

    @property
    def bias_attr(self):
        return ParamAttr.to_attr(self.kwargs.get('bias_attr', None))

    def multiple_param_attr(self, length):
        """Return a list of ``length`` ParamAttr, broadcasting a single one."""
        param_attr = self.param_attr
        if isinstance(param_attr, ParamAttr):
            param_attr = [param_attr]
        if len(param_attr) != 1 and len(param_attr) != length:
            raise ValueError("parameter number mismatch")
        elif len(param_attr) == 1 and length != 1:
            tmp = [None] * length
            for i in xrange(length):
                tmp[i] = copy.deepcopy(param_attr[0])
            param_attr = tmp
        return param_attr

    def iter_inputs_and_params(self, input_param_name='input'):
        """Yield (input Variable, ParamAttr) pairs in lockstep."""
        inputs = self.multiple_input(input_param_name)
        param_attrs = self.multiple_param_attr(len(inputs))
        for ipt, param_attr in itertools.izip(inputs, param_attrs):
            yield ipt, param_attr

    def input_dtype(self, input_param_name='input'):
        """Return the common dtype of all inputs; raise on a mismatch."""
        inputs = self.multiple_input(input_param_name)
        dtype = None
        for each in inputs:
            if dtype is None:
                dtype = each.dtype
            elif dtype != each.dtype:
                raise ValueError("Data Type mismatch: %d to %d" %
                                 (dtype, each.dtype))
        return dtype

    def _create_weight_normalize(self, attr, shape, dtype):
        """Reparameterize a weight as w = g * v / ||v|| (weight normalization,
        https://arxiv.org/pdf/1602.07868.pdf) and return the normalized w."""
        from .layers import elementwise_mul, elementwise_div, reshape

        # Remove these ops when LayerHelper and layers support indicating
        # program and block.
        def __norm_op(x,
                      out=None,
                      p=2,
                      dim=None,
                      keep_dim=False,
                      block=self.startup_program.global_block()):
            # p-norm of x over `dim` (all dims when dim is None), built from
            # abs -> pow(p) -> reduce_sum -> pow(1/p) primitive ops.
            if out is None:
                out = block.create_var(
                    name=unique_name.generate(".".join(
                        [self.name, 'weight_norm_norm'])),
                    dtype=dtype,
                    persistable=False)
            abs_out = block.create_var(
                name=unique_name.generate(".".join(
                    [self.name, 'weight_norm_abs'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
                type='abs', inputs={'X': x}, outputs={'Out': abs_out})
            pow_out = block.create_var(
                name=unique_name.generate(".".join(
                    [self.name, 'weight_norm_pow'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
                type='pow',
                inputs={'X': abs_out},
                outputs={'Out': pow_out},
                attrs={'factor': float(p)})
            sum_out = block.create_var(
                name=unique_name.generate(".".join(
                    [self.name, 'weight_norm_sum'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
                type='reduce_sum',
                inputs={'X': pow_out},
                outputs={'Out': sum_out},
                attrs={
                    'dim': dim,
                    'keep_dim': keep_dim,
                    'reduce_all': True if dim is None else False
                })
            block.append_op(
                type='pow',
                inputs={'X': sum_out},
                outputs={'Out': out},
                attrs={'factor': 1. / p})
            return out

        def __reshape_op(x,
                         shape,
                         out=None,
                         block=self.startup_program.global_block()):
            if out is None:
                out = block.create_var(
                    name=unique_name.generate(".".join(
                        [self.name, 'weight_norm_reshape'])),
                    dtype=dtype,
                    persistable=False)
            block.append_op(
                type='reshape',
                inputs={'X': x},
                outputs={'Out': out},
                attrs={'shape': shape})
            return out

        def __transpose_op(x,
                           axis,
                           out=None,
                           block=self.startup_program.global_block()):
            if out is None:
                out = block.create_var(
                    name=unique_name.generate(".".join(
                        [self.name, 'weight_norm_transpose'])),
                    dtype=dtype,
                    persistable=False)
            block.append_op(
                type='transpose',
                inputs={'X': x},
                outputs={'Out': out},
                attrs={'axis': axis})
            return out

        def __norm_except_dim(x,
                              out=None,
                              dim=None,
                              block=self.startup_program.global_block()):
            """Computes the norm over all dimensions except dim"""
            if out is None:
                out = block.create_var(
                    name=unique_name.generate(".".join(
                        [self.name, 'weight_norm_norm'])),
                    dtype=dtype,
                    persistable=False)
            if dim is None:
                __norm_op(x, out, dim=dim, block=block)
            elif dim == 0:
                # Flatten to (d0, -1), norm each row, reshape back.
                out_shape = [x.shape[0]] + [1] * (len(x.shape) - 1)
                reshape = __reshape_op(x, shape=[x.shape[0], -1], block=block)
                norm = __norm_op(reshape, dim=1, block=block)
                __reshape_op(norm, out=out, shape=out_shape, block=block)
            elif dim == len(x.shape) - 1:
                out_shape = [1] * (len(x.shape) - 1) + [x.shape[-1]]
                reshape = __reshape_op(x, shape=[-1, x.shape[-1]], block=block)
                norm = __norm_op(reshape, dim=0, block=block)
                __reshape_op(norm, out=out, shape=out_shape, block=block)
            else:
                # Move `dim` to the front, norm the rest, transpose back.
                perm = range(len(x.shape))
                perm[0], perm[dim] = dim, 0
                transpose = __transpose_op(x, perm, block=block)
                norm = __norm_op(transpose, dim=0, block=block)
                __transpose_op(norm, perm, out=out, block=block)
            return out

        def __weight_normalize(g, v, dim):
            """Calculations for weight normalization"""
            norm = __norm_except_dim(
                v, dim=dim, block=self.main_program.current_block())
            scale = elementwise_div(
                x=g, y=norm)  # The shapes of g and norm are the same.
            # Currently, elementwise_mul only support broadcast when the shape
            # of y is a subset of the shape of x. Thus, we reshape y to squeeze
            # to achive the subset.
            w = elementwise_mul(
                x=v,
                y=scale if dim is None else reshape(
                    x=scale, shape=[v.shape[dim]]),
                axis=-1 if dim is None else dim)
            # To serialize the original parameter for inference, maybe a
            # parameter rather than a variable should be returned.
            return w

        g_param_attr = copy.deepcopy(attr)
        g_param_attr.name = attr.name + '_g'
        g_param_shape = [1] * len(shape)
        if attr.dim is not None:
            g_param_shape[attr.dim] = shape[attr.dim]
        v_param_attr = copy.deepcopy(attr)
        v_param_attr.name = attr.name + '_v'
        v_param_shape = shape

        # Add to startup_program to initialize g and v.
        # Try to reconstruct the initializer of w by initializing g and v.
        # Set the initializers of g and v as below, then the distribution
        # of w is the same as initializing w with the given initializer.
        # For Data-Dependent Initialization, please compute the init-values
        # of g and v in external and then feed the values to g and v by
        # executing an extra program.
        g_param = self.startup_program.global_block().create_parameter(
            dtype=dtype,
            shape=g_param_shape,
            **g_param_attr.to_kwargs(with_initializer=False))
        v_param = self.startup_program.global_block().create_parameter(
            dtype=dtype,
            shape=v_param_shape,
            **v_param_attr.to_kwargs(with_initializer=True))
        __norm_except_dim(
            x=v_param,
            out=g_param,
            dim=attr.dim,
            block=self.startup_program.global_block())

        # Add weight normalization to main_program
        g_param = self.main_program.global_block().create_parameter(
            dtype=dtype, shape=g_param_shape, **g_param_attr.to_kwargs())
        v_param = self.main_program.global_block().create_parameter(
            dtype=dtype, shape=v_param_shape, **v_param_attr.to_kwargs())
        w_param = __weight_normalize(g_param, v_param, dim=attr.dim)
        return w_param

    def create_parameter(self,
                         attr,
                         shape,
                         dtype,
                         is_bias=False,
                         default_initializer=None):
        """Create a parameter in both the startup program (with initializer)
        and the main program, and return the main-program parameter."""
        # Deepcopy the attr so that parameters can be shared in program
        attr = copy.deepcopy(attr)
        assert isinstance(attr, ParamAttr)
        suffix = 'b' if is_bias else 'w'
        if attr.name is None:
            attr.name = unique_name.generate(".".join([self.name, suffix]))

        if default_initializer is None and attr.initializer is None:
            if is_bias:
                attr.set_default_bias_initializer()
            else:
                attr.set_default_param_initializer()
        else:
            attr.set_default_initializer(default_initializer)

        # If weight normalization is set, insert extra parameters and ops.
        # Refer to https://arxiv.org/pdf/1602.07868.pdf
        if isinstance(attr, WeightNormParamAttr):
            param = self._create_weight_normalize(attr, shape, dtype)
            WeightNormParamAttr.params_with_weight_norm.append(param)
            return param

        self.startup_program.global_block().create_parameter(
            dtype=dtype, shape=shape, **attr.to_kwargs(with_initializer=True))
        return self.main_program.global_block().create_parameter(
            dtype=dtype, shape=shape, **attr.to_kwargs())

    def get_parameter(self, name):
        """Look up a Parameter by name in the main program's global block."""
        param = self.main_program.global_block().var(name)
        if not isinstance(param, Parameter):
            raise ValueError("no Parameter name %s found" % name)
        return param

    def create_tmp_variable(self, dtype, stop_gradient=False):
        """Create an anonymous, non-persistable variable in the current block."""
        return self.main_program.current_block().create_var(
            name=unique_name.generate(".".join([self.name, 'tmp'])),
            dtype=dtype,
            persistable=False,
            stop_gradient=stop_gradient)

    def create_variable(self, *args, **kwargs):
        return self.main_program.current_block().create_var(*args, **kwargs)

    def create_global_variable(self, persistable=False, *args, **kwargs):
        """
        create global variable, note that there is no initializer for this global variable.
        Args:
            persistable(bool): True if it is a checkpoint value.
            *args: See create_var's documentation
            **kwargs: See create_var's documentation

        Returns(Variable): the created variable.
        """
        return self.main_program.global_block().create_var(
            *args, persistable=persistable, **kwargs)

    def create_or_get_global_variable(self, name, *args, **kwargs):
        """
        Creates a global variable if not exists and returns the variable and
        a boolean flag which is true when it is a new variable.
        """
        if self.main_program.global_block().has_var(name):
            return self.main_program.global_block().var(name), False
        else:
            return self.create_global_variable(name=name, *args, **kwargs), True

    def set_variable_initializer(self, var, initializer):
        """Register ``initializer`` for ``var`` in the startup program."""
        assert isinstance(var, Variable)
        self.startup_program.global_block().create_var(
            name=var.name,
            type=var.type,
            dtype=var.dtype,
            shape=var.shape,
            persistable=True,
            initializer=initializer)

    def append_bias_op(self, input_var, dim_start=1, dim_end=None):
        """
        Append bias operator and return its output. If the user does not set
        bias_attr, append_bias_op will return input_var

        :param input_var: the input variable. The len(input_var.shape) is
        larger or equal than 2.
        :bias_initializer: an instance of a subclass of Initializer used to
        initialize the bias
        :param dim_start:
        :param dim_end: the shape of the bias will be
        input_var.shape[dim_start:dim_end]. The bias is broadcasted to other
        dimensions and added to input_var to get the output
        """
        size = list(input_var.shape[dim_start:dim_end])
        bias_attr = self.bias_attr
        if not bias_attr:
            return input_var

        b = self.create_parameter(
            attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True)
        tmp = self.create_tmp_variable(dtype=input_var.dtype)
        self.append_op(
            type='elementwise_add',
            inputs={'X': [input_var],
                    'Y': [b]},
            outputs={'Out': [tmp]},
            attrs={'axis': dim_start})
        return tmp

    def append_activation(self, input_var):
        """Append the activation op configured in kwargs['act'] (if any) and
        return its output variable."""
        act = self.kwargs.get('act', None)
        if act is None:
            return input_var
        if isinstance(act, basestring):
            act = {'type': act}

        if 'use_cudnn' in self.kwargs and self.kwargs.get('use_cudnn'):
            act['use_cudnn'] = self.kwargs.get('use_cudnn')
        if 'use_mkldnn' in self.kwargs:
            act['use_mkldnn'] = self.kwargs.get('use_mkldnn')
        act_type = act.pop('type')
        tmp = input_var
        # NOTE(dzhwinter): some activation support inplace compution.
        if not core.IsInplace(act_type):
            tmp = self.create_tmp_variable(dtype=input_var.dtype)
        self.append_op(
            type=act_type,
            inputs={"X": [input_var]},
            outputs={"Out": [tmp]},
            attrs=act)
        return tmp

    def _get_default_initializer(self, dtype):
        if dtype is None or dtype_is_floating(dtype) is True:
            return Xavier()
        else:
            # For integer and boolean types, initialize with all zeros
            return Constant()

    def is_instance(self, param_name, cls):
        """Raise TypeError unless kwargs[param_name] is an instance of cls."""
        param = self.kwargs.get(param_name, None)
        if not isinstance(param, cls):
            # BUG FIX: the format arguments were previously passed as extra
            # TypeError arguments, so the message was never formatted.
            raise TypeError(
                "The input {0} parameter of method {1} must be {2}".format(
                    param_name, self.layer_type, cls.__name__))
17,353
39.264501
107
py
Paddle
Paddle-master/python/paddle/fluid/regularizer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import framework
from . import core

__all__ = [
    'append_regularization_ops', 'WeightDecayRegularizer', 'L1Decay',
    'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer'
]


def append_regularization_ops(parameters_and_grads, regularization=None):
    """Append weight-decay (regularization) operators for each gradient.

    For every (parameter, gradient) pair, compute the gradient of the
    regularization term and add it onto the gradient in place. A
    per-parameter regularizer (``param.regularizer``) takes precedence over
    the global ``regularization`` argument.

    Args:
        parameters_and_grads: list of (parameter, gradient) pairs to process.
        regularization: fallback regularizer applied when a parameter does
            not carry its own.

    Returns:
        list of (parameter, gradient) pairs with regularized gradients.
    """
    out_pairs = []
    for param, grad in parameters_and_grads:
        with param.block.program.optimized_guard(param):
            # A missing gradient means there is nothing to regularize.
            if grad is None:
                out_pairs.append((param, grad))
                continue
            # Prefer the parameter's own regularizer over the global one.
            if param.regularizer is not None:
                term = param.regularizer(param, grad, grad.block)
            elif regularization is not None:
                term = regularization(param, grad, grad.block)
            else:
                term = None
            if term is None:
                # No regularization configured for this parameter.
                out_pairs.append((param, grad))
                continue

            assert grad.shape == term.shape
            # grad <- grad + d(reg)/d(param), accumulated in place.
            grad.block.append_op(
                type='elementwise_add',
                inputs={"X": grad,
                        "Y": term},
                outputs={"Out": grad})
            out_pairs.append((param, grad))

    return out_pairs


class WeightDecayRegularizer(object):
    """Common interface of weight-decay regularizers.

    Regularization is applied during the backward pass by appending ops that
    compute the gradient of the regularization function. Use one of the
    concrete subclasses rather than this class directly.
    """

    def __init__(self):
        pass

    def __call__(self, param, grad, block):
        """Append the weight-decay ops for ``param`` into ``block``."""
        raise NotImplementedError()

    def __str__(self):
        """Debug string"""
        raise NotImplementedError()


class L2DecayRegularizer(WeightDecayRegularizer):
    """L2 weight decay: d(reg)/d(w) = coeff * w."""

    def __init__(self, regularization_coeff=0.0):
        assert regularization_coeff is not None
        super(L2DecayRegularizer, self).__init__()
        self._regularization_coeff = regularization_coeff

    def __call__(self, param, grad, block):
        """Append L2 decay ops and return the decay variable.

        Args:
            param: parameter variable for which regularization is applied
            block: block in which variable is to be created

        Returns:
            new variable holding coeff * param (rows only, for sparse grads)
        """
        assert isinstance(param, framework.Parameter)
        assert isinstance(block, framework.Block)
        decay_var = block.create_var(
            dtype="float32", shape=param.shape, lod_level=param.lod_level)
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            # Sparse gradient: only gather the rows that actually appear in
            # the gradient and decay those.
            decay_var = block.create_var(
                dtype="float32",
                shape=param.shape,
                type=core.VarDesc.VarType.SELECTED_ROWS)
            block.append_op(
                type='lookup_table',
                inputs={'W': param,
                        'Ids': grad},
                outputs={'Out': decay_var},
                attrs={'is_sparse': True})
            param = decay_var

        # decay = coeff * param
        block.append_op(
            type='scale',
            inputs={"X": param},
            outputs={"Out": decay_var},
            attrs={"scale": self._regularization_coeff})

        return decay_var

    def __str__(self):
        return "L2Decay, regularization_coeff=%f" % self._regularization_coeff


class L1DecayRegularizer(WeightDecayRegularizer):
    """L1 weight decay: d(reg)/d(w) = coeff * sign(w)."""

    def __init__(self, regularization_coeff=0.0):
        assert regularization_coeff is not None
        super(L1DecayRegularizer, self).__init__()
        self._regularization_coeff = regularization_coeff

    def __call__(self, param, grad, block):
        """Append L1 decay ops and return the decay variable.

        Args:
            param: parameter variable for which regularization is applied
            block: block in which variable is to be created

        Returns:
            new variable holding coeff * sign(param)
        """
        assert isinstance(param, framework.Parameter)
        assert isinstance(block, framework.Block)
        decay_var = block.create_var(
            dtype="float32", shape=param.shape, lod_level=param.lod_level)
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            decay_var = block.create_var(
                dtype="float32",
                shape=param.shape,
                type=core.VarDesc.VarType.SELECTED_ROWS)
            block.append_op(
                type='lookup_table',
                inputs={'W': param,
                        'Ids': grad},
                outputs={'Out': decay_var},
                attrs={'is_sparse': True})

        # NOTE(review): in the sparse branch the sign op below still reads the
        # full dense `param` (unlike L2, which rebinds param to the gathered
        # rows) — looks inconsistent; confirm against the L2 path.
        block.append_op(
            type='sign', inputs={"X": param}, outputs={"Out": decay_var})

        # decay = coeff * sign(param)
        block.append_op(
            type='scale',
            inputs={"X": decay_var},
            outputs={"Out": decay_var},
            attrs={"scale": self._regularization_coeff})

        return decay_var

    def __str__(self):
        return "L1Decay, regularization_coeff=%f" % self._regularization_coeff


# We short the class name, since users will use the regulaizer with the package
# name. The sample code:
#
#     import paddle.fluid as fluid
#
#     hidden = fluid.layers.fc(...,
#                              param_attr=fluid.regularizer.Xavier())
#
# It is no need to add a `Regularizer` as the class suffix
L1Decay = L1DecayRegularizer
L2Decay = L2DecayRegularizer
7,556
33.040541
80
py
Paddle
Paddle-master/python/paddle/fluid/net_drawer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import json
import logging
from collections import defaultdict

import paddle.fluid.core as core
import paddle.fluid.proto.framework_pb2 as framework_pb2

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

try:
    from graphviz import Digraph
except ImportError:
    logger.info(
        'Cannot import graphviz, which is required for drawing a network. This '
        'can usually be installed in python with "pip install graphviz". Also, '
        'pydot requires graphviz to convert dot files to pdf: in ubuntu, this '
        'can usually be installed with "sudo apt-get install graphviz".')
    print('net_drawer will not run correctly. Please install the correct '
          'dependencies.')
    exit(0)

OP_STYLE = {
    'shape': 'oval',
    'color': '#0F9D58',
    'style': 'filled',
    'fontcolor': '#FFFFFF'
}

VAR_STYLE = {}

GRAPH_STYLE = {"rankdir": "TB", }

GRAPH_ID = 0


def unique_id():
    """Return a fresh integer id for a graph.

    BUG FIX: the previous version returned an inner generator function and
    incremented GRAPH_ID without declaring it global, which raised
    UnboundLocalError when called. Callers expect an int (it is stringified
    for the graph name/filename).
    """
    global GRAPH_ID
    GRAPH_ID += 1
    return GRAPH_ID


def draw_node(op):
    """Build the graphviz node attributes for an operator."""
    # BUG FIX: copy the shared style dict instead of mutating the module-level
    # OP_STYLE (the old code injected 'name'/'label' into the global).
    node = OP_STYLE.copy()
    node["name"] = op.type
    node["label"] = op.type
    return node


def draw_edge(var_parent, op, var, arg):
    """Build the graphviz edge attributes from the producer of `arg` to `op`."""
    # BUG FIX: copy the shared style dict instead of mutating VAR_STYLE.
    edge = VAR_STYLE.copy()
    edge["label"] = "%s(%s)" % (var.parameter, arg)
    edge["head_name"] = op.type
    edge["tail_name"] = var_parent[arg]
    return edge


def parse_graph(program, graph, var_dict, **kwargs):
    """Add the ops of `program`'s first block to `graph`, tracking in
    `var_dict` which op produced each variable."""
    # fill the known variables
    for block in program.blocks:
        for var in block.vars:
            if var not in var_dict:
                var_dict[var] = "Feed"

    temp_id = 0
    proto = framework_pb2.ProgramDesc.FromString(
        program.desc.serialize_to_string())
    for block in proto.blocks:
        for op in block.ops:
            # Suffix with a counter so repeated op types get distinct nodes.
            op.type = op.type + "_" + str(temp_id)
            temp_id += 1
            graph.node(**draw_node(op))
            for o in op.outputs:
                for arg in o.arguments:
                    var_dict[arg] = op.type
            for e in op.inputs:
                for arg in e.arguments:
                    if arg in var_dict:
                        graph.edge(**draw_edge(var_dict, op, e, arg))
        break  # only plot the first block


def draw_graph(startup_program, main_program, **kwargs):
    """Render `startup_program` and `main_program` into one graphviz Digraph.

    Recognized kwargs: graph_attr, node_attr, edge_attr (style dict updates)
    and filename; everything else is forwarded to Digraph.
    """
    # BUG FIX: the old code indexed kwargs with undefined bare names
    # (kwargs[graph_attr]) and then forwarded the same keys again through
    # **kwargs, causing NameError / duplicate-keyword errors. Pop the handled
    # keys so only the remainder is forwarded.
    GRAPH_STYLE.update(kwargs.pop("graph_attr", {}))
    OP_STYLE.update(kwargs.pop("node_attr", {}))
    VAR_STYLE.update(kwargs.pop("edge_attr", {}))
    graph_id = unique_id()

    filename = kwargs.pop("filename", None)
    if filename is None:
        filename = str(graph_id) + ".gv"
    g = Digraph(
        name=str(graph_id),
        filename=filename,
        graph_attr=GRAPH_STYLE,
        node_attr=OP_STYLE,
        edge_attr=VAR_STYLE,
        **kwargs)

    var_dict = {}
    parse_graph(startup_program, g, var_dict)
    parse_graph(main_program, g, var_dict)

    g.save()
    return g
3,601
27.140625
80
py
Paddle
Paddle-master/python/paddle/fluid/param_attr.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from initializer import Initializer, Xavier, Constant
from regularizer import WeightDecayRegularizer

__all__ = [
    'ParamAttr',
    'WeightNormParamAttr',
]


class ParamAttr(object):
    """Describes how a parameter is created: its name, initializer, learning
    rate, regularizer, trainability, gradient clipping and model averaging."""

    def __init__(self,
                 name=None,
                 initializer=None,
                 learning_rate=1.0,
                 regularizer=None,
                 trainable=True,
                 gradient_clip=None,
                 do_model_average=None):
        self.name = name
        self.initializer = initializer
        self.learning_rate = learning_rate
        self.regularizer = regularizer
        self.trainable = trainable
        self.gradient_clip = gradient_clip
        self.model_average = do_model_average

    def set_default_initializer(self, initializer):
        """Install ``initializer`` unless one is already set.

        Passing None is only valid when an initializer already exists; it then
        keeps the current one.
        """
        if initializer is None:
            if self.initializer is None:
                raise ValueError("ParamAttr.initializer is not set")
            # Keep the existing initializer.
            return
        if self.initializer is not None:
            # An explicit initializer always wins over the default.
            return
        self.initializer = initializer

    def set_default_param_initializer(self):
        """Default weight initializer: Xavier."""
        self.set_default_initializer(Xavier())

    def set_default_bias_initializer(self):
        """Default bias initializer: zeros."""
        self.set_default_initializer(Constant(0.0))

    @staticmethod
    def to_attr(arg):
        """Coerce ``arg`` (None / sequence / str / Initializer / regularizer /
        bool / ParamAttr) into a ParamAttr (or a list thereof)."""
        if arg is None:
            return ParamAttr()
        if isinstance(arg, (list, tuple)):
            # Convert element-wise, preserving order.
            return [ParamAttr.to_attr(item) for item in arg]
        if isinstance(arg, ParamAttr):
            return arg
        if isinstance(arg, (str, unicode)):
            return ParamAttr(name=arg)
        if isinstance(arg, Initializer):
            return ParamAttr(initializer=arg)
        if isinstance(arg, WeightDecayRegularizer):
            return ParamAttr(regularizer=arg)
        if isinstance(arg, bool):
            # True -> a default ParamAttr; False -> the literal False (meaning
            # "no parameter", e.g. bias disabled).
            return ParamAttr.to_attr(None) if arg else False
        raise TypeError("{0} cast to ParamAttr".format(type(arg)))

    def to_kwargs(self, with_initializer=False):
        """Render this attr as the keyword dict used by create_parameter."""
        result = {
            'name': self.name,
            'optimize_attr': {
                'learning_rate': self.learning_rate
            },
            'regularizer': self.regularizer,
            'trainable': self.trainable,
            'gradient_clip_attr': self.gradient_clip,
            'model_average': self.model_average
        }
        if with_initializer:
            result['initializer'] = self.initializer
        return result


class WeightNormParamAttr(ParamAttr):
    """ParamAttr variant that enables weight normalization.

    Any field in ParamAttr can also be set here. The extra ``dim`` field
    indicates the dimension *except which* the weight is normalized.
    """
    # List to record the parameters reparameterized by weight normalization.
    # If these parameters are treated as Variable rather than Parameter,
    # it can be used to discriminate these parameters and help to serialize
    # these paramters for inference.
    params_with_weight_norm = []

    def __init__(self, dim=None, **kwargs):
        super(WeightNormParamAttr, self).__init__(**kwargs)
        self.dim = dim
3,753
33.759259
79
py
Paddle
Paddle-master/python/paddle/fluid/metrics.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fluid Metrics The metrics are accomplished via Python natively. """ import numpy as np import copy import warnings __all__ = [ 'MetricBase', 'CompositeMetric', 'Accuracy', 'ChunkEvaluator', 'EditDistance', 'DetectionMAP', 'Auc', ] def _is_numpy_(var): return isinstance(var, (np.ndarray, np.generic)) def _is_number_(var): return isinstance(var, int) or isinstance(var, float) or (isinstance( var, np.ndarray) and var.shape == (1, )) def _is_number_or_matrix_(var): return _is_number_(var) or isinstance(var, np.ndarray) class MetricBase(object): """ Base Class for all evaluators Args: name(str): The name of evaluator. such as, "accuracy". Used for generate temporary variable name. Interface: Note(*) : the states is the attributes who not has _ prefix. get_config(): print current states and configuration reset(): clear the states. If the Metrics states type is not (int, float, np.ndarray), Please override this method. update(): update states at every minibatch eval(): get metric evaluation in numpy type. """ def __init__(self, name, **kwargs): self._name = str(name) if name != None else self.__class__.__name__ self._kwargs = kwargs if kwargs != None else dict() self.reset() def __str__(self): return self._name def reset(self): """ states is the attributes who not has _ prefix. reset the states of metrics. 
""" states = { attr: value for attr, value in self.__dict__.iteritems() if not attr.startswith("_") } for attr, value in states.iteritems(): if isinstance(value, int): setattr(self, attr, 0) elif isinstance(value, float): setattr(self, attr, .0) elif isinstance(value, (np.ndarray, np.generic)): setattr(self, attr, np.zeros_like(value)) else: setattr(self, attr, None) def get_config(self): states = { attr: value for attr, value in self.__dict__.iteritems() if not attr.startswith("_") } config = copy.deepcopy(self._kwargs) config.update({"name": self._name, "states": copy.deepcopy(states)}) return config def update(self): raise NotImplementedError() def eval(self): raise NotImplementedError() class CompositeMetric(MetricBase): """ Compute multiple metrics in each minibatch. for example, merge F1, accuracy, recall into one Metric. """ def __init__(self, name=None, **kwargs): super(CompositeMetric, self).__init__(name, kwargs) self._metrics = [] def add_metric(self, metric): if not isinstance(metric, MetricBase): raise ValueError("SubMetric should be inherit from MetricBase.") self._metrics.append(metric) def eval(self): ans = [] for m in self._metrics: ans.append(m.eval()) return ans class Accuracy(MetricBase): """ Accumulate the accuracy from minibatches and compute the average accuracy for every pass. 
Args: name: the metrics name Example: minibatch_accuracy = fluid.layers.accuracy(pred, label) accuracy_evaluator = fluid.metrics.Accuracy() for epoch in PASS_NUM: accuracy_evaluator.reset() for data in batches: loss = exe.run(fetch_list=[cost, minibatch_accuracy]) accuracy_evaluator.update(value=minibatch_accuracy, weight=batches) accuracy = accuracy_evaluator.eval() """ def __init__(self, name=None): super(Accuracy, self).__init__(name) self.value = .0 self.weight = .0 def update(self, value, weight): if not _is_number_or_matrix_(value): raise ValueError( "The 'value' must be a number(int, float) or a numpy ndarray.") if not _is_number_(weight): raise ValueError("The 'weight' must be a number(int, float).") self.value += value * weight self.weight += weight def eval(self): if self.weight == 0: raise ValueError( "There is no data in Accuracy Metrics. Please check layers.accuracy output has added to Accuracy." ) return self.value / self.weight class ChunkEvaluator(MetricBase): """ Accumulate counter numbers output by chunk_eval from mini-batches and compute the precision recall and F1-score using the accumulated counter numbers. """ def __init__(self, name=None): super(ChunkEvaluator, self).__init__(name) self.num_infer_chunks = 0 self.num_label_chunks = 0 self.num_correct_chunks = 0 def update(self, num_infer_chunks, num_label_chunks, num_correct_chunks): if not _is_number_or_matrix_(num_infer_chunks): raise ValueError( "The 'num_infer_chunks' must be a number(int, float) or a numpy ndarray." ) if not _is_number_or_matrix_(num_label_chunks): raise ValueError( "The 'num_label_chunks' must be a number(int, float) or a numpy ndarray." ) if not _is_number_or_matrix_(num_correct_chunks): raise ValueError( "The 'num_correct_chunks' must be a number(int, float) or a numpy ndarray." 
) self.num_infer_chunks += num_infer_chunks self.num_label_chunks += num_label_chunks self.num_correct_chunks += num_correct_chunks def eval(self): precision = float( self.num_correct_chunks ) / self.num_infer_chunks if self.num_infer_chunks else 0 recall = float(self.num_correct_chunks ) / self.num_label_chunks if self.num_label_chunks else 0 f1_score = float(2 * precision * recall) / ( precision + recall) if self.num_correct_chunks else 0 return precision, recall, f1_score class EditDistance(MetricBase): """ Accumulate edit distance sum and sequence number from mini-batches and compute the average edit_distance and instance error of all batches. Args: name: the metrics name Example: edit_distance_metrics = fluid.layers.edit_distance(input, label) distance_evaluator = fluid.metrics.EditDistance() for epoch in PASS_NUM: distance_evaluator.reset() for data in batches: loss = exe.run(fetch_list=[cost] + list(edit_distance_metrics)) distance_evaluator.update(*edit_distance_metrics) distance, instance_error = distance_evaluator.eval() In the above example: 'distance' is the average of the edit distance in a pass. 'instance_error' is the instance error rate in a pass. """ def __init__(self, name): super(EditDistance, self).__init__(name) self.total_distance = .0 self.seq_num = 0 self.instance_error = 0 def update(self, distances, seq_num): if not _is_numpy_(distances): raise ValueError("The 'distances' must be a numpy ndarray.") if not _is_number_(seq_num): raise ValueError("The 'seq_num' must be a number(int, float).") seq_right_count = np.sum(distances == 0) total_distance = np.sum(distances) self.seq_num += seq_num self.instance_error += seq_num - seq_right_count self.total_distance += total_distance def eval(self): if self.seq_num == 0: raise ValueError( "There is no data in EditDistance Metric. Please check layers.edit_distance output has been added to EditDistance." 
) avg_distance = self.total_distance / self.seq_num avg_instance_error = self.instance_error / self.seq_num return avg_distance, avg_instance_error class DetectionMAP(MetricBase): """ Calculate the detection mean average precision (mAP). TODO (Dang Qingqing): update the following doc. The general steps are as follows: 1. calculate the true positive and false positive according to the input of detection and labels. 2. calculate mAP value, support two versions: '11 point' and 'integral'. Please get more information from the following articles: https://sanchom.wordpress.com/tag/average-precision/ https://arxiv.org/abs/1512.02325 """ def __init__(self, name=None): super(DetectionMAP, self).__init__(name) # the current map value self.value = .0 self.weight = .0 def update(self, value, weight): if not _is_number_or_matrix_(value): raise ValueError( "The 'value' must be a number(int, float) or a numpy ndarray.") if not _is_number_(weight): raise ValueError("The 'weight' must be a number(int, float).") self.value += value self.weight += weight def eval(self): if self.weight == 0: raise ValueError( "There is no data in DetectionMAP Metrics. " "Please check layers.detection_map output has added to DetectionMAP." ) return self.value / self.weight class Auc(MetricBase): """ Auc Metrics which adapts to binary classification. Need to note that auc metrics compute the value via Python natively. If you concern the speed, please use the fluid.layers.auc instead. The `auc` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the AUC. To discretize the AUC curve, a linearly spaced set of thresholds is used to compute pairs of recall and precision values. The area under the ROC-curve is therefore computed using the height of the recall values by the false positive rate, while the area under the PR-curve is the computed using the height of the precision values by the recall. 
Args: name: metric name curve: Specifies the name of the curve to be computed, 'ROC' [default] or 'PR' for the Precision-Recall-curve. num_thresholds: The number of thresholds to use when discretizing the roc curve. "NOTE: only implement the ROC curve type via Python now." """ def __init__(self, name, curve='ROC', num_thresholds=200): super(MetricBase, self).__init__(name, curve, num_thresholds) self._curve = curve self._num_thresholds = num_thresholds self._epsilon = 1e-6 self.tp_list = np.ndarray((num_thresholds, )) self.fn_list = np.ndarray((num_thresholds, )) self.tn_list = np.ndarray((num_thresholds, )) self.fp_list = np.ndarray((num_thresholds, )) def update(self, labels, predictions, axis=1): if not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray.") if not _is_numpy_(predictions): raise ValueError("The 'predictions' must be a numpy ndarray.") kepsilon = 1e-7 # to account for floating point imprecisions thresholds = [(i + 1) * 1.0 / (self._num_thresholds - 1) for i in range(self._num_thresholds - 2)] thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] # caculate TP, FN, TN, FP count for idx_thresh, thresh in enumerate(thresholds): tp, fn, tn, fp = 0, 0, 0, 0 for i, lbl in enumerate(labels): if lbl: if predictions[i, 0] >= thresh: tp += 1 else: fn += 1 else: if predictions[i, 0] >= thresh: fp += 1 else: tn += 1 self.tp_list[idx_thresh] += tp self.fn_list[idx_thresh] += fn self.tn_list[idx_thresh] += tn self.fp_list[idx_thresh] += fp def eval(self): epsilon = self._epsilon num_thresholds = self._num_thresholds tpr = (self.tp_list.astype("float32") + epsilon) / ( self.tp_list + self.fn_list + epsilon) fpr = self.fp_list.astype("float32") / ( self.fp_list + self.tn_list + epsilon) rec = (self.tp_list.astype("float32") + epsilon) / ( self.tp_list + self.fp_list + epsilon) x = fpr[:num_thresholds - 1] - fpr[1:] y = (tpr[:num_thresholds - 1] + tpr[1:]) / 2.0 auc_value = np.sum(x * y) return auc_value
13,439
34.275591
131
py
Paddle
Paddle-master/python/paddle/fluid/__init__.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function # import all class inside framework into fluid module import framework from framework import * # import all class inside executor into fluid module import executor from executor import * import trainer from trainer import Trainer from trainer import BeginEpochEvent from trainer import EndEpochEvent from trainer import BeginStepEvent from trainer import EndStepEvent import inferencer from inferencer import Inferencer import io import evaluator import initializer import layers import nets import optimizer import backward import regularizer import average import metrics import transpiler from param_attr import ParamAttr, WeightNormParamAttr from data_feeder import DataFeeder from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace from transpiler import DistributeTranspiler, SimpleDistributeTranspiler, \ InferenceTranspiler, memory_optimize, release_memory from concurrency import (Go, make_channel, channel_send, channel_recv, channel_close, Select) from lod_tensor import create_lod_tensor, create_random_int_lodtensor import clip import profiler import unique_name import recordio_writer import parallel_executor from parallel_executor import * Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + \ trainer.__all__ + inferencer.__all__ + transpiler.__all__ + \ parallel_executor.__all__ + 
lod_tensor.__all__ + [ 'io', 'initializer', 'layers', 'transpiler' 'nets', 'optimizer', 'learning_rate_decay', 'backward', 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace', 'Tensor', 'ParamAttr', 'WeightNormParamAttr', 'DataFeeder', 'clip', 'profiler', 'unique_name', 'recordio_writer', ] def __bootstrap__(): """ Enable reading gflags from environment variables. Returns: None """ import sys import core import os in_test = 'unittest' in sys.modules try: num_threads = int(os.getenv('OMP_NUM_THREADS', '1')) except ValueError: num_threads = 1 if num_threads > 1: print( 'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation ' 'speed will not be optimized if you use data parallel. It will ' 'fail if this PaddlePaddle binary is compiled with OpenBlas since' ' OpenBlas does not support multi-threads.'.format(num_threads), file=sys.stderr) print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr) os.environ['OMP_NUM_THREADS'] = str(num_threads) read_env_flags = [ 'use_pinned_memory', 'check_nan_inf', 'benchmark', 'warpctc_dir', 'eager_delete_scope' ] if core.is_compiled_with_cuda(): read_env_flags += [ 'fraction_of_gpu_memory_to_use', 'cudnn_algo_use_autotune' ] core.init_gflags([sys.argv[0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) core.init_glog(sys.argv[0]) # don't init_p2p when in unittest to save time. core.init_devices(not in_test) # TODO(panyx0718): Avoid doing complex initialization logic in __init__.py. # Consider paddle.init(args) or paddle.main(args) layers.monkey_patch_variable() __bootstrap__()
4,168
29.654412
78
py
Paddle
Paddle-master/python/paddle/fluid/data_feeder.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import core import numpy import six.moves as six import multiprocessing from framework import Variable, default_main_program __all__ = ['DataFeeder'] class DataToLoDTensorConverter(object): def __init__(self, place, lod_level, shape, dtype): self.place = place self.lod_level = lod_level self.shape = shape if dtype == core.VarDesc.VarType.FP32: self.dtype = 'float32' elif dtype == core.VarDesc.VarType.INT64: self.dtype = 'int64' elif dtype == core.VarDesc.VarType.FP64: self.dtype = 'float64' elif dtype == core.VarDesc.VarType.INT32: self.dtype = 'int32' elif dtype == core.VarDesc.VarType.UINT8: self.dtype = 'uint8' else: raise ValueError("dtype must be any of [int32, float32, int64, " "float64, uint8]") self.data = [] self.lod = [] for i in six.range(lod_level): self.lod.append([0]) def feed(self, data): self._feed_impl_(data, self.lod, self.lod_level) def _feed_impl_(self, data, lod, lod_level): if lod_level == 0: self.data.append(data) else: cur_lod_len = len(data) lod[0].append(lod[0][-1] + cur_lod_len) for each_data in data: self._feed_impl_(each_data, lod[1:], lod_level - 1) def done(self): arr = numpy.array(self.data, dtype=self.dtype).reshape(self.shape) t = core.LoDTensor() t.set(arr, self.place) if self.lod_level > 0: t.set_lod(self.lod) return t class DataFeeder(object): def __init__(self, feed_list, place, program=None): 
self.feed_dtypes = [] self.feed_names = [] self.feed_shapes = [] self.feed_lod_level = [] if program is None: program = default_main_program() for each_var in feed_list: if isinstance(each_var, basestring): each_var = program.block(0).var(each_var) if not isinstance(each_var, Variable): raise TypeError("Feed list should contain a list of variable") self.feed_dtypes.append(each_var.dtype) self.feed_names.append(each_var.name) shape = each_var.shape batch_size_dim = -1 for i, s in enumerate(shape): if s < 0: batch_size_dim = i break if batch_size_dim == -1: raise ValueError("Variable {0} must has a batch size dimension", each_var.name) self.feed_lod_level.append(each_var.lod_level) self.feed_shapes.append(shape) self.place = place def feed(self, iterable): converter = [] for lod_level, shape, dtype in six.zip( self.feed_lod_level, self.feed_shapes, self.feed_dtypes): converter.append( DataToLoDTensorConverter( place=self.place, lod_level=lod_level, shape=shape, dtype=dtype)) for each_sample in iterable: assert len(each_sample) == len(converter), ( "The number of fields in data (%s) does not match " + "len(feed_list) (%s)") % (len(each_sample), len(converter)) for each_converter, each_slot in six.zip(converter, each_sample): each_converter.feed(each_slot) ret_dict = {} for each_name, each_converter in six.zip(self.feed_names, converter): ret_dict[each_name] = each_converter.done() return ret_dict def feed_parallel(self, iterable, num_places=None): if isinstance(self.place, core.CUDAPlace): places = [ core.CUDAPlace(i) for i in six.xrange(self._get_number_of_places_(num_places)) ] else: places = [ core.CPUPlace() for _ in six.xrange(self._get_number_of_places_(num_places)) ] if len(iterable) != len(places): raise ValueError("feed_parallel takes multiple mini-batches. Each " "mini-batch will be feed on each device. 
The " "number of devices and number of mini-batches " "must be same.") place = self.place for p, batch in six.zip(places, iterable): self.place = p yield self.feed(batch) self.place = place def _get_number_of_places_(self, num_places): if num_places is not None: return int(num_places) elif isinstance(self.place, core.CUDAPlace): return core.get_cuda_device_count() else: return multiprocessing.cpu_count() def decorate_reader(self, reader, multi_devices, num_places=None, drop_last=True): def __reader_creator__(): if not multi_devices: for item in reader(): yield self.feed(item) else: num = self._get_number_of_places_(num_places) item = [] for batch in reader(): item.append(batch) if len(item) == num: yield list(self.feed_parallel(item, num)) item = [] if not drop_last and len(item) != 0: raise ValueError( "The data batch which cannot fit for devices will be " "dropped is not implementation. Other strategies are " "not implemented") return __reader_creator__
6,561
35.659218
80
py
Paddle
Paddle-master/python/paddle/fluid/nets.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import layers __all__ = [ "simple_img_conv_pool", "sequence_conv_pool", "glu", "scaled_dot_product_attention", ] def simple_img_conv_pool(input, num_filters, filter_size, pool_size, pool_stride, act, param_attr=None, pool_type='max', use_cudnn=True, use_mkldnn=False): conv_out = layers.conv2d( input=input, num_filters=num_filters, filter_size=filter_size, param_attr=param_attr, act=act, use_cudnn=use_cudnn, use_mkldnn=use_mkldnn) pool_out = layers.pool2d( input=conv_out, pool_size=pool_size, pool_type=pool_type, pool_stride=pool_stride, use_cudnn=use_cudnn, use_mkldnn=use_mkldnn) return pool_out def img_conv_group(input, conv_num_filter, pool_size, conv_padding=1, conv_filter_size=3, conv_act=None, param_attr=None, conv_with_batchnorm=False, conv_batchnorm_drop_rate=0.0, pool_stride=1, pool_type=None, use_cudnn=True, use_mkldnn=False): """ Image Convolution Group, Used for vgg net. 
""" tmp = input assert isinstance(conv_num_filter, list) or \ isinstance(conv_num_filter, tuple) def __extend_list__(obj): if not hasattr(obj, '__len__'): return [obj] * len(conv_num_filter) else: return obj conv_padding = __extend_list__(conv_padding) conv_filter_size = __extend_list__(conv_filter_size) param_attr = __extend_list__(param_attr) conv_with_batchnorm = __extend_list__(conv_with_batchnorm) conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate) for i in xrange(len(conv_num_filter)): local_conv_act = conv_act if conv_with_batchnorm[i]: local_conv_act = None tmp = layers.conv2d( input=tmp, num_filters=conv_num_filter[i], filter_size=conv_filter_size[i], padding=conv_padding[i], param_attr=param_attr[i], act=local_conv_act, use_cudnn=use_cudnn, use_mkldnn=use_mkldnn) if conv_with_batchnorm[i]: tmp = layers.batch_norm(input=tmp, act=conv_act, in_place=True) drop_rate = conv_batchnorm_drop_rate[i] if abs(drop_rate) > 1e-5: tmp = layers.dropout(x=tmp, dropout_prob=drop_rate) pool_out = layers.pool2d( input=tmp, pool_size=pool_size, pool_type=pool_type, pool_stride=pool_stride, use_cudnn=use_cudnn, use_mkldnn=use_mkldnn) return pool_out def sequence_conv_pool(input, num_filters, filter_size, param_attr=None, act="sigmoid", pool_type="max"): conv_out = layers.sequence_conv( input=input, num_filters=num_filters, filter_size=filter_size, param_attr=param_attr, act=act) pool_out = layers.sequence_pool(input=conv_out, pool_type=pool_type) return pool_out def glu(input, dim=-1): """ The gated linear unit composed by split, sigmoid activation and elementwise multiplication. Specifically, Split the input into two equal sized parts :math:`a` and :math:`b` along the given dimension and then compute as following: .. math:: {GLU}(a, b)= a \otimes \sigma(b) Refer to `Language Modeling with Gated Convolutional Networks <https://arxiv.org/pdf/1612.08083.pdf>`_. Args: input (Variable): The input variable which is a Tensor or LoDTensor. 
dim (int): The dimension along which to split. If :math:`dim < 0`, the dimension to split along is :math:`rank(input) + dim`. Returns: Variable: The Tensor variable with half the size of input. Examples: .. code-block:: python # x is a Tensor variable with shape [3, 6, 9] fluid.nets.glu(input=x, dim=1) # shape of output: [3, 3, 9] """ a, b = layers.split(input, num_or_sections=2, dim=dim) act_b = layers.sigmoid(x=b) out = layers.elementwise_mul(x=a, y=act_b) return out def scaled_dot_product_attention(queries, keys, values, num_heads=1, dropout_rate=0.): """ The dot-product attention. Attention mechanism can be seen as mapping a query and a set of key-value pairs to an output. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function (dot-product here) of the query with the corresponding key. The dot-product attention can be implemented through (batch) matrix multipication as follows: .. math:: Attention(Q, K, V)= softmax(QK^\mathrm{T})V Refer to `Attention Is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`_. Args: queries (Variable): The input variable which should be a 3-D Tensor. keys (Variable): The input variable which should be a 3-D Tensor. values (Variable): The input variable which should be a 3-D Tensor. num_heads (int): Head number to compute the scaled dot product attention. Default value is 1. dropout_rate (float): The dropout rate to drop the attention weight. Default value is 0. Returns: Variable: A 3-D Tensor computed by multi-head scaled dot product \ attention. Raises: ValueError: If input queries, keys, values are not 3-D Tensors. NOTE: 1. When num_heads > 1, three linear projections are learned respectively to map input queries, keys and values into queries', keys' and values'. queries', keys' and values' have the same shapes with queries, keys and values. 1. When num_heads == 1, scaled_dot_product_attention has no learnable parameters. Examples: .. 
code-block:: python # Suppose q, k, v are Tensors with the following shape: # q: [3, 5, 9], k: [3, 6, 9], v: [3, 6, 10] contexts = fluid.nets.scaled_dot_product_attention(q, k, v) contexts.shape # [3, 5, 10] """ if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3): raise ValueError( "Inputs quries, keys and values should all be 3-D tensors.") if queries.shape[-1] != keys.shape[-1]: raise ValueError( "The hidden size of queries and keys should be the same.") if keys.shape[-2] != values.shape[-2]: raise ValueError( "The max sequence length in query batch and in key batch " "should be the same.") if keys.shape[-1] % num_heads != 0: raise ValueError("The hidden size of keys (%d) must be divisible " "by the number of attention heads (%d)." % (keys.shape[-1], num_heads)) if values.shape[-1] % num_heads != 0: raise ValueError("The hidden size of values (%d) must be divisible " "by the number of attention heads (%d)." % (values.shape[-1], num_heads)) def __compute_qkv(queries, keys, values, num_heads): """ Add linear projection to queries, keys, and values. Args: queries(Tensor): a 3-D input Tensor. keys(Tensor): a 3-D input Tensor. values(Tensor): a 3-D input Tensor. num_heads(int): The number of heads. Linearly project the inputs ONLY when num_heads > 1. Returns: Tensor: linearly projected output Tensors: queries', keys' and values'. They have the same shapes with queries, keys and values. """ if num_heads == 1: return queries, keys, values q = layers.fc(input=queries, size=queries.shape[-1], num_flatten_dims=2) k = layers.fc(input=keys, size=keys.shape[-1], num_flatten_dims=2) v = layers.fc(input=values, size=values.shape[-1], num_flatten_dims=2) return q, k, v def __split_heads(x, num_heads): """ Reshape the last dimension of inpunt tensor x so that it becomes two dimensions. Args: x(Tensor): a 3-D input Tensor. num_heads(int): The number of heads. 
Returns: Tensor: a Tensor with shape [..., n, m/num_heads], where m is size of the last dimension of x. """ if num_heads == 1: return x hidden_size = x.shape[-1] # reshape the 3-D input: [batch_size, max_sequence_length, hidden_dim] # into a 4-D output: # [batch_size, max_sequence_length, num_heads, hidden_size_per_head]. reshaped = layers.reshape( x=x, shape=list(x.shape[:-1]) + [num_heads, hidden_size // num_heads]) # permuate the dimensions into: # [batch_size, num_heads, max_sequence_len, hidden_size_per_head] return layers.transpose(x=reshaped, perm=[0, 2, 1, 3]) def __combine_heads(x): """ Reshape the last two dimensions of inpunt tensor x so that it becomes one dimension. Args: x(Tensor): a 4-D input Tensor with shape [bs, num_heads, max_sequence_length, hidden_dim]. Returns: Tensor: a Tensor with shape [bs, max_sequence_length, num_heads * hidden_dim]. """ if len(x.shape) == 3: return x if len(x.shape) != 4: raise ValueError("Input(x) should be a 4-D Tensor.") trans_x = layers.transpose(x, perm=[0, 2, 1, 3]) return layers.reshape( x=trans_x, shape=map(int, [ trans_x.shape[0], trans_x.shape[1], trans_x.shape[2] * trans_x.shape[3] ])) q, k, v = __compute_qkv(queries, keys, values, num_heads) q = __split_heads(q, num_heads) k = __split_heads(k, num_heads) v = __split_heads(v, num_heads) key_dim_per_head = keys.shape[-1] // num_heads scaled_q = layers.scale(x=q, scale=key_dim_per_head**-0.5) product = layers.matmul(x=k, y=scaled_q, transpose_y=True) weights = layers.reshape( x=layers.reshape( x=product, shape=[-1, product.shape[-1]], act="softmax"), shape=product.shape) if dropout_rate: weights = layers.dropout( weights, dropout_prob=dropout_rate, is_test=False) ctx_multiheads = layers.matmul(weights, v) return __combine_heads(ctx_multiheads)
11,868
33.303468
80
py
Paddle
Paddle-master/python/paddle/fluid/io.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import shutil from paddle.fluid.evaluator import Evaluator from paddle.fluid.framework import Program, Parameter, default_main_program, Variable from . import core __all__ = [ 'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params', 'load_persistables', 'save_inference_model', 'load_inference_model', 'get_inference_program', 'save_checkpoint', 'load_checkpoint', 'clean_checkpoint' ] def is_parameter(var): """Check whether the variable is a Parameter. This function checks whether the input variable is a Parameter. Args: var : The input variable. Returns: boolean result whether the variable is a Parameter. """ return isinstance(var, Parameter) def is_persistable(var): if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ var.desc.type() == core.VarDesc.VarType.FETCH_LIST: return False return var.persistable def _clone_var_in_block_(block, var): assert isinstance(var, Variable) return block.create_var( name=var.name, shape=var.shape, dtype=var.dtype, type=var.type, lod_level=var.lod_level, persistable=True) def save_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None): """ Save variables to directory by executor. :param executor: executor that save variable :param dirname: directory path :param main_program: program. 
If vars is None, then filter all variables in this program which fit `predicate`. Default default_main_program. :param predicate: The Predicate describes a callable that returns a variable as a bool. If it returns true, the corresponding input variable will be saved. :param vars: variables need to be saved. If vars is specified, program & predicate will be ignored :param filename: The name of a single file that all vars are saved to. If it is None, save variables to separate files. :return: None """ if vars is None: if main_program is None: main_program = default_main_program() if not isinstance(main_program, Program): raise TypeError("program should be as Program type or None") save_vars( executor, dirname=dirname, vars=filter(predicate, main_program.list_vars()), filename=filename) else: save_program = Program() save_block = save_program.global_block() save_var_map = {} for each_var in vars: # NOTE: don't save the variable which type is RAW if each_var.type == core.VarDesc.VarType.RAW: continue new_var = _clone_var_in_block_(save_block, each_var) if filename is None: save_block.append_op( type='save', inputs={'X': [new_var]}, outputs={}, attrs={'file_path': os.path.join(dirname, new_var.name)}) else: save_var_map[new_var.name] = new_var if filename is not None: save_var_list = [] for name in sorted(save_var_map.keys()): save_var_list.append(save_var_map[name]) save_block.append_op( type='save_combine', inputs={'X': save_var_list}, outputs={}, attrs={'file_path': os.path.join(dirname, filename)}) executor.run(save_program) def save_params(executor, dirname, main_program=None, filename=None): """ Save all parameters to directory with executor. """ save_vars( executor, dirname=dirname, main_program=main_program, vars=None, predicate=is_parameter, filename=filename) def save_persistables(executor, dirname, main_program=None, filename=None): """ Save all persistables to directory with executor. 
""" save_vars( executor, dirname=dirname, main_program=main_program, vars=None, predicate=is_persistable, filename=filename) def load_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None): """ Load variables from directory by executor. :param executor: executor that load variable :param dirname: directory path :param main_program: program. If vars is None, then filter all variables in this program which fit `predicate`. Default default_main_program(). :param predicate: The Predicate describes a callable that returns a variable as a bool. If it returns true, the corresponding input variable will be loaded. :param vars: variables need to be loaded. If vars is specified, program & predicate will be ignored :param filename: The name of the single file that all vars are loaded from. If it is None, load variables from separate files. :return: None """ if vars is None: if main_program is None: main_program = default_main_program() if not isinstance(main_program, Program): raise TypeError("program's type should be Program") load_vars( executor, dirname=dirname, vars=filter(predicate, main_program.list_vars()), filename=filename) else: load_prog = Program() load_block = load_prog.global_block() load_var_map = {} for each_var in vars: assert isinstance(each_var, Variable) if each_var.type == core.VarDesc.VarType.RAW: continue new_var = _clone_var_in_block_(load_block, each_var) if filename is None: load_block.append_op( type='load', inputs={}, outputs={'Out': [new_var]}, attrs={'file_path': os.path.join(dirname, new_var.name)}) else: load_var_map[new_var.name] = new_var if filename is not None: load_var_list = [] for name in sorted(load_var_map.keys()): load_var_list.append(load_var_map[name]) load_block.append_op( type='load_combine', inputs={}, outputs={"Out": load_var_list}, attrs={'file_path': os.path.join(dirname, filename)}) executor.run(load_prog) def load_params(executor, dirname, main_program=None, filename=None): """ load all 
parameters from directory by executor. """ load_vars( executor, dirname=dirname, main_program=main_program, predicate=is_parameter, filename=filename) def load_persistables(executor, dirname, main_program=None, filename=None): """ load all persistables from directory by executor. """ load_vars( executor, dirname=dirname, main_program=main_program, predicate=is_persistable, filename=filename) def get_inference_program(target_vars, main_program=None): if main_program is None: main_program = default_main_program() if not isinstance(target_vars, list): target_vars = [target_vars] vars = [] for var in target_vars: if isinstance(var, Evaluator): vars.extend(var.states) vars.extend(var.metrics) else: vars.append(var) pruned_program = main_program.prune(targets=vars) inference_program = pruned_program.inference_optimize() return inference_program def prepend_feed_ops(inference_program, feed_target_names, feed_holder_name='feed'): if len(feed_target_names) == 0: return global_block = inference_program.global_block() feed_var = global_block.create_var( name=feed_holder_name, type=core.VarDesc.VarType.FEED_MINIBATCH, persistable=True) for i, name in enumerate(feed_target_names): out = global_block.var(name) global_block.prepend_op( type='feed', inputs={'X': [feed_var]}, outputs={'Out': [out]}, attrs={'col': i}) def append_fetch_ops(inference_program, fetch_target_names, fetch_holder_name='fetch'): global_block = inference_program.global_block() fetch_var = global_block.create_var( name=fetch_holder_name, type=core.VarDesc.VarType.FETCH_LIST, persistable=True) for i, name in enumerate(fetch_target_names): global_block.append_op( type='fetch', inputs={'X': [name]}, outputs={'Out': [fetch_var]}, attrs={'col': i}) def save_inference_model(dirname, feeded_var_names, target_vars, executor, main_program=None, model_filename=None, params_filename=None): """ Build a model especially for inference, and save it to directory by the executor. 
:param dirname: directory path :param feeded_var_names: Names of variables that need to be feeded data during inference :param target_vars: Variables from which we can get inference results. :param executor: executor that save inference model :param main_program: original program, which will be pruned to build the inference model. Default default_main_program(). :param model_filename: The name of file to save inference program. If not specified, default filename `__model__` will be used. :param params_filename: The name of file to save parameters. It is used for the case that all parameters are saved in a single binary file. If not specified, parameters are considered saved in separate files. :return: None """ if isinstance(feeded_var_names, basestring): feeded_var_names = [feeded_var_names] else: if len(feeded_var_names) > 0: if not (bool(feeded_var_names) and all( isinstance(name, basestring) for name in feeded_var_names)): raise ValueError("'feed_var_names' should be a list of str.") if isinstance(target_vars, Variable): target_vars = [target_vars] else: if not (bool(target_vars) and all( isinstance(var, Variable) for var in target_vars)): raise ValueError("'target_vars' should be a list of Variable.") if main_program is None: main_program = default_main_program() copy_program = main_program.clone() if not os.path.isdir(dirname): os.makedirs(dirname) # Clear the is_target information and remove the existed feed and fetch op global_block = copy_program.global_block() for i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == "feed" or op.type == "fetch": global_block.remove_op(i) copy_program.desc.flush() pruned_program = copy_program.prune(targets=target_vars) inference_program = pruned_program.inference_optimize() fetch_var_names = [v.name for v in target_vars] prepend_feed_ops(inference_program, feeded_var_names) append_fetch_ops(inference_program, fetch_var_names) if model_filename is not None: model_filename = 
os.path.basename(model_filename) else: model_filename = "__model__" model_filename = os.path.join(dirname, model_filename) if params_filename is not None: params_filename = os.path.basename(params_filename) with open(model_filename, "wb") as f: f.write(inference_program.desc.serialize_to_string()) save_persistables(executor, dirname, inference_program, params_filename) def load_inference_model(dirname, executor, model_filename=None, params_filename=None): """ Load inference model from a directory :param dirname: directory path :param executor: executor that load inference model :param model_filename: The name of file to load inference program. If not specified, default filename `__model__` will be used. :param params_filename: The name of file to load parameters. It is used for the case that all parameters are saved in a single binary file. If not specified, parameters are considered saved in separate files. :return: [program, feed_target_names, fetch_targets] program: program especially for inference. feed_target_names: Names of variables that need to feed data fetch_targets: Variables from which we can get inference results. 
""" if not os.path.isdir(dirname): raise ValueError("There is no directory named '%s'", dirname) if model_filename is not None: model_filename = os.path.basename(model_filename) else: model_filename = "__model__" model_filename = os.path.join(dirname, model_filename) if params_filename is not None: params_filename = os.path.basename(params_filename) with open(model_filename, "rb") as f: program_desc_str = f.read() program = Program.parse_from_string(program_desc_str) load_persistables(executor, dirname, program, params_filename) feed_target_names = program.desc.get_feed_target_names() fetch_target_names = program.desc.get_fetch_target_names() fetch_targets = [ program.global_block().var(name) for name in fetch_target_names ] return [program, feed_target_names, fetch_targets] def get_parameter_value(para, executor): """ Get the LoDTensor for the parameter :param executor: executor for retrieving the value :param para: the given parameter :return: the LoDTensor for the parameter """ assert is_parameter(para) get_program = Program() block = get_program.global_block() new_var = _clone_var_in_block_(block, para) return executor.run(get_program, feed={}, fetch_list=[new_var])[0] def get_parameter_value_by_name(name, executor, program=None): """ Get the LoDTensor for paramter with the given name :param executor: executor for retrieving the value :param name: the name of the parameter :param program: the program where the variable is found Default default_main_program(). 
:return: the LoDTensor for the variable """ if program is None: program = default_main_program() var = program.global_block().var(name) return get_parameter_value(var, executor) SUCCESS_MARK_FILENAME = "_SUCCESS" CHECKPOINT_PREFIX = "checkpoint" CHECKPOINT_SEPARATOR = "_" def save_checkpoint(executor, checkpoint_dir=None, max_num_checkpoints=3, save_interval_secs=600, main_program=None): """ Save Checkpoint will save persistable LodTensor variables from main_program in checkpoint directory, the directory named by serial number from 0 to (n -1), save_checkpoint use LRU strategy to keep numbers of checkpoint directory, the numbers of checkpoint directory are max_num_checkpoints at most, The interval between two saved checkpoints must greater than save_interval_secs. :param executor :param checkpoint_dir :param max_num_checkpoints :param save_interval_secs :param main_program """ if checkpoint_dir is None: checkpoint_dir = os.getcwd() if not os.path.isdir(checkpoint_dir): os.makedirs(checkpoint_dir) serial = _get_lastest_checkpoint_dir(checkpoint_dir) if serial >= 0 and not _interval_secs_exceed( _get_serial_dir(serial, checkpoint_dir), save_interval_secs): return serial += 1 cur_dir = _get_serial_dir(serial, checkpoint_dir) save_vars( executor, dirname=cur_dir, main_program=main_program, vars=None, predicate=_is_checkpoint_var, filename=None) _write_success(cur_dir) _lru_delete(checkpoint_dir, max_num_checkpoints) def load_checkpoint(executor, checkpoint_dir=None, main_program=None): """ Load checkpoint from a directory by executor, it will find the most recent saved checkpoint file and load it auto. 
:param executor :param checkpoint_dir :param main_program """ if checkpoint_dir is None: checkpoint_dir = os.getcwd() serial = _get_lastest_checkpoint_dir(checkpoint_dir) if serial < 0: return cur_dir = _get_serial_dir(serial, checkpoint_dir) load_vars( executor, dirname=cur_dir, main_program=main_program, predicate=_is_checkpoint_var, filename=None) def clean_checkpoint(checkpoint_dir, delete_dir=False): """ clean the checkpoint dir, when the train exits normally, the trainer will call clean_checkpoint to delete checkpoint directory saved before. delete_dir only works when the directory is empty, otherwise, OSError is raised. """ if checkpoint_dir is None: checkpoint_dir = os.getcwd() _lru_delete(checkpoint_dir, max_num_checkpoints=0) if delete_dir and not os.listdir(checkpoint_dir): os.rmdir(checkpoint_dir) def _get_serial_dir(serial, checkpoint_dir): serial_folder = CHECKPOINT_PREFIX + CHECKPOINT_SEPARATOR + str(serial) return os.path.join(checkpoint_dir, serial_folder) def _is_checkpoint_var(var): """ the checkpoint will not save or load all the variables. var type is FEED_MINIBATCH/FETCH_LIST/RAW or var name ends with @GRAD are discarded. 
:param var """ if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ var.desc.type() == core.VarDesc.VarType.RAW: return False if var.name.endswith("@GRAD"): return False return var.persistable def _interval_secs_exceed(dirname, save_interval_secs): dir_time = os.path.getmtime(dirname) if save_interval_secs > (time.time() - dir_time): return False return True def _lru_delete(dirname, max_num_checkpoints=3): dirs = os.listdir(dirname) serials = [] for serial in dirs: try: serials.append(int(serial)) except ValueError: continue if len(serials) <= max_num_checkpoints: return serials.sort(reverse=True) serials = serials[max_num_checkpoints:] for serial in serials: cur_dir = os.path.join(dirname, str(serial)) shutil.rmtree(cur_dir) def _write_success(dirname): """ write an empty file named "_SUCCESS" in checkpoint dir, indicate this checkpoint is correct. :param dirname """ success_file = os.path.join(dirname, SUCCESS_MARK_FILENAME) with open(success_file, 'a') as f: now = time.ctime() f.write(now) def _get_lastest_checkpoint_dir(checkpoint_dir): """ get the latest file in checkpoint directory, the _SUCCESS file must exist in the directory :param checkpoint_dir """ if not checkpoint_dir.strip(): return -1 def has_success(checkpoint_dir, cur_dir): """ is _SUCCESS in this dir """ _, serial = cur_dir.split(CHECKPOINT_SEPARATOR) try: int(serial) except ValueError: return -1 if not os.path.isdir(os.path.join(checkpoint_dir, cur_dir)): return -1 success_path = os.path.join( _get_serial_dir(serial, checkpoint_dir), SUCCESS_MARK_FILENAME) if os.path.isfile(success_path): return int(serial) if not os.path.isdir(checkpoint_dir): return -1 current_dir = -1 dirs = os.listdir(checkpoint_dir) for cur_dir in dirs: success_num = has_success(checkpoint_dir, cur_dir) if success_num > current_dir: current_dir = success_num return current_dir
20,900
31.404651
144
py
Paddle
Paddle-master/python/paddle/fluid/profiler.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import core from contextlib import contextmanager import os __all__ = [ 'cuda_profiler', 'reset_profiler', 'profiler', 'start_profiler', 'stop_profiler' ] NVPROF_CONFIG = [ "gpustarttimestamp", "gpuendtimestamp", "gridsize3d", "threadblocksize", "streamid", "enableonstart 0", "conckerneltrace", ] @contextmanager def cuda_profiler(output_file, output_mode=None, config=None): """The CUDA profiler. This fuctions is used to profile CUDA program by CUDA runtime application programming interface. The profiling result will be written into `output_file` with Key-Value pair format or Comma separated values format. The user can set the output mode by `output_mode` argument and set the counters/options for profiling by `config` argument. The default config is ['gpustarttimestamp', 'gpustarttimestamp', 'gridsize3d', 'threadblocksize', 'streamid', 'enableonstart 0', 'conckerneltrace']. Args: output_file (string) : The output file name, the result will be written into this file. output_mode (string) : The output mode has Key-Value pair format and Comma separated values format. It should be 'kvp' or 'csv'. config (list of string) : The profiler options and counters can refer to "Compute Command Line Profiler User Guide". 
""" if output_mode is None: output_mode = 'csv' if output_mode not in ['kvp', 'csv']: raise ValueError("The output mode must be 'kvp' or 'csv'.") config = NVPROF_CONFIG if config is None else config config_file = 'nvprof_config_file' with open(config_file, 'wb') as fp: fp.writelines(["%s\n" % item for item in config]) core.nvprof_init(output_file, output_mode, config_file) # Enables profiler collection by the active CUDA profiling tool. core.nvprof_start() yield # Disables profiler collection. core.nvprof_stop() os.remove(config_file) def reset_profiler(): """The profiler clear interface. reset_profiler will clear the previous time record. """ core.reset_profiler() def start_profiler(state): """Enable the profiler. Args: state (string) : The profiling state, which should be 'CPU', 'GPU' or 'All'. 'CPU' means only profile CPU. 'GPU' means profiling GPU as well. 'All' also generates timeline. """ if core.is_profiler_enabled(): return if state not in ['CPU', 'GPU', "All"]: raise ValueError("The state must be 'CPU' or 'GPU' or 'All'.") if state == "GPU": prof_state = core.ProfilerState.kCUDA elif state == "CPU": prof_state = core.ProfilerState.kCPU else: prof_state = core.ProfilerState.kAll core.enable_profiler(prof_state) def stop_profiler(sorted_key=None, profile_path='/tmp/profile'): """Stop the profiler. Args: sorted_key (string) : If None, the profiling results will be printed in the order of first end time of events. Otherwise, the profiling results will be sorted by the this flag. This flag should be one of 'calls', 'total', 'max', 'min' or 'ave'. The `calls` means sorting by the number of calls. The `total` means sorting by the total execution time. The `max` means sorting by the maximum execution time. The `min` means sorting by the minimum execution time. The `ave` means sorting by the average execution time. profile_path (string) : If state == 'All', it will write a profile proto output file. 
""" if not core.is_profiler_enabled(): return sorted_key = 'default' if sorted_key is None else sorted_key if sorted_key not in ['default', 'calls', 'total', 'max', 'min', 'ave']: raise ValueError("The sorted_key must be None or in 'calls', 'total', " "'max', 'min' and 'ave'") key_map = { 'default': core.EventSortingKey.kDefault, 'calls': core.EventSortingKey.kCalls, 'total': core.EventSortingKey.kTotal, 'max': core.EventSortingKey.kMax, 'min': core.EventSortingKey.kMin, 'ave': core.EventSortingKey.kAve, } # TODO(qingqing) : redirect C++ ostream to Python stream. # with core.ostream_redirect(stdout=True, stderr=True): core.disable_profiler(key_map[sorted_key], profile_path) @contextmanager def profiler(state, sorted_key=None, profile_path='/tmp/profile'): """The profiler interface. Different from cuda_profiler, this profiler can be used to profile both CPU and GPU program. By defalut, it records the CPU and GPU operator kernels, if you want to profile other program, you can refer the profiling tutorial to add more records. Args: state (string) : The profiling state, which should be 'CPU' or 'GPU', telling the profiler to use CPU timer or GPU timer for profiling. Although users may have already specified the execution place (CPUPlace/CUDAPlace) in the begining, for flexibility the profiler would not inherit this place. sorted_key (string) : If None, the profiling results will be printed in the order of first end time of events. Otherwise, the profiling results will be sorted by the this flag. This flag should be one of 'calls', 'total', 'max', 'min' or 'ave'. The `calls` means sorting by the number of calls. The `total` means sorting by the total execution time. The `max` means sorting by the maximum execution time. The `min` means sorting by the minimum execution time. The `ave` means sorting by the average execution time. profile_path (string) : If state == 'All', it will write a profile proto output file. 
""" start_profiler(state) yield stop_profiler(sorted_key, profile_path)
6,610
39.558282
79
py
Paddle
Paddle-master/python/paddle/fluid/trainer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import os import core import data_feeder import executor import framework import io # optimizer is same as the parameter of Trainer.__init__. Rename it to opt_module import optimizer as opt_module import parallel_executor from transpiler import distribute_transpiler __all__ = [ 'Trainer', 'BeginEpochEvent', 'EndEpochEvent', 'BeginStepEvent', 'EndStepEvent', ] class BeginEpochEvent(object): def __init__(self, epoch_id): self.epoch = epoch_id class EndEpochEvent(object): def __init__(self, epoch_id): self.epoch = epoch_id class BeginStepEvent(object): def __init__(self, epoch_id, step_id): self.epoch = epoch_id self.step = step_id self.fetch_metrics = True class EndStepEvent(object): def __init__(self, epoch_id, step_id, metrics): self.epoch = epoch_id self.step = step_id self.metrics = metrics def check_and_get_place(place): """ Check the type of place or get the default place Args: place(None|core.CUDAPlace|core.CPUPlace): the place that trainer will be executed on. Raises: TypeError if the type mismatched. Returns: the original place if it is not None. if fluid is compiled with CUDA, returns CUDAPlace(0) by default. Otherwise returns CPUPlace by default. 
""" if place is None: if core.is_compiled_with_cuda(): return core.CUDAPlace(0) else: return core.CPUPlace() else: if not isinstance(place, core.CUDAPlace) and not isinstance( place, core.CPUPlace): raise TypeError("Place should be either CUDAPlace or CPUPlace") return place class Trainer(object): """ Args: train_func(callable): A function which will return loss. The loss must be a scalar. optimizer(optimizer.Optimizer): The optimizer should be an instance of Optimizer place: The device place of this trainer. """ def __init__(self, train_func, optimizer, param_path=None, place=None, parallel=False): self.__stop = False self.parallel = parallel # 1. we need to generate a framework.Program by calling # program_func. Reference: fluid.program_guard in # test_word2vec.py if not isinstance(optimizer, opt_module.Optimizer): raise TypeError("The optimizer should be an instance of Optimizer") self.scope = core.Scope() self.startup_program = framework.Program() self.train_program = framework.Program() with framework.program_guard(self.train_program, self.startup_program): program_func_outs = train_func() self.train_func_outputs = program_func_outs if isinstance( program_func_outs, list) else [program_func_outs] self.test_program = self.train_program.clone() if not isinstance(optimizer, opt_module.Optimizer): raise TypeError( "The optimizer should be an instance of Optimizer") # The fisrt element of program_func_outs is loss. loss = self.train_func_outputs[0] optimize_ops, params_grads = optimizer.minimize(loss) self.place = check_and_get_place(place) self._dist_transpile_if_necessary(optimize_ops, params_grads) # 2. 
move the default_main_program to self.program and run the # default_startup program on an empty core.Scope() # Run startup program with self._prog_and_scope_guard(): exe = executor.Executor(place) exe.run(self.startup_program) if param_path: # load params from param_path into scope io.load_persistables(exe, dirname=param_path) def _transpile_nccl2_dist(self): # PADDLE_TRAINER_IPS if "PADDLE_TRAINER_IPS" not in os.environ: self.nccl_id_var = None else: self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) port = os.getenv("PADDLE_PSERVER_PORT") worker_ips = os.getenv("PADDLE_TRAINER_IPS") worker_endpoints = [] for ip in worker_ips.split(","): worker_endpoints.append(':'.join([ip, port])) self.num_trainers = len(worker_endpoints) current_endpoint = os.getenv("POD_IP") + ":" + port worker_endpoints.remove(current_endpoint) # TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id # in ParallelExecutor to start # distributed training using NCCL2 self.nccl_id_var = self.startup_program.global_block().create_var( name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW) self.startup_program.global_block().append_op( type="gen_nccl_id", inputs={}, outputs={"NCCLID": self.nccl_id_var}, attrs={ "endpoint": current_endpoint, "endpoint_list": worker_endpoints, "trainer_id": self.trainer_id }) def _dist_transpile_if_necessary(self, optimize_ops, params_grads): self._transpile_nccl2_dist() if self.nccl_id_var != None: return if "PADDLE_TRAINING_ROLE" not in os.environ: return # the port of all pservers, needed by both trainer and pserver port = os.getenv("PADDLE_PSERVER_PORT", "6174") # comma separated ips of all pservers, needed by trainer and # pserver pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "") eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # total number of workers/trainers in the job, needed by # trainer and pserver trainers = int(os.getenv("PADDLE_TRAINERS")) # the IP of 
the local machine, needed by pserver only current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port # the unique trainer id, starting from 0, needed by trainer # only trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) # the role, should be either PSERVER or TRAINER training_role = os.getenv("PADDLE_TRAINING_ROLE") with self._prog_and_scope_guard(): t = distribute_transpiler.DistributeTranspiler() t.transpile( trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": self.train_program = t.get_pserver_program(current_endpoint) self.startup_program = t.get_startup_program(current_endpoint, self.train_program) elif training_role == "TRAINER": self.train_program = t.get_trainer_program() else: raise ValueError( 'TRAINING_ROLE environment variable must be either TRAINER or PSERVER' ) def stop(self): """ stop training """ self.__stop = True def train(self, num_epochs, event_handler, reader=None, feed_order=None): """ Train the model. Args: num_epochs: The number of epoch. An epoch will process all data in reader event_handler: The event handler. A function with type (ev:Event)->void reader: feed_order: Feeding order of reader. None will following the defining order in program Returns: """ training_role = os.getenv("PADDLE_TRAINING_ROLE", "") if training_role == "PSERVER": with self._prog_and_scope_guard(): exe = executor.Executor(self.place) exe.run() return if self.parallel: self._train_by_parallel_executor(num_epochs, event_handler, reader, feed_order) else: self._train_by_executor(num_epochs, event_handler, reader, feed_order) def test(self, reader, feed_order): """ Test the model on given test data Args: reader: The reader that yields test data. feed_order: Feeding order of reader. 
None will following the defining order in program """ return self._test_by_executor(reader, feed_order, self.train_func_outputs) def save_params(self, param_path): # reference: save_persistables in io.py with self._prog_and_scope_guard(): exe = executor.Executor(self.place) io.save_persistables(exe, dirname=param_path) @contextlib.contextmanager def _prog_and_scope_guard(self): with framework.program_guard( main_program=self.train_program, startup_program=self.startup_program): with executor.scope_guard(self.scope): yield def _train_by_executor(self, num_epochs, event_handler, reader, feed_order): """ Train by Executor and single device. Args: num_epochs: event_handler: reader: feed_order: Returns: """ with self._prog_and_scope_guard(): feed_var_list = build_feed_var_list(self.train_program, feed_order) feeder = data_feeder.DataFeeder( feed_list=feed_var_list, place=self.place) exe = executor.Executor(self.place) reader = feeder.decorate_reader(reader, multi_devices=False) self._train_by_any_executor(event_handler, exe, num_epochs, reader) def _train_by_any_executor(self, event_handler, exe, num_epochs, reader): for epoch_id in range(num_epochs): event_handler(BeginEpochEvent(epoch_id)) for step_id, data in enumerate(reader()): if self.__stop: return begin_event = BeginStepEvent(epoch_id, step_id) event_handler(begin_event) if begin_event.fetch_metrics: metrics = exe.run(feed=data, fetch_list=[ var.name for var in self.train_func_outputs ]) else: metrics = exe.run(feed=data, fetch_list=[]) event_handler(EndStepEvent(epoch_id, step_id, metrics)) event_handler(EndEpochEvent(epoch_id)) def _test_by_executor(self, reader, feed_order, fetch_list): with executor.scope_guard(self.scope): feed_var_list = build_feed_var_list(self.test_program, feed_order) feeder = data_feeder.DataFeeder( feed_list=feed_var_list, place=self.place) exe = executor.Executor(self.place) accumulated = len(fetch_list) * [0] count = 0 for data in reader(): outs = exe.run(program=self.test_program, 
feed=feeder.feed(data), fetch_list=fetch_list) accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)] count += 1 return [x / count for x in accumulated] def _train_by_parallel_executor(self, num_epochs, event_handler, reader, feed_order): with self._prog_and_scope_guard(): pe = self._get_or_create_parallel_executor() feed_var_list = build_feed_var_list(self.train_program, feed_order) feeder = data_feeder.DataFeeder( feed_list=feed_var_list, place=self.place) reader = feeder.decorate_reader(reader, multi_devices=True) self._train_by_any_executor(event_handler, pe, num_epochs, reader) def _get_parallel_executor(self): return getattr(self, 'parallel_executor', None) def _get_or_create_parallel_executor(self): if self._get_parallel_executor() is None: self.parallel_executor = parallel_executor.ParallelExecutor( use_cuda=isinstance(self.place, core.CUDAPlace), loss_name=self.train_func_outputs[0].name) return self._get_parallel_executor() def build_feed_var_list(program, feed_order): if not isinstance(program, framework.Program): raise TypeError("The 'program' should be an object of Program") if isinstance(feed_order, list): feed_var_list = [ program.global_block().var(var_name) for var_name in feed_order ] else: if not isinstance(feed_order, dict): raise TypeError( "The 'feed_order' should be either None, list or dict.") if not sorted(feed_order.values()) == range(len(feed_order)): raise ValueError( "The values of 'feed_order' should be a permutation of [0, len(feed_order))" ) sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1]) feed_var_list = [ program.global_block().var(pair[0]) for pair in sorted_pair_list ] return feed_var_list
14,176
37.008043
93
py
Paddle
Paddle-master/python/paddle/fluid/initializer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import framework import numpy as np import contextlib __all__ = [ 'Constant', 'Uniform', 'Normal', 'Xavier', 'force_init_on_cpu', 'init_on_cpu', 'ConstantInitializer', 'UniformInitializer', 'NormalInitializer', 'XavierInitializer' ] _force_init_on_cpu_ = False def force_init_on_cpu(): return _force_init_on_cpu_ @contextlib.contextmanager def init_on_cpu(): """ Switch program with `with` statement Examples: >>> with init_on_cpu(): >>> step = layers.create_global_var() """ global _force_init_on_cpu_ pre_state = force_init_on_cpu() _force_init_on_cpu_ = True yield _force_init_on_cpu_ = pre_state class Initializer(object): """Base class for variable initializers Defines the common interface of variable initializers. They add operations to the init program that are used to initialize variables. Users should not use this class directly, but need to use one of its implementations. """ def __init_(self): pass def __call__(self, param, block): """Add corresponding initialization operations to the network """ raise NotImplementedError() def _compute_fans(self, var): """Compute the fan_in and the fan_out for layers This method computes the fan_in and the fan_out for neural network layers, if not specified. It is not possible to perfectly estimate fan_in and fan_out. This method will estimate it correctly for matrix multiply and convolutions. 
Args: var: variable for which fan_in and fan_out have to be computed Returns: tuple of two integers (fan_in, fan_out) """ shape = var.shape if not shape or len(shape) == 0: fan_in = fan_out = 1 elif len(shape) == 1: fan_in = fan_out = shape[0] elif len(shape) == 2: # This is the case for simple matrix multiply fan_in = shape[0] fan_out = shape[1] else: # Assume this to be a convolutional kernel # In PaddlePaddle, the shape of the kernel is like: # [num_filters, num_filter_channels, ...] where the remaining # dimensions are the filter_size receptive_field_size = np.prod(shape[2:]) fan_in = shape[1] * receptive_field_size fan_out = shape[0] * receptive_field_size return (fan_in, fan_out) class ConstantInitializer(Initializer): """Implements the constant initializer """ def __init__(self, value=0.0, force_cpu=False): """Constructor for ConstantInitializer Args: value: constant value to initialize the variable """ assert value is not None super(ConstantInitializer, self).__init__() self._value = value self._force_cpu = force_cpu def __call__(self, var, block): """Add constant initialization ops for a variable Args: var: Variable that needs to be initialized block: The block in which initialization ops should be added Returns: the initialization op """ assert isinstance(var, framework.Variable) assert isinstance(block, framework.Block) # Initialization Ops should be prepended and not appended op = block.prepend_op( type="fill_constant", outputs={"Out": var}, attrs={ "shape": var.shape, "dtype": int(var.dtype), "value": float(self._value), 'force_cpu': self._force_cpu or force_init_on_cpu() }) var.op = op return op class UniformInitializer(Initializer): """Implements the random uniform distribution initializer """ def __init__(self, low=-1.0, high=1.0, seed=0): """Constructor for UniformInitializer Args: low: lower boundary of the uniform distribution high: upper boundary of the uniform distribution seed: random seed """ assert low is not None assert high is not None 
assert high >= low assert seed is not None super(UniformInitializer, self).__init__() self._low = low self._high = high self._seed = seed def __call__(self, var, block): """Add uniform distribution initialization ops for a variable Args: var: Variable that needs to be initialized block: The block in which initialization ops should be added Returns: the initialization op """ assert isinstance(var, framework.Variable) assert isinstance(block, framework.Block) # Initialization Ops should be prepended and not appended if self._seed == 0: self._seed = block.program.random_seed op = block.prepend_op( type="uniform_random", outputs={"Out": var}, attrs={ "shape": var.shape, "dtype": int(var.dtype), "min": self._low, "max": self._high, "seed": self._seed }) var.op = op return op class NormalInitializer(Initializer): """Implements the random Normal(Gaussian) distribution initializer """ def __init__(self, loc=0.0, scale=1.0, seed=0): """Constructor for NormalInitializer Args: loc: mean of the normal distribution scale: standard deviation of the normal distribution seed: random seed """ assert loc is not None assert scale is not None assert seed is not None super(NormalInitializer, self).__init__() self._mean = loc self._std_dev = scale self._seed = seed def __call__(self, var, block): """Add normal distribution initialization ops for a variable Args: var: Variable that needs to be initialized block: The block in which initialization ops should be added Returns: the initialization op """ assert isinstance(var, framework.Variable) assert isinstance(block, framework.Block) # Initialization Ops should be prepended and not appended if self._seed == 0: self._seed = block.program.random_seed op = block.prepend_op( type="gaussian_random", outputs={"Out": var}, attrs={ "shape": var.shape, "dtype": int(var.dtype), "mean": self._mean, "std": self._std_dev, "seed": self._seed }) var.op = op return op class XavierInitializer(Initializer): """Implements the Xavier initializer This class 
implements the Xavier weight initializer from the paper Understanding the difficulty of training deep feedforward neural networks[1] by Xavier Glorot and Yoshua Bengio. This initializer is designed to keep the scale of the gradients approximately same in all the layers. In case of Uniform distribution, the range is [-x, x], where x = sqrt(6 / (fan_in + fan_out)). In case of Normal distribution, the mean is 0 and the standard deviation is sqrt(2/ (fan_in + fan_out)). References: [1] Understanding the difficulty of training deep feedforward neural networks. International conference on artificial intelligence and statistics. (http://proceedings.mlr.press/v9/glorot10a.html) """ def __init__(self, uniform=True, fan_in=None, fan_out=None, seed=0): """Constructor for XavierInitializer Args: uniform: whether to use uniform or normal distribution fan_in: fan_in for Xavier initialization. If None, it is inferred from the variable. fan_out: fan_out for Xavier initialization. If None, it is inferred from the variable. seed: random seed Note: It is recommended to set fan_in and fan_out to None for most cases. 
""" assert uniform is not None assert seed is not None super(XavierInitializer, self).__init__() self._uniform = uniform self._fan_in = fan_in self._fan_out = fan_out self._seed = seed def __call__(self, var, block): """Add xavier initialization ops for a variable Args: var: Variable that needs to be initialized block: The block in which initialization ops should be added Returns: the initialization op """ assert isinstance(var, framework.Variable) assert isinstance(block, framework.Block) f_in, f_out = self._compute_fans(var) # If fan_in and fan_out are passed, use them fan_in = f_in if self._fan_in is None else self._fan_in fan_out = f_out if self._fan_out is None else self._fan_out if self._seed == 0: self._seed = block.program.random_seed if self._uniform: limit = np.sqrt(6.0 / float(fan_in + fan_out)) op = block.prepend_op( type="uniform_random", outputs={"Out": var}, attrs={ "shape": var.shape, "dtype": int(var.dtype), "min": -limit, "max": limit, "seed": self._seed }) else: std = np.sqrt(2.0 / float(fan_in + fan_out)) op = block.prepend_op( type="gaussian_random", outputs={"Out": var}, attrs={ "shape": var.shape, "dtype": int(var.dtype), "mean": 0.0, "std": std, "seed": self._seed }) var.op = op return op class MSRAInitializer(Initializer): """Implements the MSRA initializer a.k.a. Kaiming Initializer This class implements the weight initialization from the paper Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification[1] by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a robust initialization method that particularly considers the rectifier nonlinearities. In case of Uniform distribution, the range is [-x, x], where x = sqrt(6 / fan_in). In case of Normal distribution, the mean is 0 and the standard deviation is sqrt(2/ fan_in). 
References: [1] Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification (https://arxiv.org/abs/1502.01852) """ def __init__(self, uniform=True, fan_in=None, seed=0): """Constructor for MSRAInitializer Args: uniform: whether to use uniform or normal distribution fan_in: fan_in for MSRAInitializer. If None, it is inferred from the variable. seed: random seed Note: It is recommended to set fan_in to None for most cases. """ assert uniform is not None assert seed is not None super(MSRAInitializer, self).__init__() self._uniform = uniform self._fan_in = fan_in self._seed = seed def __call__(self, var, block): """Add MSRA initialization ops for a variable Args: var: Variable that needs to be initialized block: The block in which initialization ops should be added Returns: the initialization op """ assert isinstance(var, framework.Variable) assert isinstance(block, framework.Block) f_in, f_out = self._compute_fans(var) # If fan_in is passed, use it fan_in = f_in if self._fan_in is None else self._fan_in if self._seed == 0: self._seed = block.program.random_seed if self._uniform: limit = np.sqrt(6.0 / float(fan_in)) op = block.prepend_op( type="uniform_random", outputs={"Out": var}, attrs={ "shape": var.shape, "dtype": int(var.dtype), "min": -limit, "max": limit, "seed": self._seed }) else: std = np.sqrt(2.0 / float(fan_in)) op = block.prepend_op( type="gaussian_random", outputs={"Out": var}, attrs={ "shape": var.shape, "dtype": int(var.dtype), "mean": 0.0, "std": std, "seed": self._seed }) var.op = op return op # We short the class name, since users will use the initializer with the package # name. The sample code: # # import paddle.fluid as fluid # # hidden = fluid.layers.fc(..., # param_attr=ParamAttr(fluid.initializer.Xavier())) # # It is no need to add an `Initializer` as the class suffix Constant = ConstantInitializer Uniform = UniformInitializer Normal = NormalInitializer Xavier = XavierInitializer MSRA = MSRAInitializer
14,199
31.346241
80
py
Paddle
Paddle-master/python/paddle/fluid/optimizer.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from collections import defaultdict from paddle.fluid.framework import Program import framework import layers from backward import append_backward from framework import program_guard import unique_name from initializer import Constant from layer_helper import LayerHelper from regularizer import append_regularization_ops from clip import append_gradient_clip_ops, error_clip_callback from contextlib import contextmanager __all__ = [ 'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer', 'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer', 'Adadelta', 'ModelAverage', 'Optimizer' ] class Optimizer(object): """Optimizer Base class. Define the common interface of an optimizer. User should not use this class directly, but need to use one of it's implementation. 
""" def __init__(self, learning_rate, regularization=None): if not isinstance(learning_rate, float) and \ not isinstance(learning_rate, framework.Variable): raise TypeError("learning rate should be float or Variable") self.regularization = regularization self._learning_rate = learning_rate # the learning rate type should be inferenced from loss self._dtype = None # each program should have a independent learning rate # program -> Variable(learning_rate) self._learning_rate_map = dict() if isinstance(self._learning_rate, framework.Variable): self._learning_rate_map[framework.default_main_program( )] = self._learning_rate # Dictionary of accumulators. Some optimizer subclasses need to # allocate and manage extra variables associated with the parameters # to train. These variables are called accumulators. # {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...} self._accumulators = defaultdict(lambda: dict()) self.helper = None def _create_global_learning_rate(self): lr = self.global_learning_rate() if isinstance(lr, framework.Variable): return else: if not isinstance(self._learning_rate, float): raise TypeError( "learning rate variable is create outside optimizer," "can not create new learning rate variable for new program") # create learning rate in the current main program self._learning_rate_map[framework.default_main_program( )] = layers.create_global_var( name=unique_name.generate("learning_rate"), shape=[1], value=float(self._learning_rate), dtype='float32' if self._dtype == None else self._dtype, persistable=True) def global_learning_rate(self, program=None): """ get global decayed learning rate :return: """ if program is None: program = framework.default_main_program() return self._learning_rate_map.get(program, None) def _append_optimize_op(self, block, param_and_grad): """ append optimize operator to block and return all the added optimize_op """ raise NotImplementedError() def _create_param_lr(self, param_and_grad): # create learning rate 
variable for every parameter param = param_and_grad[0] param_lr = param.optimize_attr['learning_rate'] if param_lr == 1.0: return self.global_learning_rate() else: return self.global_learning_rate() * param_lr def _create_accumulators(self, block, parameters): """Create all accumulators needed by the parameters Args: block: the block in which the loss variable is present parameters: list of parameter variables for the optimizer """ pass def _finish_update(self, block): """Finish any custom updates needed before completing an optimization step Args: block: the block in which the loss variable is present parameters: list of parameter variables for the optimizer Returns: list of finish ops or None """ pass def _add_accumulator(self, name, param, dtype=None, fill_value=0.0, shape=None): """Utility function to add an accumulator for a parameter Args: block: the block in which the loss variable is present name: name of the accumulator param: parameter variable for which accumulator is to be added dtype: data type of the accumulator variable fill_value: value to initialize the accumulator variable """ if (name in self._accumulators and param.name in self._accumulators[name]): raise Exception("Accumulator {} already exists for parameter {}". 
format(name, param.name)) if shape == None: shape = param.shape assert isinstance(self.helper, LayerHelper) var = self.helper.create_global_variable( name=unique_name.generate(name), persistable=True, dtype=dtype or param.dtype, type=param.type, shape=shape) self.helper.set_variable_initializer( var, initializer=Constant(value=float(fill_value))) self._accumulators[name][param.name] = var return var def _get_accumulator(self, name, param): """Utility function to fetch an accumulator for a parameter Args: name: name of the accumulator param: parameter variable for which accumulator is to be fetched Returns: accumulator variable for the parameter """ if (name not in self._accumulators or param.name not in self._accumulators[name]): raise Exception("Accumulator {} does not exist for parameter {}". format(name, param.name)) return self._accumulators[name][param.name] def create_optimization_pass(self, parameters_and_grads, loss, startup_program=None): """Add optimization operators to update gradients to variables. Args: loss: the target that this optimization is for. parameters_and_grads: a list of (variable, gradient) pair to update. Returns: return_op_list: a list of operators that will complete one step of optimization. This will include parameter update ops, global step update ops and any other custom ops required by subclasses to manage their internal state. :param startup_program: """ # This is a default implementation of create_optimization_pass that # can be shared by most optimizers. This implementation assumes that # the subclass will implement the _append_optimize_op method and the # _initialize_tensors method. The subclass can extend the # _create_accumulators method if it needs to create accumulators # for parameters and extend _finish_update method to add custom ops. 
# Create any accumulators program = loss.block.program self._dtype = loss.dtype with program_guard(program, startup_program): global_block = framework.default_main_program().global_block() start = len(global_block.ops) self.helper = LayerHelper(self.__class__.__name__) self._create_accumulators(loss.block, [p[0] for p in parameters_and_grads]) self._create_global_learning_rate() optimize_ops = [] for param_and_grad in parameters_and_grads: with param_and_grad[0].block.program.optimized_guard( param_and_grad[0]): if param_and_grad[0].trainable is True and param_and_grad[ 1] is not None: optimize_op = self._append_optimize_op(loss.block, param_and_grad) optimize_ops.append(optimize_op) # Get custom finish ops for subclasses # FIXME: Need to fix this once we figure out how to handle dependencies self._finish_update(loss.block) end = len(global_block.ops) return global_block.slice_ops(start, end) def minimize(self, loss, startup_program=None, parameter_list=None, no_grad_set=None): """Add operations to minimize `loss` by updating `parameter_list`. This method combines interface `append_backward()` and `create_optimization_pass()` into one. """ params_grads = append_backward(loss, parameter_list, no_grad_set, [error_clip_callback]) params_grads = sorted(params_grads, key=lambda x: x[0].name) params_grads = append_gradient_clip_ops(params_grads) # Add regularization if any params_grads = append_regularization_ops(params_grads, self.regularization) optimize_ops = self.create_optimization_pass(params_grads, loss, startup_program) return optimize_ops, params_grads class SGDOptimizer(Optimizer): """ Simple SGD optimizer without any state. 
""" def __init__(self, learning_rate, **kwargs): assert learning_rate is not None super(SGDOptimizer, self).__init__( learning_rate=learning_rate, **kwargs) self.type = "sgd" def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) # create the optimize op sgd_op = block.append_op( type=self.type, inputs={ "Param": param_and_grad[0], "Grad": param_and_grad[1], "LearningRate": self._create_param_lr(param_and_grad) }, outputs={"ParamOut": param_and_grad[0]}) return sgd_op class MomentumOptimizer(Optimizer): """Simple Momentum optimizer with velocity state """ _velocity_acc_str = "velocity" def __init__(self, learning_rate, momentum, use_nesterov=False, **kwargs): assert learning_rate is not None assert momentum is not None super(MomentumOptimizer, self).__init__( learning_rate=learning_rate, **kwargs) self.type = "momentum" self._momentum = momentum self._use_nesterov = bool(use_nesterov) def _create_accumulators(self, block, parameters): assert isinstance(block, framework.Block) for p in parameters: self._add_accumulator(self._velocity_acc_str, p) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) velocity_acc = self._get_accumulator(self._velocity_acc_str, param_and_grad[0]) # create the momentum optimize op momentum_op = block.append_op( type=self.type, inputs={ "Param": param_and_grad[0], "Grad": param_and_grad[1], "Velocity": velocity_acc, "LearningRate": self._create_param_lr(param_and_grad) }, outputs={ "ParamOut": param_and_grad[0], "VelocityOut": velocity_acc }, attrs={"mu": self._momentum, "use_nesterov": self._use_nesterov}) return momentum_op class AdagradOptimizer(Optimizer): """Simple Adagrad optimizer with moment state """ _moment_acc_str = "moment" def __init__(self, learning_rate, epsilon=1.0e-6, **kwargs): assert learning_rate is not None assert epsilon is not None super(AdagradOptimizer, self).__init__( learning_rate=learning_rate, **kwargs) self.type = "adagrad" 
self._epsilon = epsilon def _create_accumulators(self, block, parameters): assert isinstance(block, framework.Block) for p in parameters: self._add_accumulator(self._moment_acc_str, p) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) moment_acc = self._get_accumulator(self._moment_acc_str, param_and_grad[0]) # Create the adagrad optimizer op adagrad_op = block.append_op( type=self.type, inputs={ "Param": param_and_grad[0], "Grad": param_and_grad[1], "Moment": moment_acc, "LearningRate": self._create_param_lr(param_and_grad) }, outputs={"ParamOut": param_and_grad[0], "MomentOut": moment_acc}, attrs={"epsilon": self._epsilon}) return adagrad_op class AdamOptimizer(Optimizer): """Implements the Adam Optimizer """ _moment1_acc_str = "moment1" _moment2_acc_str = "moment2" def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, **kwargs): assert learning_rate is not None assert beta1 is not None assert beta2 is not None assert epsilon is not None super(AdamOptimizer, self).__init__( learning_rate=learning_rate, **kwargs) self.type = "adam" self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon def _create_accumulators(self, block, parameters): assert isinstance(block, framework.Block) main_block = block.program.global_block() # Create beta1 and beta2 power tensors beta_shape = [1] self._beta1_pow_acc = self.helper.create_global_variable( name=unique_name.generate('beta1_pow_acc'), dtype='float32' if self._dtype == None else self._dtype, shape=beta_shape, lod_level=0, persistable=True) self.helper.set_variable_initializer( self._beta1_pow_acc, initializer=Constant(self._beta1)) self._beta2_pow_acc = self.helper.create_global_variable( name=unique_name.generate('beta2_pow_acc'), dtype='float32' if self._dtype == None else self._dtype, shape=beta_shape, lod_level=0, persistable=True) self.helper.set_variable_initializer( self._beta2_pow_acc, initializer=Constant(self._beta2)) # Create accumulator 
tensors for first and second moments for p in parameters: self._add_accumulator(self._moment1_acc_str, p) self._add_accumulator(self._moment2_acc_str, p) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) moment1 = self._get_accumulator(self._moment1_acc_str, param_and_grad[0]) moment2 = self._get_accumulator(self._moment2_acc_str, param_and_grad[0]) # create the adam optimize op adam_op = block.append_op( type=self.type, inputs={ "Param": param_and_grad[0], "Grad": param_and_grad[1], "LearningRate": self._create_param_lr(param_and_grad), "Moment1": moment1, "Moment2": moment2, "Beta1Pow": self._beta1_pow_acc, "Beta2Pow": self._beta2_pow_acc }, outputs={ "ParamOut": param_and_grad[0], "Moment1Out": moment1, "Moment2Out": moment2 }, attrs={ "beta1": self._beta1, "beta2": self._beta2, "epsilon": self._epsilon }) return adam_op def _finish_update(self, block): """Update Beta1 and Beta2 Power accumulators """ assert isinstance(block, framework.Block) main_block = block.program.global_block() scale_beta1 = main_block.append_op( type="scale", inputs={"X": self._beta1_pow_acc}, outputs={"Out": self._beta1_pow_acc}, attrs={"scale": self._beta1}) scale_beta2 = main_block.append_op( type="scale", inputs={"X": self._beta2_pow_acc}, outputs={"Out": self._beta2_pow_acc}, attrs={"scale": self._beta2}) return [scale_beta1, scale_beta2] class AdamaxOptimizer(Optimizer): """Implements the Adamax Optimizer """ _moment_acc_str = "moment" _inf_norm_acc_str = "inf_norm" def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, **kwargs): assert learning_rate is not None assert beta1 is not None assert beta2 is not None assert epsilon is not None super(AdamaxOptimizer, self).__init__( learning_rate=learning_rate, **kwargs) self.type = "adamax" self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon def _create_accumulators(self, block, parameters): # Create beta1 power accumulator tensor beta_shape = [1] 
self._beta1_pow_acc = self.helper.create_global_variable( name=unique_name.generate('beta1_pow_acc'), dtype='float32' if self._dtype == None else self._dtype, shape=beta_shape, lod_level=0, persistable=True) self.helper.set_variable_initializer( self._beta1_pow_acc, initializer=Constant(self._beta1)) # Create accumulator tensors for first moment and infinity norm for p in parameters: self._add_accumulator(self._moment_acc_str, p) self._add_accumulator(self._inf_norm_acc_str, p) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0]) inf_norm = self._get_accumulator(self._inf_norm_acc_str, param_and_grad[0]) # create the adamax optimize op adamax_op = block.append_op( type=self.type, inputs={ "Param": param_and_grad[0], "Grad": param_and_grad[1], "LearningRate": self._create_param_lr(param_and_grad), "Moment": moment, "InfNorm": inf_norm, "Beta1Pow": self._beta1_pow_acc }, outputs={ "ParamOut": param_and_grad[0], "MomentOut": moment, "InfNormOut": inf_norm }, attrs={ "beta1": self._beta1, "beta2": self._beta2, "epsilon": self._epsilon }) return adamax_op def _finish_update(self, block): """Update Beta1 Power accumulator """ assert isinstance(block, framework.Block) main_block = block.program.global_block() scale_beta1 = main_block.append_op( type="scale", inputs={"X": self._beta1_pow_acc}, outputs={"Out": self._beta1_pow_acc}, attrs={"scale": self._beta1}) return [scale_beta1] class DecayedAdagradOptimizer(Optimizer): """Simple Decayed Adagrad optimizer with moment state """ _moment_acc_str = "moment" def __init__(self, learning_rate, decay=0.95, epsilon=1.0e-6, **kwargs): assert learning_rate is not None assert decay is not None assert epsilon is not None super(DecayedAdagradOptimizer, self).__init__( learning_rate=learning_rate, **kwargs) self.type = "decayed_adagrad" self._decay = decay self._epsilon = epsilon def _create_accumulators(self, block, 
parameters): assert isinstance(block, framework.Block) for p in parameters: self._add_accumulator(self._moment_acc_str, p) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) moment_acc = self._get_accumulator(self._moment_acc_str, param_and_grad[0]) # Create the decayed adagrad optimizer op decayed_adagrad_op = block.append_op( type=self.type, inputs={ "Param": param_and_grad[0], "Grad": param_and_grad[1], "Moment": moment_acc, "LearningRate": self._create_param_lr(param_and_grad) }, outputs={"ParamOut": param_and_grad[0], "MomentOut": moment_acc}, attrs={"epsilon": self._epsilon}) return decayed_adagrad_op class AdadeltaOptimizer(Optimizer): """ **Adadelta Optimizer** Simple Adadelta optimizer with average squared grad state and average squared update state. The details of adadelta please refer to this `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD <http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf>`_. .. math:: E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\ learning\\_rate &= sqrt( ( E(dx_{t-1}^2) + \\epsilon ) / ( \\ E(g_t^2) + \\epsilon ) ) \\\\ E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2 Args: learning_rate(float): global leraning rate rho(float): rho in equation epsilon(float): epsilon in equation Examples: .. 
code-block:: python optimizer = fluid.optimizer.Adadelta( learning_rate=0.0003, epsilon=1.0e-6, rho=0.95) _, params_grads = optimizer.minimize(cost) """ _avg_squared_grad_acc_str = "_avg_squared_grad" _avg_squared_update_acc_str = "_avg_squared_update" def __init__(self, learning_rate, epsilon=1.0e-6, rho=0.95, **kwargs): if learning_rate is None: raise ValueError("learning_rate is not set.") if epsilon is None: raise ValueError("epsilon is not set.") if rho is None: raise ValueError("rho is not set.") super(AdadeltaOptimizer, self).__init__( learning_rate=learning_rate, **kwargs) self.type = "adadelta" self._epsilon = epsilon self._rho = rho def _create_accumulators(self, block, parameters): if not isinstance(block, framework.Block): raise TypeError("block is not instance of framework.Block.") for p in parameters: self._add_accumulator(self._avg_squared_grad_acc_str, p) self._add_accumulator(self._avg_squared_update_acc_str, p) def _append_optimize_op(self, block, param_and_grad): if not isinstance(block, framework.Block): raise TypeError("block is not instance of framework.Block.") avg_squared_grad_acc = self._get_accumulator( self._avg_squared_grad_acc_str, param_and_grad[0]) avg_squared_update_acc = self._get_accumulator( self._avg_squared_update_acc_str, param_and_grad[0]) # Create the adadelta optimizer op adadelta_op = block.append_op( type=self.type, inputs={ "Param": param_and_grad[0], "Grad": param_and_grad[1], "AvgSquaredGrad": avg_squared_grad_acc, "AvgSquaredUpdate": avg_squared_update_acc }, outputs={ "ParamOut": param_and_grad[0], "AvgSquaredGradOut": avg_squared_grad_acc, "AvgSquaredUpdateOut": avg_squared_update_acc }, attrs={"epsilon": self._epsilon, "rho": self._rho}) return adadelta_op class RMSPropOptimizer(Optimizer): """ Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method. The original slides proposed RMSProp: Slide 29 of http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf . 
The original equation is as follows: .. math:: r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\ w & = w - \\frac{\\eta} {\\sqrt{r(w,t) + \\epsilon}} \\nabla Q_{i}(w) The first equation calculates moving average of the squared gradient for each weight. Then dividing the gradient by :math: `sqrt{v(w,t)}`. In some cases, adding a momentum term :math: `\\beta` is beneficial. In our implementation, Nesterov momentum is used: .. math:: r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\ v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{v(w,t) + \\epsilon}} \\nabla Q_{i}(w) w & = w - v(w, t) where, :math: `\\rho` is a hyperparameter and typical values are 0.9, 0.95 and so on. :math: `beta` is the momentum term. :math: `\\epsilon` is a smoothing term to avoid division by zero, usually set somewhere in range from 1e-4 to 1e-8. Args: learning_rate(float): global leraning rate. rho(float): rho is :math: `\\rho` in equation, set 0.95 by default. epsilon(float): :math: `\\epsilon` in equation is smoothing term to avoid division by zero, set 1e-6 by default. momentum(float): :math: `\\beta` in equation is the momentum term, set 0.0 by default. Raises: ValueError: If learning_rate, rho, epsilon, momentum are None. Examples: .. 
code-block:: python optimizer = fluid.optimizer.RMSProp(0.0001) _, params_grads = optimizer.minimize(cost) """ _momentum_acc_str = "momentum" _mean_square_acc_str = "mean_square" def __init__(self, learning_rate, rho=0.95, epsilon=1.0e-6, momentum=0.0, **kwargs): super(RMSPropOptimizer, self).__init__( learning_rate=learning_rate, **kwargs) if learning_rate is None: raise ValueError("learning_rate is not set.") if rho is None: raise ValueError("rho is not set.") if epsilon is None: raise ValueError("epsilon is not set.") if momentum is None: raise ValueError("momentum is not set.") self.type = "rmsprop" self._rho = rho self._epsilon = epsilon self._momentum = momentum def _create_accumulators(self, block, parameters): if not isinstance(block, framework.Block): raise TypeError("block is not instance of framework.Block.") for p in parameters: self._add_accumulator(self._momentum_acc_str, p) self._add_accumulator(self._mean_square_acc_str, p) def _append_optimize_op(self, block, param_and_grad): if not isinstance(block, framework.Block): raise TypeError("block is not instance of framework.Block.") momentum_acc = self._get_accumulator(self._momentum_acc_str, param_and_grad[0]) mean_square_acc = self._get_accumulator(self._mean_square_acc_str, param_and_grad[0]) rmsprop_op = block.append_op( type=self.type, inputs={ "Param": param_and_grad[0], "Grad": param_and_grad[1], "Moment": momentum_acc, "MeanSquare": mean_square_acc, "LearningRate": self._create_param_lr(param_and_grad), }, outputs={ "ParamOut": param_and_grad[0], "MomentOut": momentum_acc, "MeanSquareOut": mean_square_acc }, attrs={ "epsilon": self._epsilon, "decay": self._rho, "momentum": self._momentum }) return rmsprop_op # We short the class name, since users will use the optimizer with the package # name. The sample code: # # import paddle.fluid as fluid # # sgd = fluid.optimizer.SGD(...) 
# # It is no need to add an `Optimizer` as the class suffix SGD = SGDOptimizer Momentum = MomentumOptimizer Adagrad = AdagradOptimizer Adam = AdamOptimizer Adamax = AdamaxOptimizer DecayedAdagrad = DecayedAdagradOptimizer Adadelta = AdadeltaOptimizer RMSProp = RMSPropOptimizer class ModelAverage(Optimizer): """Accumulate the average of parameters whtin sliding window. The average result will be saved in temporary variables which can be applied to parameter variables of current model by calling 'apply()' method. And the 'restore()' method is used to restored the parameter values of current model. The size of average window is determined by average_window_rate, min_average_window, max_average_window and current update times. Args: average_window_rate: The rate of average window. params_grads: A list of parameter-grad variable pairs. min_average_window: The minimum size of average window. max_average_window: The maximum size of average window. Examples: ... optimizer = fluid.optimizer.Momentum() _, params_grads = optimizer.minimize(cost) model_average = fluid.optimizer.ModelAverage(params_grads, 0.15, min_average_window=10000, max_average_window=20000) for pass_id in range(args.pass_num): for data in train_reader(): exe.run(fluid.default_main_program()...) with model_average.apply(exe): for data in test_reader(): exe.run(inference_program...) 
""" def __init__(self, average_window_rate, params_grads=None, min_average_window=10000, max_average_window=10000, **kwargs): super(ModelAverage, self).__init__(0.0, **kwargs) self.average_window = average_window_rate self.min_average_window = min_average_window self.max_average_window = max_average_window self.params_grads = [] if params_grads is None else params_grads params = {} for param, grad in self.params_grads: if param.do_model_average != False: params[param.name] = (param, grad) for param in framework.default_main_program().global_block( ).all_parameters(): if param.name not in params and param.do_model_average != False: grad = param.block.create_var( name=unique_name.generate(".".join([param.name, 'tmp'])), dtype=param.dtype, persistable=False, stop_gradient=True) params[param.name] = (param, grad) self.params_grads = params.values() for param, grad in self.params_grads: self._append_average_accumulate_op(param) self.apply_program = Program() block = self.apply_program.global_block() with program_guard(main_program=self.apply_program): for param_grad in self.params_grads: self._add_average_apply_op(block, param_grad) self.restore_program = Program() block = self.restore_program.global_block() with program_guard(main_program=self.restore_program): for param_grad in self.params_grads: self._add_average_restore_op(block, param_grad) def _add_average_apply_op(self, block, param_grad): param = block.clone_variable(param_grad[0]) grad = block.clone_variable(param_grad[1]) sum_1 = block.clone_variable(self._get_accumulator('sum_1', param)) sum_2 = block.clone_variable(self._get_accumulator('sum_2', param)) sum_3 = block.clone_variable(self._get_accumulator('sum_3', param)) num_accumulates = block.clone_variable( self._get_accumulator('num_accumulates', param)) old_num_accumulates = block.clone_variable( self._get_accumulator('old_num_accumulates', param)) num_updates = block.clone_variable( self._get_accumulator('num_updates', param)) # backup param value to 
grad layers.assign(input=param, output=grad) # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates) tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) sum = layers.sum(x=[sum_1, sum_2, sum_3]) tmp = layers.cast( x=tmp, dtype='float32' if self._dtype == None else self._dtype) sum = layers.cast( x=sum, dtype='float32' if self._dtype == None else self._dtype) layers.elementwise_div(x=sum, y=tmp, out=param) def _add_average_restore_op(self, block, param_grad): param = block.clone_variable(param_grad[0]) grad = block.clone_variable(param_grad[1]) layers.assign(input=grad, output=param) def _append_average_accumulate_op(self, param): self.helper = LayerHelper("average_accumulate") sum_1 = self._add_accumulator('sum_1', param) sum_2 = self._add_accumulator('sum_2', param) sum_3 = self._add_accumulator('sum_3', param) num_accumulates = self._add_accumulator( 'num_accumulates', param, dtype='int64', shape=[1]) old_num_accumulates = self._add_accumulator( 'old_num_accumulates', param, dtype='int64', shape=[1]) num_updates = self._add_accumulator( 'num_updates', param, dtype='int64', shape=[1]) self.helper.append_op( type='average_accumulates', inputs={ "param": param, "in_sum_1": sum_1, "in_sum_2": sum_2, "in_sum_3": sum_3, "in_num_accumulates": num_accumulates, "in_old_num_accumulates": old_num_accumulates, "in_num_updates": num_updates }, outputs={ "out_sum_1": sum_1, "out_sum_2": sum_2, "out_sum_3": sum_3, "out_num_accumulates": num_accumulates, "out_old_num_accumulates": old_num_accumulates, "out_num_updates": num_updates, }, attrs={ "average_window": self.average_window, "min_average_window": self.min_average_window, "max_average_window": self.max_average_window, }) @contextmanager def apply(self, executor, need_restore=True): """Apply average values to parameters of current model. 
""" executor.run(self.apply_program) try: yield finally: if need_restore: self.restore(executor) def restore(self, executor): """Restore parameter values of current model. """ executor.run(self.restore_program)
36,570
36.663234
83
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/distribute_transpiler_simple.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..framework import Program, default_main_program, Parameter, Variable from ..layer_helper import LayerHelper def hash_name_to_server(params_grads, pserver_endpoints): """ :param param_grads: :return: a map of pserver endpoint -> params -> [param list] grads -> [grad list] """ def _hash_param(param_name, total): return hash(param_name) % total param_grad_map = dict() for param, grad in params_grads: if param.trainable is True and grad is not None: server_id = _hash_param(param.name, len(pserver_endpoints)) server_for_param = pserver_endpoints[server_id] if not param_grad_map.has_key(server_for_param): param_grad_map[server_for_param] = {"params": [], "grads": []} param_grad_map[server_for_param]["params"].append(param) param_grad_map[server_for_param]["grads"].append(grad) return param_grad_map def round_robin(params_grads, pserver_endpoints): assert (len(params_grads) > len(pserver_endpoints)) param_grad_map = dict() pserver_idx = 0 for param, grad in params_grads: if param.trainable is True: server_for_param = pserver_endpoints[pserver_idx] if not param_grad_map.has_key(server_for_param): param_grad_map[server_for_param] = {"params": [], "grads": []} param_grad_map[server_for_param]["params"].append(param) param_grad_map[server_for_param]["grads"].append(grad) pserver_idx += 1 if pserver_idx >= len(pserver_endpoints): pserver_idx = 0 return param_grad_map class 
SimpleDistributeTranspiler: def transpile(self, optimize_ops, params_grads, program=None, pservers="127.0.0.1:6174", trainers=1, split_method=round_robin): """ Transpile the program to a distributed data-parallelism programs. The main_program will be transform to use a remote parameter server to do parameter optimization. And the optimization graph will be put in to a parameter server program. Use different methods to split trainable varialbles to different parameter servers. Example to run: exe = fluid.Executor(place) t = fluid.DistributeTranspiler() t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1) pserver_endpoint = os.getenv("PSERVER") if pserver_endpoint: pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops) exe.run(fluid.default_startup_program()) exe.run(pserver_prog) else: feeder = fluid.DataFeeder(feed_list=[images, label], place=place) exe.run(fluid.default_startup_program()) for pass_id in range(PASS_NUM): ... :param optimize_ops: op list of optimization, should be the return value of Optimizer.minimize :type optimize_ops: list :param program: program to optimize, default default_main_program :param pservers: parameter server endpoints like "m1:6174,m2:6174" :type pservers: string :return: return a list of programs """ if program is None: program = default_main_program() self.program = program self.trainers = trainers self.optimize_ops = optimize_ops self._optimize_distributed( optimize_ops, program, params_grads, pservers=pservers, trainers=trainers, split_method=split_method) def _clone_param(self, block, v): assert isinstance(v, Parameter) new_p = Parameter( block=block, shape=v.shape, dtype=v.dtype, type=v.type, lod_level=v.lod_level, stop_gradient=v.stop_gradient, trainable=v.trainable, optimize_attr=v.optimize_attr, regularizer=v.regularizer, name=v.name) block.vars[new_p.name] = new_p def _clone_var(self, block, var): assert isinstance(var, Variable) return block.create_var( name=var.name, 
shape=var.shape, dtype=var.dtype, type=var.type, lod_level=var.lod_level, persistable=var.persistable) def _optimize_distributed(self, optimize_ops, program, params_and_grads, **kwargs): if kwargs.has_key("split_method"): split_method = kwargs["split_method"] else: split_method = round_robin assert (callable(split_method)) pserver_endpoints = kwargs["pservers"].split(",") self.param_grad_map = split_method(params_and_grads, pserver_endpoints) send_op_ordered_inputs = [] send_op_ordered_outputs = [] epmap = [] for ep, v in self.param_grad_map.iteritems(): send_op_ordered_inputs.extend(v["grads"]) send_op_ordered_outputs.extend(v["params"]) for i in v["grads"]: epmap.append(ep) send_op = program.global_block().append_op( type="send", inputs={"X": send_op_ordered_inputs }, # inputs is a list of tensors to be send outputs={"Out": send_op_ordered_outputs}, attrs={"endpoints": pserver_endpoints, "epmap": epmap}) def get_trainer_program(self): # remove optimize ops and add a send op to main_program self.program.global_block().delete_ops(self.optimize_ops) return self.program def _create_var_for_trainers(self, block, var, trainers): var_list = [] for i in xrange(trainers): var_each = block.create_var( name="%s.trainer_%d" % (var.name, i), psersistable=var.persistable, dtype=var.dtype, shape=var.shape) var_list.append(var_each) return var_list def get_pserver_program(self, endpoint, optimize_ops): pserver_program = Program() for v in self.param_grad_map[endpoint]["params"]: self._clone_param(pserver_program.global_block(), v) optimize_sub_program = Program() grad_var_names = [ var.name for var in self.param_grad_map[endpoint]["grads"] ] for opt_op in optimize_ops: for _, var in opt_op.inputs.iteritems(): # NOTE: append operators to merge gradients from multiple # trainers. If trainers == 1, this is not needed. 
if self.trainers > 1 and var.name in grad_var_names: vars2merge = self._create_var_for_trainers( optimize_sub_program.global_block(), var, self.trainers) merged_var = optimize_sub_program.global_block().create_var( name=var.name, persistable=var.persistable, dtype=var.dtype, shape=var.shape) optimize_sub_program.global_block().append_op( type="sum", inputs={"X": vars2merge}, outputs={"Out": merged_var}) optimize_sub_program.global_block().append_op( type="scale", inputs={"X": merged_var}, outputs={"Out": merged_var}, attrs={"scale": 1.0 / float(self.trainers)}) else: optimize_sub_program.global_block().create_var( name=var.name, persistable=var.persistable, dtype=var.dtype, shape=var.shape) if opt_op.inputs.has_key("Grad"): if opt_op.inputs["Grad"].name in grad_var_names: optimize_sub_program.global_block().append_op( type=opt_op.type, inputs=opt_op.inputs, outputs=opt_op.outputs, attrs=opt_op.attrs) else: optimize_sub_program.global_block().append_op( type=opt_op.type, inputs=opt_op.inputs, outputs=opt_op.outputs, attrs=opt_op.attrs) pserver_program.global_block().append_op( type="recv", inputs={"RX": self.param_grad_map[endpoint]["grads"]}, # grads to recv outputs={}, attrs={ "OptimizeBlock": optimize_sub_program.global_block(), "endpoint": endpoint, "ParamList": [p.name for p in self.param_grad_map[endpoint]["params"]], "GradList": [p.name for p in self.param_grad_map[endpoint]["grads"]], "Trainers": self.trainers }) pserver_program.sync_with_cpp() return pserver_program
10,044
38.392157
90
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/inference_transpiler.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from .. import core from ..framework import Program from ..executor import global_scope class InferenceTranspiler: def transpile(self, program, place, scope=None): ''' Transpile the program. Support only fuse batch normalization now. :param program: program to transpile :type program: Program :param place: inference place :type place: Place :param scope: inference scope :type scope: Scope or None ''' if not isinstance(program, Program): raise TypeError("program should be as Program type") if not isinstance(place, core.CPUPlace) and not isinstance( place, core.CUDAPlace): raise TypeError("place should be as CPUPlace/CUDAPlace type") if scope is None: scope = global_scope() if not isinstance(scope, core.Scope): raise TypeError("scope should be as Scope type or None") self.fuse_batch_norm(program, place, scope) def fuse_batch_norm(self, program, place, scope): ''' Transpile the program by fused batch normalization. The batch normalization followed the convolution or fully connected layer can be integrated with them. Doing so will give us a forward acceleration, especially in environments like mobile or embedded. 
For input X: - Conv process: X = input * W + bias - Batch norm process: X' = (X - mean) / std - Scale Process: Y = a * X' + b After fuse into one operation: Y = (input * W + bias - mean) / std * a + b = input * a * W / std + ((bias - mean) / std * a + b) The operator transformation is: - before: - conv->batch_norm->any_other_op (bias == 0) - conv->elementwise_add->batch_norm->any_other_op (bias != 0) - after: - conv->elementwise_add->any_other_op The transpile stages are: 1. insert elementwise_add op when bias == 0. 2. fuse the batch_norm's parameters to conv and elementwise_add operators. 3. remove batch_norm ops which are not used in any other ops. 4. adjust the input of any_other_op to be the output of elementwise_add operator. 5. remove unused variables. :param program: program to transpile :type program: Program :param place: inference place :type place: Place :param scope: inference scope :type scope: Scope ''' self.scope = scope self.place = place self.block = program.block(0) self.input_map = {} # store the input names should be adjusted i = 0 while i < len(self.block.ops): current_op = self.block.ops[i] # TODO(luotao1): consider only conv2d now. fc would be delt later. if current_op.type in ['conv2d']: # TODO(luotao1): consider single chain network now. # For branch network, we counldn't use block.ops[i + 1] as # the judgment condition. 
next_op = self.block.ops[i + 1] # conv2d without bias if (next_op.type == 'batch_norm'): # insert bias op bias_op = self._insert_bias_op(i + 1, current_op, next_op) # fuse batch_norm self._fuse_param(current_op, next_op, bias_op, 0) # remove batch_norm_op self.block.remove_op(i + 2) i = i + 1 # conv2d with bias, the next_op.type is elementwise_add elif (next_op.type == 'elementwise_add'): next_next_op = self.block.ops[i + 2] if (next_next_op.type == 'batch_norm'): # fuse batch_norm self._fuse_param(current_op, next_next_op, next_op, 1) # remove batch_norm_op self.block.remove_op(i + 2) i = i + 1 i = i + 1 self._adjust_input() self._remove_unused_var() # TODO(luotao): use clone() method to flush the program.desc in force, # since some large program.desc will not be flushed immediately. # And a better solution will be considered later. program = program.clone() # ====================== private transpiler functions ===================== def _insert_bias_op(self, index, current_op, bn_op): ''' Construct elementwise_add operator for adding bias and insert it into program. 
:param index: insert location of bias_op :type index: Int :param current_op: current operator (conv or fc) :type current_op: Operator :param bn_op: batch norm operator :type bn_op: Operator :return: bias_op :rtype: Operator ''' # The input of bias_op is current_op's output and Bias of bn_op # The output of bias_op is bn_op's output x_var = self.block.var(current_op.output("Output")[0]) y_var = self.block.var(bn_op.input("Bias")[0]) out_var = self.block.var(bn_op.output("Y")[0]) bias_op = self.block.insert_op( index, type="elementwise_add", inputs={"X": x_var, "Y": y_var}, outputs={"Out": out_var}, attrs={"axis": 1}) # dim_start=1 return bias_op def _fuse_param(self, current_op, bn_op, bias_op, with_bias): ''' fuse the batch_norm_op' parameters to current_op (conv or fc) :param current_op: current operator (conv or fc) :type current_op: Operator :param bn_op: batch norm operator :type bn_op: Operator :param bias_op: elementwise_add operator for adding bias :type bias_op: Operator :param with_bias: If current operator has bias, with_bias = 1; otherwise 0. :type with_bias: Int ''' def _update_param(op, old_param_name, new_param): # For the sake of remaining the original variables the same as before, # create new variables in scope to store the new parameters. 
old_param_name = old_param_name[0] old_var = self.block.vars[old_param_name] new_param_name = old_param_name + '_fuse_bn' new_var = self.block.create_parameter( name=new_param_name.encode('ascii'), type=old_var.type, dtype=old_var.dtype, shape=old_var.shape) op.rename_input(old_param_name, new_param_name) self.scope.var(new_param_name) tensor = self.scope.find_var(new_param_name).get_tensor() tensor.set(np.array(new_param), self.place) def _load_param(param_name): return np.array(self.scope.find_var(param_name[0]).get_tensor()) bias_bn = _load_param(bn_op.input("Bias")) #Bias scale_bn = _load_param(bn_op.input("Scale")) #Scale mean_bn = _load_param(bn_op.input("Mean")) #Mean var_bn = _load_param(bn_op.input("Variance")) #Variance # TODO(luotao1): consider only conv2d now. fc would be delt later. current_param = _load_param(current_op.input("Filter")) std_bn = np.float32(np.sqrt(np.add(var_bn, 1e-5))) tmp = np.float32(np.divide(scale_bn, std_bn)) # add bias of batch_norm_op to conv2d if with_bias: bias = _load_param(bias_op.input("Y")) else: bias = np.zeros(bias_bn.shape) bias = np.float32( np.add(np.multiply(np.subtract(bias, mean_bn), tmp), bias_bn)) # re-compute weight of conv2d tmp = tmp.reshape(tmp.shape[0], -1) dst_param = current_param.reshape((tmp.shape[0], -1)) dst_param = np.float32(np.multiply(dst_param, tmp)) dst_param = dst_param.reshape(current_param.shape) # update parameters _update_param(current_op, current_op.input("Filter"), dst_param) _update_param(bias_op, bias_op.input("Y"), bias) # collect the renamed input self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0] def _adjust_input(self): for i in range(len(self.block.ops)): current_op = self.block.ops[i] for input_arg in current_op.input_arg_names: if input_arg in self.input_map: current_op.rename_input(input_arg, self.input_map[input_arg]) def _remove_unused_var(self): ''' remove unused varibles in program ''' args = [] for i in range(len(self.block.ops)): current_op = 
self.block.ops[i] args += current_op.input_arg_names args += current_op.output_arg_names args = list(set(args)) # unique the input and output arguments for var in self.block.vars.keys(): if var not in args: self.block.remove_var(var)
9,828
39.784232
89
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from .. import core from ..framework import Program, default_main_program, Parameter, Variable from ..backward import _rename_arg_ dtype_to_size = { core.VarDesc.VarType.FP16: 2, core.VarDesc.VarType.FP32: 4, core.VarDesc.VarType.FP64: 8, core.VarDesc.VarType.INT16: 2, core.VarDesc.VarType.INT32: 4, core.VarDesc.VarType.INT64: 8, core.VarDesc.VarType.BOOL: 1, core.VarDesc.VarType.UINT8: 1, } SUB_BLOCK_OPS = [ "while", "while_grad", "parallel_do", "parallel_do_grad", "conditional_block", "conditional_block_grad" ] SUB_BLOCK_PAIR = [("while", "while_grad"), ("parallel_do", "parallel_do_grad"), ("conditional_block", "conditional_block_grad")] PRINT_LOG = False class ControlFlowGraph(object): def __init__(self, program, ops, forward_num, skip_opt): self._program = program self._ops = ops self._forward_num = forward_num self._successors = defaultdict(set) self._presuccessors = defaultdict(set) self._uses = defaultdict(set) self._defs = defaultdict(set) self._live_in = defaultdict(set) self._live_out = defaultdict(set) self._skip_opt = skip_opt def _add_connections(self, connections): """Populates _successors and _presuccessors for two neighbor nodes.""" for node1, node2 in connections: self._add(node1, node2) def _add(self, node1, node2): self._successors[node1].add(node2) self._presuccessors[node2].add(node1) # TODO(panyx0718): We need to 
have a unified way of building intermediate # representation. def _build_graph(self): """Build a graph based on op sequence. """ self.op_size = len(self._ops) op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)] self._add_connections(op_node_connections) for i in range(self.op_size): self._uses[i].update(self._ops[i].input_arg_names()) self._defs[i].update(self._ops[i].output_arg_names()) def _update_graph(self, old_name, new_name, begin_idx=0): for i in range(begin_idx, self.op_size): if old_name in self._uses[i]: self._uses[i].remove(old_name) self._uses[i].add(new_name) if old_name in self._defs[i]: self._defs[i].remove(old_name) self._defs[i].add(new_name) if old_name in self._live_in[i]: self._live_in[i].remove(old_name) self._live_out[i].add(new_name) if old_name in self._live_out[i]: self._live_out[i].remove(old_name) self._live_out[i].add(new_name) def _reach_fixed_point(self, live_in, live_out): """Check if the liveness set has stablized.""" if len(live_in) != len(self._live_in): return False if len(live_out) != len(self._live_out): return False for i in range(self.op_size): if (live_in[i] != self._live_in[i] or live_out[i] != self._live_out[i]): return False return True def _dataflow_analyze(self): self._build_graph() live_in = defaultdict(set) live_out = defaultdict(set) # Repeatedly apply liveness updates until the algorithm stablize # on a complete set live input vars and live output vars. 
while True: for i in reversed(range(self.op_size)): live_in[i] = set(self._live_in[i]) live_out[i] = set(self._live_out[i]) for s in self._successors[i]: self._live_out[i] |= self._live_in[s] self._live_in[i] = self._uses[i] | ( self._live_out[i] - self._defs[i]) if self._reach_fixed_point(live_in, live_out): break def _get_diff(self, a, b): u = a & b return a - u, b - u def _has_var(self, block_desc, var_name, is_forward): if is_forward: return block_desc.has_var(str(var_name)) else: return block_desc.has_var_recursive(str(var_name)) def _find_var(self, block_desc, var_name, is_forward): if is_forward: return block_desc.find_var(str(var_name)) else: return block_desc.find_var_recursive(str(var_name)) def _check_var_validity(self, block_desc, x, is_forward): if str(x) == "@EMPTY@": return False if not self._has_var(block_desc, x, is_forward): return False if self._find_var(block_desc, x, is_forward).persistable(): return False if self._find_var(block_desc, x, is_forward).type() != core.VarDesc.VarType.LOD_TENSOR: return False if x in self._skip_opt: return False if not self._find_var(block_desc, x, is_forward).shape(): return False return True # TODO(panyx0718): This needs to be less hacky. It seems memory optimization # doesn't consider vars copied between cpu and gpu. 
def _update_skip_opt_set(self): for i in range(self.op_size): op = self._ops[i] if op.type() == "fill_constant" and op.attr("force_cpu") == True: self._skip_opt.update(op.output_arg_names()) def release_memory(self): self._dataflow_analyze() self._update_skip_opt_set() fwd_id = 0 bwd_id = 0 for i in range(self.op_size): op = self._ops[i] if op.type() in SUB_BLOCK_OPS: continue block_desc = op.block() is_forward = i < self._forward_num in_diff, out_diff = self._get_diff(self._live_in[i], self._live_out[i]) can_optimize = filter( lambda x: self._check_var_validity(block_desc, x, is_forward), in_diff) if can_optimize: index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1 delete_op = block_desc.insert_op(index) delete_op.set_type("delete_var") delete_op.set_input("X", can_optimize) if is_forward: fwd_id += 1 else: bwd_id += 1 def memory_optimize(self, level=0): def compare_shape(x_shape, cache_shape, opt_level): if opt_level == 0: return x_shape == cache_shape elif opt_level == 1: if (x_shape[0] == -1) ^ (cache_shape[0] == -1): return False x_size = abs(reduce(lambda x, y: x * y, x_shape)) cache_size = abs(reduce(lambda x, y: x * y, cache_shape)) if x_size <= cache_size: return True else: raise ValueError("only support opt_level 0 or 1.") return False self._dataflow_analyze() self._update_skip_opt_set() self.pool = [] for i in range(self.op_size): op = self._ops[i] if op.type() in SUB_BLOCK_OPS: continue block_desc = op.block() is_forward = i < self._forward_num if self.pool: defs_can_optimize = filter( lambda x: self._check_var_validity(block_desc, x, is_forward), self._defs[i]) out_pair = [ (x, self._find_var(block_desc, x, is_forward).shape()) for x in defs_can_optimize ] for x, x_shape in out_pair: # If x is both in uses and defs, it can not be optimized! 
if x in self._uses[i]: continue for index, cache_pair in enumerate(self.pool): cache_var = cache_pair[0] cache_shape = cache_pair[1] if not compare_shape(x_shape, cache_shape, level): continue if not self._has_var(block_desc, cache_var, is_forward): continue x_dtype = self._find_var(block_desc, x, is_forward).dtype() cache_dtype = self._find_var(block_desc, cache_var, is_forward).dtype() # TODO(qijun): actually, we should compare # dtype_to_size[x_dtype] and dtype_to_size[cache_dtype] if x_dtype != cache_dtype: continue if PRINT_LOG: print(("Hit Cache !!!! cache pool index " "is %d, var name is %s, " "cached var name is %s, " "var shape is %s ") % (index, x, cache_var, str(cache_shape))) self.pool.pop(index) if x == cache_var: break # Rename the var to the cache var already with # memory allocated in order to reuse the memory. _rename_arg_(self._ops, x, cache_var, begin_idx=i) self._program.block(block_desc.id).var(str( x)).desc = self._find_var(block_desc, cache_var, is_forward) self._update_graph(x, cache_var, begin_idx=i) break in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i]) can_optimize = filter( lambda x: self._check_var_validity(block_desc, x, is_forward), in_diff) if can_optimize: for var_name in can_optimize: self.pool.append((var_name, self._find_var( block_desc, var_name, is_forward).shape())) def _process_sub_block_pair(pdesc, sub_block_pair): """Creates a list of tuple each of which tracks info of a subblock. Note: this function doesn't handle nested subblocks yet. TODO(panyx0718): assert if case nested subblocks happen. :param pdesc: ProgramDesc. :param sub_block_pair: A list op pairs. Each op pair is the forward op and backward op. The ops in the list are special that they contain a subblock of ops. :return: A list of tuples, each tuple is (all ops in a subblock pair including forward and backward, number of forward ops, all output args names of the ops in the subblock pairs). 
""" ops_list = [] block_desc = pdesc.block(0) op_size = block_desc.op_size() for fwd_op, bwd_op in sub_block_pair: sub_block_ids = [] grad_sub_block_ids = [] sub_block_id_pair = [] sub_op_dict = {} for i in range(op_size): op = block_desc.op(i) if op.type() == fwd_op: sub_block_ids.append(op.attr("sub_block").id) sub_op_dict[op.attr("sub_block").id] = op elif op.type() == bwd_op: grad_sub_block_ids.append(op.attr("sub_block").id) sub_op_dict[op.attr("sub_block").id] = op # Find fwd_op/bwd_op block pair for grad_id in grad_sub_block_ids: fwd_id = pdesc.block(grad_id).get_forward_block_idx() if fwd_id in sub_block_ids: sub_block_id_pair.append((fwd_id, grad_id)) sub_block_ids.remove(fwd_id) # Get fwd_op/bwd_op block ops for fwd_id, grad_id in sub_block_id_pair: sub_block_ops = [] sub_block = pdesc.block(fwd_id) block_op_size = sub_block.op_size() for i in range(block_op_size): sub_block_ops.append(sub_block.op(i)) grad_sub_block = pdesc.block(grad_id) grad_sub_block_op_size = grad_sub_block.op_size() for i in range(grad_sub_block_op_size): sub_block_ops.append(grad_sub_block.op(i)) sub_op_output = set() sub_op_output.update(sub_op_dict[fwd_id].output_arg_names()) sub_op_output.update(sub_op_dict[grad_id].output_arg_names()) ops_list.append((sub_block_ops, block_op_size, sub_op_output)) # Process rest fwd_op block ops for fwd_id in sub_block_ids: sub_block_ops = [] sub_block = pdesc.block(fwd_id) sub_block_op_size = sub_block.op_size() for i in range(sub_block_op_size): sub_block_ops.append(sub_block.op(i)) sub_op_output = set() sub_op_output.update(sub_op_dict[fwd_id].output_arg_names()) ops_list.append((sub_block_ops, sub_block_op_size, sub_op_output)) return ops_list def _get_cfgs(input_program): """Process each block and create ControlFlowGraph for each of them. :param input_program: Program object. :return: A list of ControlFlowGraph, each corresponds to a block. 
""" ops_list = [] pdesc = input_program.get_desc() block_desc = pdesc.block(0) op_size = block_desc.op_size() # Get global block ops ops_list.append( ([block_desc.op(i) for i in range(op_size)], op_size, set())) # Only process one level of nested subblock. ops_list.extend(_process_sub_block_pair(pdesc, SUB_BLOCK_PAIR)) cfgs = [ ControlFlowGraph(input_program, ops, forward_num, skip_opt) for ops, forward_num, skip_opt in ops_list ] return cfgs def memory_optimize(input_program, print_log=False, level=0): """Optimize memory by reusing var memory. Note: it doesn't not support subblock nested in subblock. :param input_program: Input Program :param print_log: whether to print debug log. :param level: If level=0, reuse if the shape is completely equal, o :return: """ if level != 0 and level != 1: raise ValueError("only support opt_level 0 or 1.") global PRINT_LOG PRINT_LOG = print_log cfgs = _get_cfgs(input_program) for cfg in cfgs: cfg.memory_optimize(level) def release_memory(input_program): cfgs = _get_cfgs(input_program) for cfg in cfgs: cfg.release_memory()
15,303
38.854167
92
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/__init__.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from distribute_transpiler import DistributeTranspiler from inference_transpiler import InferenceTranspiler from memory_optimization_transpiler import memory_optimize, release_memory from distribute_transpiler_simple import SimpleDistributeTranspiler from ps_dispatcher import HashName, RoundRobin __all__ = [ "DistributeTranspiler", "InferenceTranspiler", "SimpleDistributeTranspiler", "memory_optimize", "release_memory", "HashName", "RoundRobin" ]
1,073
41.96
80
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/distribute_transpiler.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Transpile the program to distributed data-parallelism programs. The main_program will be transformed to use a remote parameter server to do parameter optimization. And the optimization graph will be put into a parameter server program. Use different methods to split trainable variables to different parameter servers. Steps to transpile trainer: 1. split variable to multiple blocks, aligned by product(dim[1:]) (width). 2. rename splited grad variables to add trainer_id suffix ".trainer_%d". 3. modify trainer program add split_op to each grad variable. 4. append send_op to send splited variables to server and fetch params(splited blocks or origin param) from server. 5. append concat_op to merge splited blocks to update local weights. Steps to transpile pserver: 1. create new program for parameter server. 2. create params and grad variables that assigned to current server instance. 3. create a sub-block in the server side program 4. append ops that should run on current server instance. 5. add listen_and_serv op """ from __future__ import print_function import math from ps_dispatcher import RoundRobin, HashName, PSDispatcher from .. 
import core, framework from ..framework import Program, default_main_program, \ default_startup_program, \ Variable, Parameter, grad_var_name from details import * LOOKUP_TABLE_TYPE = "lookup_table" LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad" OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName() RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName( ) RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC class VarBlock: def __init__(self, varname, offset, size): self.varname = varname # NOTE: real offset is offset * size self.offset = offset self.size = size def __str__(self): return "%s:%d:%d" % (self.varname, self.offset, self.size) def same_or_split_var(p_name, var_name): return p_name == var_name or p_name.startswith(var_name + ".block") def split_variable(var_list, service_count, min_block_size=8192): """ We may need to split dense tensor to one or more blocks and put them equally onto parameter server. One block is a sub-tensor aligned by dim[0] of the tensor. We need to have a minimal block size so that the calculations in the parameter server side can gain better performance. By default minimum block size 8K elements (maybe 16bit or 32bit or 64bit). Args: var_list (list): List of variables. service_count (int): Numel of pserver services. A pserver may have two or more listening ports. min_block_size (int): Minimum splitted block size. Returns: blocks (list[(varname, block_id, current_block_size)]): A list of VarBlocks. Each VarBlock specifies a shard of the var. 
""" blocks = [] for var in var_list: split_count = service_count var_numel = reduce(lambda x, y: x * y, var.shape) max_pserver_count = int(math.floor(var_numel / float(min_block_size))) if max_pserver_count == 0: max_pserver_count = 1 if max_pserver_count < service_count: split_count = max_pserver_count block_size = int(math.ceil(var_numel / float(split_count))) if len(var.shape) >= 2: # align by dim1(width) dim1 = reduce(lambda x, y: x * y, var.shape[1:]) remains = block_size % dim1 if remains != 0: block_size += dim1 - remains # update split_count after aligning split_count = int(math.ceil(var_numel / float(block_size))) for block_id in xrange(split_count): curr_block_size = min(block_size, var_numel - ( (block_id) * block_size)) block = VarBlock(var.name, block_id, curr_block_size) blocks.append(str(block)) return blocks class DistributeTranspiler: def _has_distributed_lookup_table(self): # process lookup_table_op # 1. check all lookup_table_op is distributed # 2. check all lookup_table_op share the same table. distributed_lookup_table_ops = [] # support only one distributed_lookup_table now self.table_name = None for op in self.origin_program.global_block().ops: if op.type == LOOKUP_TABLE_TYPE: if op.attrs['is_distributed'] is True: if self.table_name is None: self.table_name = op.input("W")[0] if self.table_name != op.input("W")[0]: raise RuntimeError("all distributed lookup_table_ops" " should have only one table") distributed_lookup_table_ops.append(op) else: if self.table_name is not None: assert op.input("W")[0] != self.table_name return len(distributed_lookup_table_ops) > 0 def _update_dist_lookup_table_vars(self, param_list, grad_list, params_grads): # TODO(wuyi): put find a way to put dist lookup table stuff all together. 
# update self.table_param_grad and self.trainer_side_table_grad_list program = self.origin_program if self.has_distributed_lookup_table: param_list = [ param for param in param_list if param.name != self.table_name ] grad_list = [ grad for grad in grad_list if grad.name != grad_var_name(self.table_name) ] self.table_param_grad = [ param_grad for param_grad in params_grads if param_grad[0].name == self.table_name ][0] table_grad_var = self.table_param_grad[1] if self.sync_mode: self.trainer_side_table_grad_list = [ program.global_block().create_var( name="%s.trainer_%d.pserver_%d" % (table_grad_var.name, self.trainer_id, index), type=table_grad_var.type, shape=table_grad_var.shape, dtype=table_grad_var.dtype) for index in range(len(self.pserver_endpoints)) ] else: self.trainer_side_table_grad_list = [ program.global_block().create_var( name="%s.pserver_%d" % (table_grad_var.name, index), type=table_grad_var.type, shape=table_grad_var.shape, dtype=table_grad_var.dtype) for index in range(len(self.pserver_endpoints)) ] def _init_splited_vars(self, split_method): # update these mappings for further transpile: # 1. param_var_mapping: param var name -> [splited params vars] # 2. grad_var_mapping: grad var name -> [splited grads vars] # 3. grad_param_mapping: grad.blockx -> param.blockx # 4. 
param_grad_ep_mapping: ep -> {"params": [], "grads": []} param_list = [] grad_list = [] param_grad_set = set() for p, g in self.params_grads: # skip parameter marked not trainable if type(p) == Parameter and p.trainable == False: continue if p.name not in param_grad_set: param_list.append(p) param_grad_set.add(p.name) if g.name not in param_grad_set: grad_list.append(g) param_grad_set.add(g.name) self._update_dist_lookup_table_vars(param_list, grad_list, self.params_grads) grad_blocks = split_variable(grad_list, len(self.pserver_endpoints)) param_blocks = split_variable(param_list, len(self.pserver_endpoints)) assert (len(grad_blocks) == len(param_blocks)) # origin_varname -> [splited_var] self.param_var_mapping = self._create_vars_from_blocklist( self.origin_program, param_blocks) self.grad_var_mapping = self._create_vars_from_blocklist( self.origin_program, grad_blocks, add_trainer_suffix=self.trainer_num > 1) self.grad_param_mapping = dict() for g, p in zip(grad_blocks, param_blocks): g_name, g_bid, _ = g.split(":") p_name, p_bid, _ = p.split(":") self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \ self.param_var_mapping[p_name][int(p_bid)] # create mapping of endpoint -> split var to create pserver side program self.param_grad_ep_mapping = dict() [ self.param_grad_ep_mapping.update({ ep: { "params": [], "grads": [] } }) for ep in self.pserver_endpoints ] def transpile(self, trainer_id, program=None, pservers="127.0.0.1:6174", trainers=1, split_method=RoundRobin, sync_mode=True): """ :param trainer_id: one unique id for each trainer in a job. :type trainer_id: int :param program: program to transpile, default is default_main_program :type program: Program :param pservers: parameter server endpoints like "m1:6174,m2:6174" :type pservers: string :param trainers: total number of workers/trainers in the job :type trainers: int :param split_method: A function to determin how to split variables to different servers equally. 
:type split_method: function :type sync_mode: boolean default True :param sync_mode: if sync_mode is set True, it means that dist transpiler will transpile the program into sync_mode pserver and trainer program. """ assert (split_method.__bases__[0] == PSDispatcher) if program is None: program = default_main_program() self.origin_program = program self.trainer_num = trainers self.sync_mode = sync_mode self.trainer_id = trainer_id pserver_endpoints = pservers.split(",") self.pserver_endpoints = pserver_endpoints self.optimize_ops, self.params_grads = self._get_optimize_pass() ps_dispatcher = split_method(self.pserver_endpoints) self.has_distributed_lookup_table = self._has_distributed_lookup_table() # split and create vars, then put splited vars in dicts for later use. self._init_splited_vars(split_method) # step 3.1: insert send op to send gradient vars to parameter servers ps_dispatcher.reset() send_vars = [] for orig_varname, splited_vars in self.grad_var_mapping.items(): eplist = ps_dispatcher.dispatch(splited_vars) if len(splited_vars) == 1: orig_varname = splited_vars[0].name index = find_op_by_output_arg(program.global_block(), orig_varname) elif len(splited_vars) > 1: orig_var = program.global_block().vars[orig_varname] index = find_op_by_output_arg(program.global_block(), orig_varname) self._insert_split_op(program, orig_var, index, splited_vars) index += 1 else: AssertionError("Can not insert the send op by original " "variable name :", orig_varname) program.global_block().insert_op( index=index + 1, type="send_vars", inputs={"X": splited_vars}, outputs={}, attrs={ "epmap": eplist, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) for _, var in enumerate(splited_vars): send_vars.append(var) if self.sync_mode: program.global_block().append_op( type="send_barrier", inputs={}, outputs={}, attrs={ "endpoints": pserver_endpoints, "sync_mode": self.sync_mode, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) # step 3.2: insert recv op to receive parameters from 
parameter server recv_vars = [] for _, var in enumerate(send_vars): recv_vars.append(self.grad_param_mapping[var]) ps_dispatcher.reset() eplist = ps_dispatcher.dispatch(recv_vars) for i, ep in enumerate(eplist): self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i]) self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) # step4: Concat the parameters splits together after recv. for varname, splited_var in self.param_var_mapping.iteritems(): eps = [] for var in splited_var: index = [v.name for v in recv_vars].index(var.name) eps.append(eplist[index]) program.global_block().append_op( type="recv", inputs={}, outputs={"Out": splited_var}, attrs={ "epmap": eps, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) program.global_block().append_op( type="fetch_barrier", inputs={}, outputs={}, attrs={ "endpoints": pserver_endpoints, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) for varname, splited_var in self.param_var_mapping.iteritems(): if len(splited_var) <= 1: continue orig_param = program.global_block().vars[varname] program.global_block().append_op( type="concat", inputs={"X": splited_var}, outputs={"Out": [orig_param]}, attrs={"axis": 0}) if self.has_distributed_lookup_table: self._replace_lookup_table_op_with_prefetch(program, pserver_endpoints) self._split_table_grad_and_add_send_vars(program, pserver_endpoints) def get_trainer_program(self): # remove optimize ops and add a send op to main_program delete_ops(self.origin_program.global_block(), self.optimize_ops) # FIXME(typhoonzero): serialize once will fix error occurs when clone. self.origin_program.__str__() return self.origin_program def get_pserver_program(self, endpoint): """ Get pserver side program using the endpoint. TODO(panyx0718): Revisit this assumption. what if #blocks > #pservers. NOTE: assume blocks of the same variable is not distributed on the same pserver, only change param/grad varnames for trainers to fetch. 
""" # step1 pserver_program = Program() # step2: Create vars to receive vars at parameter servers. recv_inputs = [] for v in self.param_grad_ep_mapping[endpoint]["params"]: self._clone_var(pserver_program.global_block(), v) for v in self.param_grad_ep_mapping[endpoint]["grads"]: # create vars for each trainer in global scope, so # we don't need to create them when grad arrives. # change client side var name to origin name by # removing ".trainer_%d" suffix suff_idx = v.name.find(".trainer_") if suff_idx >= 0: orig_var_name = v.name[:suff_idx] else: orig_var_name = v.name # NOTE: single_trainer_var must be created for multi-trainer # case to merge grads from multiple trainers single_trainer_var = \ pserver_program.global_block().create_var( name=orig_var_name, persistable=True, type=v.type, dtype=v.dtype, shape=v.shape) if self.sync_mode and self.trainer_num > 1: for trainer_id in xrange(self.trainer_num): var = pserver_program.global_block().create_var( name="%s.trainer_%d" % (orig_var_name, trainer_id), persistable=False, type=v.type, dtype=v.dtype, shape=v.shape) recv_inputs.append(var) else: recv_inputs.append(single_trainer_var) # step 3 # Create a union-find data structure from optimize ops, # If two ops are connected, we could add these two ops # into one set. ufind = self._create_ufind(self.optimize_ops) # step 3.2 # Iterate through the ops and append optimize op which # located on current pserver opt_op_on_pserver = [] for _, op in enumerate(self.optimize_ops): if self._is_optimizer_op(op) and self._is_opt_op_on_pserver( endpoint, op): opt_op_on_pserver.append(op) # step 3.3 # Iterate through the ops, and if an op and the optimize ops # which located on current pserver are in one set, then # append it into the sub program. global_ops = [] # HACK: optimization global ops only used to scale beta1 and beta2 # replace it with dependency engine. 
for op in self.optimize_ops: if self._is_adam_connected_op(op): global_ops.append(op) def __append_optimize_op__(op, block, grad_to_block_id, merged_var): if self._is_optimizer_op(op): self._append_pserver_ops(block, op, endpoint, grad_to_block_id, self.origin_program, merged_var) else: self._append_pserver_non_opt_ops(block, op, endpoint) def __op_have_grad_input__(op): for varname in op.input_arg_names: if varname.find("@GRAD") >= 0: return varname return "" # append lr decay ops to the child block if exists lr_ops = self._get_lr_ops() if len(lr_ops) > 0: lr_decay_block = pserver_program.create_block( pserver_program.num_blocks - 1) for _, op in enumerate(lr_ops): self._append_pserver_non_opt_ops(lr_decay_block, op, endpoint) # append op to the current block grad_to_block_id = [] pre_block_idx = pserver_program.num_blocks - 1 for idx, opt_op in enumerate(opt_op_on_pserver): per_opt_block = pserver_program.create_block(pre_block_idx) # append grad merging ops before clip and weight decay for _, op in enumerate(self.optimize_ops): # find the origin @GRAD var before clipping grad_varname_for_block = __op_have_grad_input__(op) if ufind.is_connected(op, opt_op) and grad_varname_for_block: merged_var = self._append_pserver_grad_merge_ops( per_opt_block, grad_varname_for_block, endpoint, grad_to_block_id, self.origin_program) for _, op in enumerate(self.optimize_ops): # optimizer is connected to itself if ufind.is_connected(op, opt_op) and op not in global_ops: __append_optimize_op__(op, per_opt_block, grad_to_block_id, merged_var) # append global ops if global_ops: opt_state_block = pserver_program.create_block( pserver_program.num_blocks - 1) for glb_op in global_ops: __append_optimize_op__(glb_op, opt_state_block, grad_to_block_id, None) # process distributed lookup_table prefetch_block = None if self.has_distributed_lookup_table: pserver_index = self.pserver_endpoints.index(endpoint) table_opt_block = self._create_table_optimize_block( pserver_index, 
pserver_program, pre_block_idx, grad_to_block_id) prefetch_block = self._create_prefetch_block( pserver_index, pserver_program, table_opt_block) # NOTE: if has_distributed_lookup_table is False, then prefetch_block will # not be executed, so it's safe to use optimize_block to hold the place if self.has_distributed_lookup_table: assert prefetch_block is not None else: assert prefetch_block is None prefetch_block = pserver_program.global_block() # step5 append the listen_and_serv op pserver_program.global_block().append_op( type="listen_and_serv", inputs={'X': recv_inputs}, outputs={}, attrs={ "OptimizeBlock": pserver_program.block(1), "endpoint": endpoint, "Fanin": self.trainer_num, "PrefetchBlock": prefetch_block, "sync_mode": self.sync_mode, "grad_to_block_id": grad_to_block_id }) pserver_program.sync_with_cpp() return pserver_program def get_startup_program(self, endpoint, pserver_program): """ Get startup program for current parameter server. Modify operator input variables if there are variables that were split to several blocks. """ s_prog = Program() orig_s_prog = default_startup_program() params = self.param_grad_ep_mapping[endpoint]["params"] def _get_splited_name_and_shape(varname): for idx, splited_param in enumerate(params): pname = splited_param.name if same_or_split_var(pname, varname) and varname != pname: return pname, splited_param.shape return "", [] # 1. create vars in pserver program to startup program pserver_vars = pserver_program.global_block().vars created_var_map = dict() for _, var in pserver_vars.iteritems(): tmpvar = s_prog.global_block().clone_variable(var) created_var_map[var.name] = tmpvar # 2. 
rename op outputs for op in orig_s_prog.global_block().ops: new_inputs = dict() new_outputs = dict() # do not append startup op if var is not on this pserver op_on_pserver = False for key in op.output_names: newname, _ = _get_splited_name_and_shape(op.output(key)[0]) if newname: op_on_pserver = True new_outputs[key] = created_var_map[newname] elif op.output(key)[0] in pserver_vars: op_on_pserver = True new_outputs[key] = pserver_vars[op.output(key)[0]] # most startup program ops have no inputs new_inputs = self._get_input_map_from_op(pserver_vars, op) if op_on_pserver: if op.type in [ "gaussian_random", "fill_constant", "uniform_random" ]: op.attrs["shape"] = new_outputs["Out"].shape s_prog.global_block().append_op( type=op.type, inputs=new_inputs, outputs=new_outputs, attrs=op.attrs) return s_prog # ====================== private transpiler functions ===================== # transpiler function for dis lookup_table def _replace_lookup_table_op_with_prefetch(self, program, pserver_endpoints): # 1. 
replace lookup_table_op with split_ids_op -> prefetch_op -> sum_op self.prefetch_input_vars = None self.prefetch_output_vars = None continue_search_lookup_table_op = True while continue_search_lookup_table_op: continue_search_lookup_table_op = False all_ops = program.global_block().ops for op in all_ops: if op.type == LOOKUP_TABLE_TYPE: continue_search_lookup_table_op = True op_index = list(all_ops).index(op) ids_name = op.input("Ids") out_name = op.output("Out") if self.prefetch_input_vars is None: ids_var = program.global_block().vars[ids_name[0]] self.prefetch_input_vars = self.create_splited_vars( source_var=ids_var, block=program.global_block(), tag="_prefetch_in_") if self.prefetch_output_vars is None: out_var = program.global_block().vars[out_name[0]] self.prefetch_output_vars = self.create_splited_vars( source_var=out_var, block=program.global_block(), tag="_prefetch_out_") # insert split_ids_op program.global_block().insert_op( index=op_index, type="split_ids", inputs={ 'Ids': [ program.global_block().vars[varname] for varname in ids_name ] }, outputs={"Out": self.prefetch_input_vars}) # insert prefetch_op program.global_block().insert_op( index=op_index + 1, type="prefetch", inputs={'X': self.prefetch_input_vars}, outputs={"Out": self.prefetch_output_vars}, attrs={ "epmap": pserver_endpoints, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) # insert concat_op program.global_block().insert_op( index=op_index + 2, type="concat", inputs={'X': self.prefetch_output_vars}, outputs={ "Out": [ program.global_block().vars[varname] for varname in out_name ] }, attrs={"axis": 0}) # delete lookup_table_op delete_ops(program.global_block(), [op]) # break for loop break def _split_table_grad_and_add_send_vars(self, program, pserver_endpoints): # 2. 
add split_ids_op and send_vars_op to send gradient to pservers # there should only be one table_name all_ops = program.global_block().ops table_grad_name = grad_var_name(self.table_name) for op in all_ops: if table_grad_name in op.output_arg_names: op_index = list(all_ops).index(op) # insert split_ids_op program.global_block().insert_op( index=op_index + 1, type="split_ids", inputs={ 'Ids': [program.global_block().vars[table_grad_name]] }, outputs={"Out": self.trainer_side_table_grad_list}) program.global_block().insert_op( index=op_index + 2, type="send_vars", inputs={'X': self.trainer_side_table_grad_list}, outputs={}, attrs={ "sync_send": True, "epmap": pserver_endpoints, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) break def _create_prefetch_block(self, pserver_index, pserver_program, optimize_block): # STEP: create prefetch block table_var = pserver_program.global_block().vars[self.table_name] prefetch_block = pserver_program.create_block(optimize_block.idx) trainer_ids = self.prefetch_input_vars[pserver_index] pserver_ids = pserver_program.global_block().create_var( name=trainer_ids.name, type=trainer_ids.type, shape=trainer_ids.shape, dtype=trainer_ids.dtype) trainer_out = self.prefetch_output_vars[pserver_index] pserver_out = pserver_program.global_block().create_var( name=trainer_out.name, type=trainer_out.type, shape=trainer_out.shape, dtype=trainer_out.dtype) prefetch_block.append_op( type="lookup_sparse_table", inputs={'Ids': pserver_ids, "W": table_var}, outputs={"Out": pserver_out}, attrs={ "is_sparse": True, # has no effect on lookup_table op "is_distributed": True, "padding_idx": -1 }) return prefetch_block def _create_table_optimize_block(self, pserver_index, pserver_program, pre_block_idx, grad_to_block_id): # STEP: create table optimize block # create table param and grad var in pserver program origin_param_var = self.origin_program.global_block().vars[ self.table_name] param_var = pserver_program.global_block().create_var( 
name=origin_param_var.name, shape=origin_param_var.shape, dtype=origin_param_var.dtype, type=core.VarDesc.VarType.SELECTED_ROWS, persistable=True) # parameter must be selected rows param_var.desc.set_type(core.VarDesc.VarType.SELECTED_ROWS) grad_var = pserver_program.global_block().clone_variable( self.origin_program.global_block().vars[grad_var_name( self.table_name)]) # create table optimize block in pserver program table_opt_op = [ op for op in self.optimize_ops if op.input("Param")[0] == self.table_name ][0] table_opt_block = pserver_program.create_block(pre_block_idx) # only support sgd now assert table_opt_op.type == "sgd" if self.sync_mode: # create grad vars in pserver program table_grad_var = self.table_param_grad[1] pserver_side_table_grad_list = [ pserver_program.global_block().create_var( name="%s.trainer_%d.pserver_%d" % (table_grad_var.name, index, pserver_index), type=table_grad_var.type, shape=table_grad_var.shape, dtype=table_grad_var.dtype) for index in range(self.trainer_num) ] # append sum op for pserver_side_table_grad_list table_opt_block.append_op( type="sum", inputs={"X": pserver_side_table_grad_list}, outputs={"Out": [grad_var]}) else: # in async_mode, for table gradient, it also need to be splited to each parameter server origin_grad_name = grad_var.name splited_grad_name = self.trainer_side_table_grad_list[ pserver_index].name if not splited_grad_name.startswith(origin_grad_name): raise ValueError("origin_grad_var: " + splited_grad_name + " grad_var:" + grad_var.name) grad_var = pserver_program.global_block().rename_var( origin_grad_name, splited_grad_name) lr_var = pserver_program.global_block().vars[table_opt_op.input( "LearningRate")[0]] inputs = { "Param": [param_var], "Grad": [grad_var], "LearningRate": [lr_var] } outputs = {"ParamOut": [param_var]} table_opt_block.append_op( type=table_opt_op.type, inputs=inputs, outputs=outputs, attrs=table_opt_op.attrs) # add table parameter gradient and it's block id to grad_to_block_id 
grad_to_block_id.append(grad_var.name + ":" + str(table_opt_block.idx)) return table_opt_block def _create_vars_from_blocklist(self, program, block_list, add_trainer_suffix=False): """ Create vars for each split. NOTE: only grads need to be named for different trainers, use add_trainer_suffix to rename the grad vars. Args: program (ProgramDesc): ProgramDesc which gradients blong. block_list (list[(varname, block_id, block_size)]): List of gradient blocks. add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True. Returns: var_mapping (dict(varname->[new_varname_variable])):A dict mapping from original var name to each var split. """ # varname->[(block_id, current_block_size)] block_map = dict() var_mapping = dict() for block_str in block_list: varname, offset, size = block_str.split(":") if not block_map.has_key(varname): block_map[varname] = [] block_map[varname].append((long(offset), long(size))) # Do not remove this important debug message: print("block map: %s" % block_map) for varname, splited in block_map.iteritems(): orig_var = program.global_block().var(varname) if len(splited) == 1: if self.sync_mode and add_trainer_suffix: new_var_name = "%s.trainer_%d" % \ (orig_var.name, self.trainer_id) program.global_block().rename_var(varname, new_var_name) var_mapping[varname] = \ [program.global_block().var(new_var_name)] else: var_mapping[varname] = \ [program.global_block().var(orig_var.name)] continue var_mapping[varname] = [] orig_shape = orig_var.shape orig_dim1_flatten = 1 if len(orig_shape) >= 2: orig_dim1_flatten = reduce(lambda x, y: x * y, orig_shape[1:]) for i, block in enumerate(splited): size = block[1] rows = size / orig_dim1_flatten splited_shape = [rows] if len(orig_shape) >= 2: splited_shape.extend(orig_shape[1:]) new_var_name = "" if self.sync_mode and add_trainer_suffix: new_var_name = "%s.block%d.trainer_%d" % \ (varname, i, self.trainer_id) else: new_var_name = "%s.block%d" % \ (varname, i) var = 
program.global_block().create_var( name=new_var_name, persistable=False, dtype=orig_var.dtype, type=orig_var.type, shape=splited_shape) # flattend splited var var_mapping[varname].append(var) program.global_block().sync_with_cpp() return var_mapping def create_splited_vars(self, source_var, block, tag): return [ block.create_var( name=str(source_var.name + tag + str(index)), type=source_var.type, shape=source_var.shape, dtype=source_var.dtype) for index in range(len(self.pserver_endpoints)) ] def _clone_var(self, block, var, persistable=True): assert isinstance(var, Variable) return block.create_var( name=var.name, shape=var.shape, dtype=var.dtype, type=var.type, lod_level=var.lod_level, persistable=persistable) def _insert_split_op(self, program, orig_var, index, splited_vars): if orig_var.type == core.VarDesc.VarType.SELECTED_ROWS: height_sections = [] for v in splited_vars: height_sections.append(v.shape[0]) program.global_block().insert_op( index=index + 1, type="split_selected_rows", inputs={"X": orig_var}, outputs={"Out": splited_vars}, attrs={"height_sections": height_sections}) elif orig_var.type == core.VarDesc.VarType.LOD_TENSOR: sections = [] for v in splited_vars: sections.append(v.shape[0]) program.global_block().insert_op( index=index + 1, type="split_byref", inputs={"X": orig_var}, outputs={"Out": splited_vars}, attrs={"sections": sections} # assume split evenly ) else: AssertionError("Variable type should be in set " "[LOD_TENSOR, SELECTED_ROWS]") def _get_optimizer_input_shape(self, op_type, varkey, orig_shape, param_shape): """ Returns the shape for optimizer inputs that need to be reshaped when Param and Grad is split to multiple servers. """ # HACK(typhoonzero): Should use functions of corresponding optimizer in # optimizer.py to get the shape, do not bind this in the transpiler. 
if op_type == "adam": if varkey in ["Moment1", "Moment2"]: return param_shape elif op_type == "adagrad": if varkey == "Moment": return param_shape elif op_type == "adamax": if varkey in ["Moment", "InfNorm"]: return param_shape elif op_type == "momentum": if varkey == "Velocity": return param_shape elif op_type == "": if varkey == "Moment": return param_shape elif op_type == "sgd": pass return orig_shape def _get_varname_parts(self, varname): # returns origin, blockid, trainerid orig_var_name = "" trainer_part = "" block_part = "" trainer_idx = varname.find(".trainer_") if trainer_idx >= 0: trainer_part = varname[trainer_idx + 1:] else: trainer_idx = len(varname) block_index = varname.find(".block") if block_index >= 0: block_part = varname[block_index + 1:trainer_idx] else: block_index = len(varname) orig_var_name = varname[0:min(block_index, trainer_idx)] return orig_var_name, block_part, trainer_part def _orig_varname(self, varname): orig, _, _ = self._get_varname_parts(varname) return orig def _append_pserver_grad_merge_ops(self, optimize_block, grad_varname_for_block, endpoint, grad_to_block_id, origin_program): program = optimize_block.program pserver_block = program.global_block() grad_block = None for g in self.param_grad_ep_mapping[endpoint]["grads"]: if self._orig_varname(g.name) == \ self._orig_varname(grad_varname_for_block): grad_block = g break if not grad_block: # do not append this op if current endpoint # is not dealing with this grad block return orig_varname, block_name, trainer_name = self._get_varname_parts( grad_block.name) if block_name: merged_var_name = '.'.join([orig_varname, block_name]) else: merged_var_name = orig_varname merged_var = \ pserver_block.vars[merged_var_name] grad_to_block_id.append(merged_var.name + ":" + str(optimize_block.idx)) if self.sync_mode and self.trainer_num > 1: vars2merge = [] for i in xrange(self.trainer_num): per_trainer_name = "%s.trainer_%d" % \ (merged_var_name, i) 
vars2merge.append(pserver_block.vars[per_trainer_name]) optimize_block.append_op( type="sum", inputs={"X": vars2merge}, outputs={"Out": merged_var}) # TODO(panyx0718): What if it's SELECTED_ROWS. if not merged_var.type == core.VarDesc.VarType.SELECTED_ROWS: optimize_block.append_op( type="scale", inputs={"X": merged_var}, outputs={"Out": merged_var}, attrs={"scale": 1.0 / float(self.trainer_num)}) return merged_var def _append_pserver_ops(self, optimize_block, opt_op, endpoint, grad_to_block_id, origin_program, merged_var): program = optimize_block.program pserver_block = program.global_block() new_inputs = dict() # update param/grad shape first, then other inputs like # moment can use the updated shape for key in opt_op.input_names: if key == "Grad": new_inputs[key] = merged_var elif key == "Param": # param is already created on global program param_block = None for p in self.param_grad_ep_mapping[endpoint]["params"]: if same_or_split_var(p.name, opt_op.input(key)[0]): param_block = p break if not param_block: return tmpvar = pserver_block.create_var( name=param_block.name, persistable=True, dtype=param_block.dtype, shape=param_block.shape) new_inputs[key] = tmpvar elif key == "LearningRate": # learning rate variable has already be created by non-optimize op, # don't create it once again. 
lr_varname = opt_op.input(key)[0] if pserver_block.vars.has_key(lr_varname): new_inputs[key] = pserver_block.vars[opt_op.input(key)[0]] else: origin_var = origin_program.global_block().vars[lr_varname] tmpvar = pserver_block.create_var( name=origin_var.name, persistable=origin_var.persistable, dtype=origin_var.dtype, shape=origin_var.shape) new_inputs[key] = tmpvar for key in opt_op.input_names: new_shape = None if key in ["Param", "Grad", "LearningRate"]: continue var = self.origin_program.global_block().vars[opt_op.input(key)[0]] # update accumulator variable shape param_shape = new_inputs["Param"].shape new_shape = self._get_optimizer_input_shape(opt_op.type, key, var.shape, param_shape) tmpvar = pserver_block.create_var( name=var.name, persistable=var.persistable, dtype=var.dtype, shape=new_shape) new_inputs[key] = tmpvar # change output's ParamOut variable outputs = self._get_output_map_from_op( self.origin_program.global_block().vars, opt_op) outputs["ParamOut"] = new_inputs["Param"] optimize_block.append_op( type=opt_op.type, inputs=new_inputs, outputs=outputs, attrs=opt_op.attrs) def _is_splited_grad_var(self, var, var_dict): grad_block = None for _, g in var_dict.iteritems(): if self._orig_varname(g.name) == self._orig_varname(var.name): if g.name.find(".trainer_") == -1: grad_block = g break return grad_block def _append_pserver_non_opt_ops(self, optimize_block, opt_op, endpoint): program = optimize_block.program # Append the ops for parameters that do not need to be optimized/updated inputs = self._get_input_map_from_op( self.origin_program.global_block().vars, opt_op) for key, varlist in inputs.iteritems(): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: # for ops like clipping and weight decay, get the splited var # for inputs/outputs grad_block = self._is_splited_grad_var( var, program.global_block().vars) if grad_block: inputs[key] = grad_block elif not program.global_block().vars.has_key(var.name): 
program.global_block().create_var( name=var.name, persistable=var.persistable, dtype=var.dtype, shape=var.shape) outputs = self._get_output_map_from_op( self.origin_program.global_block().vars, opt_op) for key, varlist in outputs.iteritems(): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: grad_block = self._is_splited_grad_var( var, program.global_block().vars) if grad_block: outputs[key] = grad_block elif not program.global_block().vars.has_key(var.name): program.global_block().clone_variable(var) optimize_block.append_op( type=opt_op.type, inputs=inputs, outputs=outputs, attrs=opt_op.attrs) def _is_op_connected(self, op1, op2): # If one op's input is another op's output or # one op's output is another op's input, we say # the two operator is connected. def _append_inname_remove_beta(varname_list): op_input_names = [] for in_name in varname_list: # HACK: remove beta1 and beta2 to avoid let all # ops connected. if in_name.startswith("beta2_pow_acc") or \ in_name.startswith("beta1_pow_acc"): continue else: op_input_names.append(in_name) return op_input_names op1_input_names = _append_inname_remove_beta(op1.desc.input_arg_names()) op1_output_names = op1.desc.output_arg_names() op2_input_names = _append_inname_remove_beta(op2.desc.input_arg_names()) op2_output_names = op2.desc.output_arg_names() if set(op1_output_names) & set(op2_input_names) or \ set(op1_input_names) & set(op2_output_names): return True return False def _create_ufind(self, optimize_ops): # Create a unit find data struct by optimize ops ufind = UnionFind(optimize_ops) for i in xrange(len(optimize_ops)): for j in xrange(i, len(optimize_ops)): op1 = optimize_ops[i] op2 = optimize_ops[j] if self._is_op_connected(op1, op2): ufind.union(op1, op2) return ufind def _is_opt_role_op(self, op): # NOTE: depend on oprole to find out whether this op is for # optimize op_maker = core.op_proto_and_checker_maker optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize if 
op_maker.kOpRoleAttrName() in op.attrs and \ int(op.attrs[op_maker.kOpRoleAttrName()]) == int(optimize_role): return True return False def _is_optimizer_op(self, op): if "Param" in op.input_names and \ "LearningRate" in op.input_names: return True return False def _is_opt_op_on_pserver(self, endpoint, op): param_names = [ p.name for p in self.param_grad_ep_mapping[endpoint]["params"] ] if op.input("Param")[0] in param_names: return True else: for n in param_names: param = op.input("Param")[0] if same_or_split_var(n, param) and n != param: return True return False def _get_input_map_from_op(self, varmap, op): """Returns a dict from op input name to the vars in varmap.""" iomap = dict() for key in op.input_names: vars = [] for varname in op.input(key): vars.append(varmap[varname]) if len(vars) == 1: iomap[key] = vars[0] else: iomap[key] = vars return iomap def _get_output_map_from_op(self, varmap, op): """Returns a dict from op output name to the vars in varmap.""" iomap = dict() for key in op.output_names: vars = [] for varname in op.output(key): vars.append(varmap[varname]) if len(vars) == 1: iomap[key] = vars[0] else: iomap[key] = vars return iomap def _get_lr_ops(self): lr_ops = [] # find learning rate variables by optimize op lr_vars = set() for op in self.optimize_ops: if self._is_optimizer_op(op): lr_vars.add(op.input("LearningRate")[0]) find_ops = [] # find ops which output is lr var block = self.origin_program.global_block() for op in block.ops: if set(op.output_arg_names) & lr_vars: find_ops.append(op) # make a union find struct by the ops in default_main_program ufind = UnionFind(block.ops) for op1 in block.ops: for op2 in block.ops: # NOTE: we need to skip all optimize ops, since it is connected # with forward/backward ops and lr ops, we only need the lr ops. 
if op1 != op2 and self._is_op_connected(op1, op2) and \ not self._is_optimizer_op(op1) and not self._is_optimizer_op(op2): ufind.union(op1, op2) # find all ops which is related with lr var for op1 in block.ops: for op2 in find_ops: if ufind.is_connected(op1, op2): lr_ops.append(op1) # we only need to append op for once break return lr_ops def _get_optimize_pass(self): """ Get optimizer operators, paramters and gradients from origin_program Returns: opt_ops (list): optimize operators. params_grads (dict): paramter->gradient. """ block = self.origin_program.global_block() opt_ops = [] params_grads = [] origin_var_dict = self.origin_program.global_block().vars for op in block.ops: if self._is_opt_role_op(op): opt_ops.append(op) # HACK(wuyi): if we find grad vars from input of optimize # ops, we may get the output of clip op. Use syntax "@GRAD" # and op_role_var to get the pair. for input_name in op.input_arg_names: if input_name.find("@GRAD") != -1 and \ op.attrs[RPC_OP_ROLE_ATTR_NAME]: param_name = op.attrs[OP_ROLE_VAR_ATTR_NAME][0] params_grads.append([ origin_var_dict[param_name], origin_var_dict[input_name] ]) elif self._is_adam_connected_op(op): opt_ops.append(op) else: pass return opt_ops, params_grads def _is_adam_connected_op(self, op): """ A hack function to determinate whether the input operator is connected to optimize operator. """ if op.type == "scale": for in_name in op.input_arg_names: if in_name.startswith("beta1_pow_acc") or \ in_name.startswith("beta2_pow_acc"): return True return False
54,199
41.34375
100
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/ps_dispatcher.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class PSDispatcher(object):
    """
    PSDispatcher is the base class for dispatching vars
    into different pserver instance.
    You need to implement the `dispatch` inferface.

    Args:
        pserver_endpoints (list): list of parameter-server endpoint strings.
    """

    def __init__(self, pserver_endpoints):
        self._eps = pserver_endpoints
        self._step = 0  # cursor used by stateful dispatchers (e.g. RoundRobin)

    @property
    def eps(self):
        # The configured parameter-server endpoint list.
        return self._eps

    def reset(self):
        # Restart dispatching from the first endpoint.
        self._step = 0

    def dispatch(self, varlist):
        """
        Map each variable in ``varlist`` to a pserver endpoint.

        :param varlist: a list of Variables
        :return: a list of endpoints, one per variable in ``varlist``
        """
        # BUG FIX: the original built an AssertionError instance without
        # raising it, so calling dispatch() on the base class silently
        # returned None instead of failing loudly.
        raise NotImplementedError("Interface has not been implemented.")


class HashName(PSDispatcher):
    """
    Dispatch variables to endpoints by hashing the variable name,
    giving a deterministic (within one process) assignment.
    """

    def __init__(self, pserver_endpoints):
        # Use the explicit class in super() -- super(self.__class__, ...)
        # recurses infinitely if this class is ever subclassed.
        super(HashName, self).__init__(pserver_endpoints)

    def _hash_block(self, block_str, total):
        # Stable (per-process) bucket index in [0, total).
        return hash(block_str) % total

    def dispatch(self, varlist):
        """Assign each var to the endpoint chosen by hashing its name."""
        eplist = []
        for var in varlist:
            # NOTE(review): assumes items expose a callable name() method
            # (var.name(), not the attribute var.name) -- confirm against
            # the transpiler call sites.
            server_id = self._hash_block(var.name(), len(self._eps))
            eplist.append(self._eps[server_id])
        return eplist


class RoundRobin(PSDispatcher):
    """
    Distribute variables to serveral endpoints in round-robin order,
    keeping the cursor position across successive dispatch() calls.
    """

    def __init__(self, pserver_endpoints):
        # Explicit class in super() for subclass safety (see HashName).
        super(RoundRobin, self).__init__(pserver_endpoints)

    def dispatch(self, varlist):
        """Assign vars to endpoints cyclically, resuming from self._step."""
        eplist = []
        for var in varlist:
            eplist.append(self._eps[self._step])
            self._step += 1
            if self._step >= len(self._eps):
                self._step = 0
        return eplist
2,299
28.113924
74
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/details/program_utils.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def delete_ops(block, ops):
    """
    Remove the ops in ``ops`` from ``block`` and re-sync the owning program.

    Only the positions of ``ops[0]`` and ``ops[-1]`` are looked up, so
    ``ops`` is assumed to be a consecutive slice of ``block.ops``.

    Args:
        block: the Block whose op list is edited in place.
        ops: non-empty list of consecutive operators belonging to ``block``.

    Raises:
        ValueError: if the first or last op is not found in ``block.ops``.
    """
    # NOTE: the original wrapped the lookups in `except Exception, e: raise e`
    # -- Python-2-only syntax that is a no-op and also resets the traceback.
    # Letting the exception propagate unchanged is equivalent and clearer.
    start = list(block.ops).index(ops[0])
    end = list(block.ops).index(ops[-1])
    # Always remove at `start`: each removal shifts the remaining ops left.
    # (A comprehension was used here purely for side effects; a loop is the
    # idiomatic form and also works on Python 3, where xrange is gone.)
    for _ in range(end - start + 1):
        block.remove_op(start)
    block.program.sync_with_cpp()


def find_op_by_input_arg(block, arg_name):
    """Return the index of the first op in ``block`` that takes ``arg_name``
    as an input, or -1 if no op reads it."""
    for index, op in enumerate(block.ops):
        if arg_name in op.input_arg_names:
            return index
    return -1


def find_op_by_output_arg(block, arg_name):
    """Return the index of the first op in ``block`` that produces
    ``arg_name`` as an output, or -1 if no op writes it."""
    for index, op in enumerate(block.ops):
        if arg_name in op.output_arg_names:
            return index
    return -1
1,223
31.210526
74
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/details/__init__.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from program_utils import * from ufind import *
659
37.823529
74
py
Paddle
Paddle-master/python/paddle/fluid/transpiler/details/ufind.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class UnionFind(object):
    """
    Union-find (disjoint-set) data structure.

    Keeps track of a set of elements partitioned into disjoint
    (non-overlapping) subsets, supporting `union` and connectivity
    queries in near-constant amortized time via path halving.

    Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure

    Args:
        elementes(list): The initialize element list.
    """

    def __init__(self, elementes=None):
        self._parents = []  # index -> parent index
        self._index = {}  # element -> index
        self._curr_idx = 0
        for ele in (elementes or []):
            self._index[ele] = self._curr_idx
            self._parents.append(self._curr_idx)
            self._curr_idx += 1

    def find(self, x):
        """Return the root index of element ``x``, or -1 for an unknown
        element.  Compresses the search path (path halving) as it walks."""
        if x not in self._index:
            return -1
        node = self._index[x]
        while self._parents[node] != node:
            parent = self._parents[node]
            # Point this node one level higher before stepping up.
            self._parents[node] = self._parents[parent]
            node = parent
        return node

    def union(self, x, y):
        """Merge the subsets containing ``x`` and ``y`` (no-op when they
        are already in the same subset)."""
        root_x = self.find(x)
        root_y = self.find(y)
        if root_x != root_y:
            self._parents[root_x] = root_y

    def is_connected(self, x, y):
        """Return True iff ``x`` and ``y`` currently share a root."""
        return self.find(x) == self.find(y)
2,116
31.569231
84
py
Paddle
Paddle-master/python/paddle/fluid/tests/test_gradient_clip.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Smoke test for gradient clipping by global norm: build one MNIST MLP, clone
# it, attach GradientClipByGlobalNorm to the clone, run a few batches through
# both, and verify the clipped global gradient norm equals
# min(unclipped_norm, CLIP).  Exits with status 0 on success, 1 on failure.

import numpy as np
import paddle
import paddle.fluid as fluid

BATCH_SIZE = 128
CLIP = 1  # clip_norm threshold passed to GradientClipByGlobalNorm

# Forward network: 784 -> 128 -> 64 -> 10 softmax, cross-entropy loss.
prog = fluid.framework.Program()
with fluid.program_guard(main_program=prog):
    image = fluid.layers.data(name='x', shape=[784], dtype='float32')

    hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
    hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
    predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')

    label = fluid.layers.data(name='y', shape=[1], dtype='int64')

    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)

# Clone the forward program BEFORE appending backward ops, so both programs
# share the same forward structure but get independent backward passes.
prog_clip = prog.clone()

# Look up the cloned program's copy of the loss var by name.
avg_cost_clip = prog_clip.block(0).var(avg_cost.name)

p_g = fluid.backward.append_backward(loss=avg_cost)
p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)

# Only the clone gets clipping ops appended to its (param, grad) pairs.
with fluid.program_guard(main_program=prog_clip):
    fluid.clip.set_gradient_clip(
        fluid.clip.GradientClipByGlobalNorm(clip_norm=CLIP))
    p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip)

# Gradient variables to fetch from each program (second element of each pair).
grad_list = [elem[1] for elem in p_g]
grad_clip_list = [elem[1] for elem in p_g_clip]

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.mnist.train(), buf_size=8192),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
exe.run(fluid.default_startup_program())

# Compare clipped vs. unclipped global norms on the first 5 batches.
count = 0
for data in train_reader():
    count += 1
    if count > 5:
        break
    out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
    out_clip = exe.run(prog_clip,
                       feed=feeder.feed(data),
                       fetch_list=grad_clip_list)
    # NOTE(review): the comparison skips the first fetched gradient
    # (out[1:]) in both sums -- presumably deliberate, but the reason is
    # not evident here; confirm which grad occupies index 0.
    global_norm = 0
    for v in out[1:]:
        global_norm += np.sum(np.power(v, 2))
    global_norm = np.sqrt(global_norm)

    global_norm_clip = 0
    for v in out_clip[1:]:
        global_norm_clip += np.sum(np.power(v, 2))
    global_norm_clip = np.sqrt(global_norm_clip)

    # Clipping by global norm scales all grads so the global norm is
    # min(original_norm, CLIP); allow a small relative tolerance.
    if not np.isclose(
            a=global_norm_clip, b=np.minimum(global_norm, CLIP), rtol=5e-3):
        exit(1)
exit(0)
2,703
31.578313
76
py
Paddle
Paddle-master/python/paddle/fluid/tests/test_data_feeder.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
import unittest


class TestDataFeeder(unittest.TestCase):
    """Tests that fluid.DataFeeder converts Python minibatch data into
    tensors with the expected shape and LoD at lod levels 0, 1 and 2."""

    def test_lod_level_0_converter(self):
        # Dense (non-sequence) inputs: feeder stacks samples along batch dim
        # and produces empty LoD for both slots.
        img = fluid.layers.data(name='image', shape=[1, 28, 28])
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
        result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])
        print(result)
        self.assertEqual(result['image'].shape(), [2, 1, 28, 28])
        self.assertEqual(result['label'].shape(), [2, 1])
        self.assertEqual(result['image'].lod(), [])
        self.assertEqual(result['label'].lod(), [])

    def test_lod_level_1_converter(self):
        # lod_level = 1
        # each sentence has a different number of words
        sentences = fluid.layers.data(
            name='sentences', shape=[1], dtype='int64', lod_level=1)
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        feeder = fluid.DataFeeder([sentences, label], fluid.CPUPlace())

        # lod = [[0, 3, 5, 9]]
        # data = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]
        # label = [1] * len(data)
        result = feeder.feed(
            [([1, 2, 3], [1]), ([4, 5], [1]), ([6, 7, 8, 9], [1])])
        print(result)
        # Words are flattened into a single [9, 1] tensor; the LoD offsets
        # record the sentence boundaries.
        self.assertEqual(result['sentences'].shape(), [9, 1])
        self.assertEqual(result['label'].shape(), [3, 1])
        self.assertEqual(result['sentences'].lod(), [[0, 3, 5, 9]])
        self.assertEqual(result['label'].lod(), [])

    def test_lod_level_2_converter(self):
        # lod_level = 2
        # paragraphs -> sentences -> words
        paragraphs = fluid.layers.data(
            name='paragraphs', shape=[1], dtype='int64', lod_level=2)
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        feeder = fluid.DataFeeder([paragraphs, label], fluid.CPUPlace())

        # lod = [[0, 2, 3], [0, 3, 5, 9]]
        # data = [[[1, 2, 3], [4, 5]], [[6, 7, 8, 9]]]
        # label = [1] * len(data)
        result = feeder.feed(
            [([[1, 2, 3], [4, 5]], [1]), ([[6, 7, 8, 9]], [1])])
        print(result)
        # Two LoD levels: outer offsets delimit paragraphs (in sentences),
        # inner offsets delimit sentences (in words).
        self.assertEqual(result['paragraphs'].shape(), [9, 1])
        self.assertEqual(result['label'].shape(), [2, 1])
        self.assertEqual(result['paragraphs'].lod(), [[0, 2, 3], [0, 3, 5, 9]])
        self.assertEqual(result['label'].lod(), [])


if __name__ == '__main__':
    unittest.main()
3,040
39.546667
79
py
Paddle
Paddle-master/python/paddle/fluid/tests/notest_concurrency.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.executor import Executor class TestRoutineOp(unittest.TestCase): def test_simple_routine(self): ch = fluid.make_channel( dtype=core.VarDesc.VarType.BOOL, name="CreateChannel") with fluid.Go(): fluid.channel_send(ch, True) result = fluid.channel_recv(ch) fluid.channel_close(ch) cpu = core.CPUPlace() exe = Executor(cpu) outs = exe.run(fetch_list=[result]) self.assertEqual(outs[0], True) if __name__ == '__main__': unittest.main()
1,243
30.1
74
py
Paddle
Paddle-master/python/paddle/fluid/tests/test_cpp_reader.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Smoke test for the C++ reader op chain:
#   random data generator -> shuffle -> batch -> double buffer -> read.
# The reader creation ops live in the startup program; the main program only
# reads.  Exits with status 1 if any fetched batch has the wrong shape.

import paddle
import paddle.fluid as fluid
import numpy as np
import sys

startup_prog = fluid.framework.Program()
startup_block = startup_prog.current_block()

# Reader variables are persistable so they survive across executor runs.
random_reader = startup_block.create_var(
    type=fluid.core.VarDesc.VarType.READER, name="RandomDataGenerator")
random_reader.desc.set_dtypes(
    [fluid.core.VarDesc.VarType.FP32, fluid.core.VarDesc.VarType.FP32])
random_reader.persistable = True
shuffle_reader = startup_block.create_var(
    type=fluid.core.VarDesc.VarType.READER, name="ShuffleReader")
shuffle_reader.persistable = True
batch_reader = startup_block.create_var(
    type=fluid.core.VarDesc.VarType.READER, name="BatchReader")
batch_reader.persistable = True
double_buffer = startup_block.create_var(
    type=fluid.core.VarDesc.VarType.READER, name="DoubleBuffer")
double_buffer.persistable = True

# NOTE(review): the clone happens BEFORE the creation ops are appended below,
# so main_prog inherits the reader *variables* but not the creation *ops* --
# presumably intentional (create once in startup, read in main); confirm.
main_prog = startup_prog.clone()
main_block = main_prog.current_block()

# Source: two float32 LoD-free outputs with concatenated shapes [1,2]+[1,1].
create_random_data_generator_op = startup_block.append_op(
    type="create_random_data_generator",
    outputs={"Out": random_reader},
    attrs={
        "shape_concat": [1, 2, 1, 1],
        "ranks": [2, 2],
        "low": 0.0,
        "high": 1.0,
        'lod_levels': [0, 0]
    })
create_shuffle_reader_op = startup_block.append_op(
    type="create_shuffle_reader",
    inputs={"UnderlyingReader": random_reader},
    outputs={"Out": shuffle_reader},
    attrs={"buffer_size": 7})
create_batch_reader_op = startup_block.append_op(
    type="create_batch_reader",
    inputs={"UnderlyingReader": shuffle_reader},
    outputs={"Out": batch_reader},
    attrs={"batch_size": 10})
create_double_buffer_reader_op = startup_block.append_op(
    type="create_double_buffer_reader",
    inputs={"UnderlyingReader": batch_reader},
    outputs={"Out": double_buffer})

# Destination tensors for the read op in the main program.
out1 = main_block.create_var(
    type=fluid.core.VarDesc.VarType.LOD_TENSOR, name="Out1")
out2 = main_block.create_var(
    type=fluid.core.VarDesc.VarType.LOD_TENSOR, name="Out2")

# Propagate the reader's metadata to the cloned variable in the main program.
main_block.var("DoubleBuffer").desc.set_shapes(double_buffer.desc.shapes())
main_block.var("DoubleBuffer").desc.set_dtypes(double_buffer.desc.dtypes())
main_block.var("DoubleBuffer").desc.set_lod_levels(
    double_buffer.desc.lod_levels())

read_op = main_block.append_op(
    type="read",
    inputs={"Reader": double_buffer},
    outputs={"Out": [out1, out2]})

place = fluid.CPUPlace()
exe = fluid.Executor(place)

exe.run(startup_prog)

# Each fetched batch should be 10 samples of shape [2] and [1] respectively.
for i in range(1, 100):
    [res1, res2] = exe.run(main_prog, fetch_list=[out1, out2])
    if not (res1.shape == (10, 2) and res2.shape == (10, 1)):
        exit(1)
3,178
33.182796
75
py
Paddle
Paddle-master/python/paddle/fluid/tests/test_lod_tensor.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor, _validate_lod, _convert_lod
import numpy
import unittest


class TestLoDTensor(unittest.TestCase):
    """Tests LoD validation, length-based -> offset-based LoD conversion,
    and the LoDTensor construction helpers."""

    def test_validate_lod(self):
        # A LoD must be a list of lists of non-negative ints; anything else
        # (tuples, flat lists) triggers an assertion inside _validate_lod.
        lod = (1, 2, 1)
        self.assertRaises(AssertionError, _validate_lod, lod, -1)
        lod = [[1, 2], (2, 3)]
        self.assertRaises(AssertionError, _validate_lod, lod, -1)
        lod = [1, 2, 3]
        self.assertRaises(AssertionError, _validate_lod, lod, -1)

        # Empty LoD is valid; empty levels and negative lengths are not.
        lod = []
        self.assertTrue(_validate_lod(lod, -1))
        lod = [[], [1], [3]]
        self.assertFalse(_validate_lod(lod, -1))
        lod = [[0], [-1], [3]]
        self.assertFalse(_validate_lod(lod, -1))

        # Each level's sum should be equal to the number of items in the next level
        # Moreover, last level's sum should be equal to the tensor height
        lod = [[2, 3], [1, 3, 1, 2, 1]]
        self.assertTrue(_validate_lod(lod, tensor_height=8))
        lod = [[1, 3], [2, 1, 3]]
        self.assertFalse(_validate_lod(lod, tensor_height=6))
        lod = [[1, 3], [2, 1, 3, 4]]
        self.assertFalse(_validate_lod(lod, tensor_height=5))

    def test_convert_lod(self):
        # Length-based LoD converts to cumulative offsets starting at 0.
        lod = [[1, 2, 3]]
        converted_lod = [[0, 1, 3, 6]]
        self.assertEqual(_convert_lod(lod), converted_lod)

        lod = [[2, 3], [1, 3, 1, 2, 1]]
        converted_lod = [[0, 2, 5], [0, 1, 4, 5, 7, 8]]
        self.assertEqual(_convert_lod(lod), converted_lod)

    def test_create_lod_tensor(self):
        # Create LoDTensor from a list; the LoD lengths must match the
        # nested-list lengths or an assertion fires.
        data = [[1, 2, 3], [3, 4]]
        wrong_lod = [[2, 2]]
        correct_lod = [[3, 2]]
        self.assertRaises(AssertionError, create_lod_tensor, data, wrong_lod,
                          fluid.CPUPlace())
        tensor = create_lod_tensor(data, correct_lod, fluid.CPUPlace())
        self.assertEqual(tensor.lod(), [[0, 3, 5]])

        # Create LoDTensor from numpy array
        data = numpy.random.random([10, 1])
        lod = [[2, 1], [3, 3, 4]]
        tensor = create_lod_tensor(data, lod, fluid.CPUPlace())
        self.assertEqual(tensor.lod(), [[0, 2, 3], [0, 3, 6, 10]])

        # Create LoDTensor from another LoDTensor; they are different
        # instances, so the original's LoD is left untouched.
        new_lod = [[2, 2, 1], [1, 2, 2, 3, 2]]
        new_tensor = create_lod_tensor(tensor, new_lod, fluid.CPUPlace())
        self.assertEqual(tensor.lod(), [[0, 2, 3], [0, 3, 6, 10]])
        self.assertEqual(new_tensor.lod(), [[0, 2, 4, 5], [0, 1, 3, 5, 8, 10]])

    def test_create_random_int_lodtensor(self):
        # The shape of a word, commonly used in speech and NLP problem, is [1]
        shape = [1]
        lod = [[2, 3, 5]]
        dict_size = 10000
        low = 0
        high = dict_size - 1
        tensor = create_random_int_lodtensor(lod, shape,
                                             fluid.CPUPlace(), low, high)
        # Height is the sum of the last-level lengths (2 + 3 + 5 = 10).
        self.assertEqual(tensor.lod(), [[0, 2, 5, 10]])
        self.assertEqual(tensor.shape(), [10, 1])


if __name__ == '__main__':
    unittest.main()
3,679
39
111
py
Paddle
Paddle-master/python/paddle/fluid/tests/test_concurrency.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import framework, unique_name, layer_helper
from paddle.fluid.executor import Executor
from paddle.fluid.layers import fill_constant, assign, While, elementwise_add, Print


class TestRoutineOp(unittest.TestCase):
    """Tests Fluid's CSP-style concurrency primitives (Go blocks, channels,
    Select) by reproducing several classic Go concurrency examples."""

    def test_simple_routine(self):
        # Send one constant through a channel from a Go block and fetch it.
        ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR)

        # Create LOD_TENSOR<INT64> and put it into the scope.  This placeholder
        # variable will be filled in and returned by fluid.channel_recv
        result = self._create_tensor('return_value',
                                     core.VarDesc.VarType.LOD_TENSOR,
                                     core.VarDesc.VarType.INT64)

        with fluid.Go():
            input_value = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.FP64, value=1234)
            fluid.channel_send(ch, input_value)

        result, status = fluid.channel_recv(ch, result)
        fluid.channel_close(ch)

        cpu = core.CPUPlace()
        exe = Executor(cpu)

        outs = exe.run(fetch_list=[result])
        self.assertEqual(outs[0], 1234)

    def test_daisy_chain(self):
        '''
        Mimics classic Daisy-chain test:  https://talks.golang.org/2012/concurrency.slide#39
        '''
        n = 100

        leftmost = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR)
        left = leftmost

        # Build a chain of n Go blocks; each one receives from its right
        # neighbor, adds 1, and forwards the value to its left neighbor.
        # TODO(thuan): Use fluid.While() after scope capture is implemented.
        # https://github.com/PaddlePaddle/Paddle/issues/8502
        for i in range(n):
            right = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR)
            with fluid.Go():
                one_tensor = self._create_one_dim_tensor(1)
                result = self._create_tensor('return_value',
                                             core.VarDesc.VarType.LOD_TENSOR,
                                             core.VarDesc.VarType.INT64)

                result, status = fluid.channel_recv(right, result)
                one_added = fluid.layers.elementwise_add(x=one_tensor, y=result)
                fluid.channel_send(left, one_added)
            left = right

        # Trigger the channel propagation by sending a "1" to rightmost channel
        with fluid.Go():
            one_tensor = self._create_one_dim_tensor(1)
            fluid.channel_send(right, one_tensor)

        leftmost_result = self._create_tensor('return_value',
                                              core.VarDesc.VarType.LOD_TENSOR,
                                              core.VarDesc.VarType.INT64)
        leftmost_result, status = fluid.channel_recv(leftmost, leftmost_result)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        leftmost_data = exe.run(fetch_list=[leftmost_result])

        # The leftmost_data should be equal to the number of channels + 1
        self.assertEqual(leftmost_data[0][0], n + 1)

    def _create_one_dim_tensor(self, value):
        # Helper: a [1] int constant with gradients disabled.
        one_dim_tensor = fill_constant(shape=[1], dtype='int', value=value)
        one_dim_tensor.stop_gradient = True
        return one_dim_tensor

    def _create_tensor(self, name, type, dtype):
        # Helper: an uninitialized variable with a unique name in the main
        # program's current block (filled in later by channel_recv).
        return framework.default_main_program().current_block().create_var(
            name=unique_name.generate(name), type=type, dtype=dtype)

    def _create_persistable_tensor(self, name, type, dtype):
        # Same as _create_tensor but persistable, so the value survives
        # across scopes (needed when written inside nested blocks).
        return framework.default_main_program().current_block().create_var(
            name=unique_name.generate(name),
            type=type,
            dtype=dtype,
            persistable=True)

    def test_select(self):
        # A Select with a send case and a default over a buffered channel:
        # the send succeeds immediately, so the value can be received back.
        with framework.program_guard(framework.Program()):
            ch1 = fluid.make_channel(
                dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=1)

            result1 = self._create_tensor('return_value',
                                          core.VarDesc.VarType.LOD_TENSOR,
                                          core.VarDesc.VarType.FP64)

            input_value = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.FP64, value=10)

            with fluid.Select() as select:
                with select.case(fluid.channel_send, ch1, input_value):
                    # Execute something.
                    pass

                with select.default():
                    pass

            # This should not block because we are using a buffered channel.
            result1, status = fluid.channel_recv(ch1, result1)
            fluid.channel_close(ch1)

            cpu = core.CPUPlace()
            exe = Executor(cpu)

            result = exe.run(fetch_list=[result1])
            self.assertEqual(result[0][0], 10)

    def test_fibonacci(self):
        """
        Mimics Fibonacci Go example: https://tour.golang.org/concurrency/5
        """
        with framework.program_guard(framework.Program()):
            quit_ch_input_var = self._create_persistable_tensor(
                'quit_ch_input', core.VarDesc.VarType.LOD_TENSOR,
                core.VarDesc.VarType.INT32)
            quit_ch_input = fill_constant(
                shape=[1],
                dtype=core.VarDesc.VarType.INT32,
                value=0,
                out=quit_ch_input_var)

            result = self._create_persistable_tensor(
                'result', core.VarDesc.VarType.LOD_TENSOR,
                core.VarDesc.VarType.INT32)
            fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.INT32, value=0, out=result)

            # Fibonacci state: x, y start at 0, 1.
            x = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.INT32, value=0)
            y = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.INT32, value=1)

            while_cond = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.BOOL, value=True)

            while_false = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.BOOL, value=False)
            x_tmp = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.INT32, value=0)

            def fibonacci(channel, quit_channel):
                # Loop forever: either send the next Fibonacci number, or
                # stop when something arrives on the quit channel.
                while_op = While(cond=while_cond)
                with while_op.block():
                    result2 = fill_constant(
                        shape=[1], dtype=core.VarDesc.VarType.INT32, value=0)

                    with fluid.Select() as select:
                        with select.case(
                                fluid.channel_send, channel, x, is_copy=True):
                            # Advance the pair: (x, y) <- (y, x + y).
                            assign(input=x, output=x_tmp)
                            assign(input=y, output=x)
                            assign(elementwise_add(x=x_tmp, y=y), output=y)

                        with select.case(fluid.channel_recv, quit_channel,
                                         result2):
                            # Quit: flip the while condition to false.
                            helper = layer_helper.LayerHelper('assign')
                            helper.append_op(
                                type='assign',
                                inputs={'X': [while_false]},
                                outputs={'Out': [while_cond]})

            ch1 = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR)
            quit_ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR)

            # Consumer: receive 10 Fibonacci numbers, then signal quit.
            with fluid.Go():
                for i in xrange(10):
                    fluid.channel_recv(ch1, result)
                    Print(result)

                fluid.channel_send(quit_ch, quit_ch_input)

            fibonacci(ch1, quit_ch)

            fluid.channel_close(ch1)
            fluid.channel_close(quit_ch)

            cpu = core.CPUPlace()
            exe = Executor(cpu)

            # The 10th Fibonacci number (0, 1, 1, 2, ...) is 34.
            exe_result = exe.run(fetch_list=[result])
            self.assertEqual(exe_result[0][0], 34)

    def test_ping_pong(self):
        """
        Mimics Ping Pong example: https://gobyexample.com/channel-directions
        """
        with framework.program_guard(framework.Program()):
            result = self._create_tensor('return_value',
                                         core.VarDesc.VarType.LOD_TENSOR,
                                         core.VarDesc.VarType.FP64)

            ping_result = self._create_tensor('ping_return_value',
                                              core.VarDesc.VarType.LOD_TENSOR,
                                              core.VarDesc.VarType.FP64)

            def ping(ch, message):
                # Push the message into the ping channel.
                fluid.channel_send(ch, message, is_copy=True)

            def pong(ch1, ch2):
                # Relay: receive from ch1, forward to ch2.
                fluid.channel_recv(ch1, ping_result)
                fluid.channel_send(ch2, ping_result, is_copy=True)

            pings = fluid.make_channel(
                dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=1)
            pongs = fluid.make_channel(
                dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=1)

            msg = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.FP64, value=9)

            ping(pings, msg)
            pong(pings, pongs)

            # The original message should come back unchanged.
            fluid.channel_recv(pongs, result)
            fluid.channel_close(pings)
            fluid.channel_close(pongs)

            cpu = core.CPUPlace()
            exe = Executor(cpu)
            exe_result = exe.run(fetch_list=[result])
            self.assertEqual(exe_result[0][0], 9)


if __name__ == '__main__':
    unittest.main()
10,057
37.833977
92
py
Paddle
Paddle-master/python/paddle/fluid/tests/test_mnist_if_else_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard, default_main_program, default_startup_program
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import MomentumOptimizer
import paddle.fluid.core as core
import unittest
import numpy as np


class TestMNISTIfElseOp(unittest.TestCase):
    """Trains a small MNIST classifier whose hidden layer is chosen by a
    per-sample condition (label < 5), built two ways: with the raw
    split/merge_lod_tensor + ConditionalBlock ops, and with the IfElse
    layer wrapper.  Each test passes as soon as the loss drops below 1.0."""

    def test_raw_api(self):
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            # Per-sample condition: label < 5.
            limit = layers.fill_constant_batch_size_like(
                input=label, dtype='int64', shape=[1], value=5.0)
            cond = layers.less_than(x=label, y=limit)
            # Route each sample into the true or false branch by the mask.
            true_image, false_image = layers.split_lod_tensor(
                input=image, mask=cond)

            true_out = layers.create_tensor(dtype='float32')
            true_cond = layers.ConditionalBlock([true_image])

            with true_cond.block():
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                layers.assign(input=prob, output=true_out)

            false_out = layers.create_tensor(dtype='float32')
            false_cond = layers.ConditionalBlock([false_image])

            with false_cond.block():
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                layers.assign(input=prob, output=false_out)

            # Re-interleave the branch outputs into batch order.
            prob = layers.merge_lod_tensor(
                in_true=true_out, in_false=false_out, mask=cond, x=image)
            loss = layers.cross_entropy(input=prob, label=label)
            avg_loss = layers.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)

        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                # List comprehensions instead of np.array(map(...)): map()
                # returns an iterator on Python 3, which np.array would not
                # expand into an array of samples.
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = np.expand_dims(y_data, axis=1)

                outs = exe.run(prog,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                # Converged enough: the test passes.
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)

    def test_ifelse(self):
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            limit = layers.fill_constant_batch_size_like(
                input=label, dtype='int64', shape=[1], value=5.0)
            cond = layers.less_than(x=label, y=limit)
            ie = layers.IfElse(cond)

            with ie.true_block():
                true_image = ie.input(image)
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            with ie.false_block():
                false_image = ie.input(image)
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            prob = ie()
            loss = layers.cross_entropy(input=prob[0], label=label)
            avg_loss = layers.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        # BUG FIX: the original ran exe.run(kwargs['startup_program']) and
        # exe.run(kwargs['main_program'], ...) below, but `kwargs` is never
        # defined in this method -- a guaranteed NameError.  Use the local
        # programs built above, mirroring test_raw_api.
        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(prog,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)


if __name__ == '__main__':
    # temp disable if else unittest since it could be buggy.
    exit(0)
5,821
38.073826
104
py
Paddle
Paddle-master/python/paddle/fluid/tests/test_detection.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import paddle.fluid as fluid import paddle.fluid.layers as layers from paddle.fluid.framework import Program, program_guard import unittest class TestDetection(unittest.TestCase): def test_detection_output(self): program = Program() with program_guard(program): pb = layers.data( name='prior_box', shape=[10, 4], append_batch_size=False, dtype='float32') pbv = layers.data( name='prior_box_var', shape=[10, 4], append_batch_size=False, dtype='float32') loc = layers.data( name='target_box', shape=[2, 10, 4], append_batch_size=False, dtype='float32') scores = layers.data( name='scores', shape=[2, 10, 20], append_batch_size=False, dtype='float32') out = layers.detection_output( scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv) self.assertIsNotNone(out) self.assertEqual(out.shape[-1], 6) print(str(program)) def test_detection_api(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[4], dtype='float32') y = layers.data(name='y', shape=[4], dtype='float32') z = layers.data(name='z', shape=[4], dtype='float32', lod_level=1) iou = layers.iou_similarity(x=x, y=y) bcoder = layers.box_coder( prior_box=x, prior_box_var=y, target_box=z, code_type='encode_center_size') self.assertIsNotNone(iou) self.assertIsNotNone(bcoder) matched_indices, matched_dist = layers.bipartite_match(iou) 
self.assertIsNotNone(matched_indices) self.assertIsNotNone(matched_dist) gt = layers.data( name='gt', shape=[1, 1], dtype='int32', lod_level=1) trg, trg_weight = layers.target_assign( gt, matched_indices, mismatch_value=0) self.assertIsNotNone(trg) self.assertIsNotNone(trg_weight) gt2 = layers.data( name='gt2', shape=[10, 4], dtype='float32', lod_level=1) trg, trg_weight = layers.target_assign( gt2, matched_indices, mismatch_value=0) self.assertIsNotNone(trg) self.assertIsNotNone(trg_weight) print(str(program)) def test_ssd_loss(self): program = Program() with program_guard(program): pb = layers.data( name='prior_box', shape=[10, 4], append_batch_size=False, dtype='float32') pbv = layers.data( name='prior_box_var', shape=[10, 4], append_batch_size=False, dtype='float32') loc = layers.data(name='target_box', shape=[10, 4], dtype='float32') scores = layers.data(name='scores', shape=[10, 21], dtype='float32') gt_box = layers.data( name='gt_box', shape=[4], lod_level=1, dtype='float32') gt_label = layers.data( name='gt_label', shape=[1], lod_level=1, dtype='int32') loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv) self.assertIsNotNone(loss) self.assertEqual(loss.shape[-1], 1) print(str(program)) class TestPriorBox(unittest.TestCase): def test_prior_box(self): data_shape = [3, 224, 224] images = fluid.layers.data( name='pixel', shape=data_shape, dtype='float32') conv1 = fluid.layers.conv2d(images, 3, 3, 2) box, var = layers.prior_box( input=conv1, image=images, min_sizes=[100.0], aspect_ratios=[1.], flip=True, clip=True) assert len(box.shape) == 4 assert box.shape == var.shape assert box.shape[3] == 4 class TestMultiBoxHead(unittest.TestCase): def test_multi_box_head(self): data_shape = [3, 224, 224] mbox_locs, mbox_confs, box, var = self.multi_box_head_output(data_shape) assert len(box.shape) == 2 assert box.shape == var.shape assert box.shape[1] == 4 assert mbox_locs.shape[1] == mbox_confs.shape[1] def multi_box_head_output(self, data_shape): images = 
fluid.layers.data( name='pixel', shape=data_shape, dtype='float32') conv1 = fluid.layers.conv2d(images, 3, 3, 2) conv2 = fluid.layers.conv2d(conv1, 3, 3, 2) conv3 = fluid.layers.conv2d(conv2, 3, 3, 2) conv4 = fluid.layers.conv2d(conv3, 3, 3, 2) conv5 = fluid.layers.conv2d(conv4, 3, 3, 2) mbox_locs, mbox_confs, box, var = layers.multi_box_head( inputs=[conv1, conv2, conv3, conv4, conv5, conv5], image=images, num_classes=21, min_ratio=20, max_ratio=90, aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], base_size=300, offset=0.5, flip=True, clip=True) return mbox_locs, mbox_confs, box, var class TestDetectionMAP(unittest.TestCase): def test_detection_map(self): program = Program() with program_guard(program): detect_res = layers.data( name='detect_res', shape=[10, 6], append_batch_size=False, dtype='float32') label = layers.data( name='label', shape=[10, 6], append_batch_size=False, dtype='float32') map_out = layers.detection_map(detect_res, label, 21) self.assertIsNotNone(map_out) self.assertEqual(map_out.shape, (1, )) print(str(program)) if __name__ == '__main__': unittest.main()
6,773
35.224599
80
py
Paddle
Paddle-master/python/paddle/fluid/tests/__init__.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
612
42.785714
74
py