code stringlengths 17 6.64M |
|---|
def getTests(path, litConfig, testSuiteCache, localConfigCache):
    """Resolve *path* to its owning test suite and the tests beneath it.

    Returns a pair (test_suite, iterable_of_tests); on failure to locate a
    suite, warns and returns ((), ()).
    """
    ts, path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
    if ts is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return ((), ())
    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, ts.name, path_in_suite))
    tests = getTestsInSuite(ts, path_in_suite, litConfig, testSuiteCache, localConfigCache)
    return (ts, tests)
|
def getTestsInSuite(ts, path_in_suite, litConfig, testSuiteCache, localConfigCache):
    """Generator yielding Test objects for everything under *path_in_suite*
    inside test suite *ts*, recursing into subdirectories and nested suites.
    """
    source_path = ts.getSourcePath(path_in_suite)
    if (not os.path.exists(source_path)):
        return
    if (not os.path.isdir(source_path)):
        # A single file: yield one test, using the parent directory's local config.
        lc = getLocalConfig(ts, path_in_suite[:(- 1)], litConfig, localConfigCache)
        (yield Test.Test(ts, path_in_suite, lc))
        return
    # A directory: let its local config's test format discover tests here.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)
    if (lc.test_format is not None):
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite, litConfig, lc):
            (yield res)
    # Recurse into subdirectories, skipping well-known non-test dirs and excludes.
    for filename in os.listdir(source_path):
        if ((filename in ('Output', '.svn', '.git')) or (filename in lc.excludes)):
            continue
        file_sourcepath = os.path.join(source_path, filename)
        if (not os.path.isdir(file_sourcepath)):
            continue
        subpath = (path_in_suite + (filename,))
        file_execpath = ts.getExecPath(subpath)
        # A subdirectory may itself be the root of a different test suite,
        # either in the exec tree or the source tree.
        if dirContainsTestSuite(file_execpath, litConfig):
            (sub_ts, subpath_in_suite) = getTestSuite(file_execpath, litConfig, testSuiteCache)
        elif dirContainsTestSuite(file_sourcepath, litConfig):
            (sub_ts, subpath_in_suite) = getTestSuite(file_sourcepath, litConfig, testSuiteCache)
        else:
            sub_ts = None
        # If the "nested" suite is actually this same suite, skip it to avoid
        # discovering the same directory twice.
        if (sub_ts is ts):
            continue
        if (sub_ts is not None):
            subiter = getTestsInSuite(sub_ts, subpath_in_suite, litConfig, testSuiteCache, localConfigCache)
        else:
            subiter = getTestsInSuite(ts, subpath, litConfig, testSuiteCache, localConfigCache)
        # Count yielded tests so empty nested suites can be reported.
        N = 0
        for res in subiter:
            N += 1
            (yield res)
        if (sub_ts and (not N)):
            litConfig.warning(('test suite %r contained no tests' % sub_ts.name))
|
def find_tests_for_inputs(lit_config, inputs):
    """find_tests_for_inputs(lit_config, inputs) -> [Test]

    Given a configuration object and a list of input specifiers, find all the
    tests to execute. An input of the form '@file' (when no file literally
    named '@file' exists) is a response file listing one input per line.
    """
    actual_inputs = []
    for input_spec in inputs:
        if os.path.exists(input_spec) or not input_spec.startswith('@'):
            actual_inputs.append(input_spec)
        else:
            # Response file: 'with' guarantees the handle is closed
            # (replaces the original manual try/finally).
            with open(input_spec[1:]) as f:
                for ln in f:
                    ln = ln.strip()
                    if ln:
                        actual_inputs.append(ln)
    tests = []
    test_suite_cache = {}
    local_config_cache = {}
    for input_spec in actual_inputs:
        prev = len(tests)
        tests.extend(getTests(input_spec, lit_config, test_suite_cache, local_config_cache)[1])
        # An input that yielded nothing is almost always a user mistake.
        if prev == len(tests):
            lit_config.warning('input %r contained no tests' % input_spec)
    # Abort if discovery itself reported errors.
    if lit_config.numErrors:
        sys.stderr.write('%d errors, exiting.\n' % lit_config.numErrors)
        sys.exit(2)
    return tests
|
def load_test_suite(inputs):
    """Wrap every lit test discovered under *inputs* in a unittest.TestSuite."""
    import platform
    import unittest
    from lit.LitTestCase import LitTestCase
    # Default quiet-less config; Windows detection matches main().
    litConfig = LitConfig.LitConfig(progname='lit', path=[], quiet=False, useValgrind=False, valgrindLeakCheck=False, valgrindArgs=[], noExecute=False, debug=False, isWindows=(platform.system() == 'Windows'), params={})
    run = lit.run.Run(litConfig, find_tests_for_inputs(litConfig, inputs))
    return unittest.TestSuite([LitTestCase(test, run) for test in run.tests])
|
def executeCommand(command, input):
    """Run *command*, feeding *input* to its stdin; return (out, err, exitCode).

    Raises KeyboardInterrupt if the child died from SIGINT. Output is decoded
    as ASCII when possible, otherwise stringified as-is.
    """
    p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # BUG FIX: 'input' was accepted but never written to the child's stdin,
    # so the child always saw EOF. Encode str input for the binary pipe.
    if isinstance(input, str):
        input = input.encode()
    (out, err) = p.communicate(input)
    exitCode = p.wait()
    if exitCode == (- signal.SIGINT):
        raise KeyboardInterrupt
    # Narrowed from bare 'except:' (which also swallowed SystemExit etc.).
    try:
        out = str(out.decode('ascii'))
    except (UnicodeDecodeError, AttributeError):
        out = str(out)
    try:
        err = str(err.decode('ascii'))
    except (UnicodeDecodeError, AttributeError):
        err = str(err)
    return (out, err, exitCode)
|
def readFile(path):
    """Return the entire contents of the text file at *path*."""
    # 'with' guarantees the descriptor is closed (the original leaked it).
    with open(path, 'r') as fd:
        return fd.read()
|
class AliveTest(FileBasedTest):
def __init__(self):
self.regex = re.compile(';\\s*(ERROR:.*)')
self.regex_args = re.compile(';\\s*TEST-ARGS:(.*)')
def execute(self, test, litConfig):
test = test.getSourcePath()
cmd = ['python', 'run.py', test]
input = readFile(test)
m = self.regex_args.search(input)
if (m != None):
cmd += m.group(1).split()
(out, err, exitCode) = executeCommand(cmd, input)
m = self.regex.search(input)
if (m == None):
if ((exitCode == 0) and (string.find(out, 'Optimization is correct') != (- 1))):
return (lit.Test.PASS, '')
return (lit.Test.FAIL, (out + err))
if ((exitCode == 1) and (string.find(out, m.group(1)) != (- 1))):
return (lit.Test.PASS, '')
return (lit.Test.FAIL, (out + err))
|
class TestFormat(object):
    """Marker base class for test formats (see FileBasedTest,
    OneCommandPerFileTest); subclasses provide discovery/execution methods."""
    pass
|
class FileBasedTest(TestFormat):
    """Test format that treats every file with a configured suffix as a test."""

    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
        """Yield one Test per non-hidden, non-excluded file whose extension
        is listed in localConfig.suffixes."""
        base_dir = testSuite.getSourcePath(path_in_suite)
        for entry in os.listdir(base_dir):
            # Skip hidden files and anything the local config excludes.
            if entry.startswith('.') or entry in localConfig.excludes:
                continue
            full_path = os.path.join(base_dir, entry)
            if os.path.isdir(full_path):
                continue
            _, extension = os.path.splitext(entry)
            if extension in localConfig.suffixes:
                yield lit.Test.Test(testSuite, path_in_suite + (entry,), localConfig)
|
class OneCommandPerFileTest(TestFormat):
    """Test format that runs a fixed command once per matching file.

    The command gets the test file path (or a temporary input produced by
    createTempInput) appended as its final argument; the test passes iff the
    command exits 0 with no output.
    """

    def __init__(self, command, dir, recursive=False, pattern='.*', useTempInput=False):
        # Accept either a single command string or an argv list.
        if isinstance(command, str):
            self.command = [command]
        else:
            self.command = list(command)
        # None means "use the suite's source directory" at discovery time.
        if dir is not None:
            dir = str(dir)
        self.dir = dir
        self.recursive = bool(recursive)
        self.pattern = re.compile(pattern)
        self.useTempInput = useTempInput

    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
        """Walk self.dir (or the suite source dir) yielding a Test per file
        matching self.pattern."""
        dir = self.dir
        if dir is None:
            dir = testSuite.getSourcePath(path_in_suite)
        for (dirname, subdirs, filenames) in os.walk(dir):
            if not self.recursive:
                subdirs[:] = []
            subdirs[:] = [d for d in subdirs if ((d != '.svn') and (d not in localConfig.excludes))]
            for filename in filenames:
                if (filename.startswith('.') or (not self.pattern.match(filename)) or (filename in localConfig.excludes)):
                    continue
                path = os.path.join(dirname, filename)
                suffix = path[len(dir):]
                if suffix.startswith(os.sep):
                    suffix = suffix[1:]
                test = lit.Test.Test(testSuite, (path_in_suite + tuple(suffix.split(os.sep))), localConfig)
                # Remember the real on-disk path so execute() can use it.
                test.source_path = path
                yield test

    def createTempInput(self, tmp, test):
        # BUG FIX: this body was the bare name 'abstract', which raised a
        # confusing NameError; raise the idiomatic exception instead.
        raise NotImplementedError('subclasses must implement createTempInput')

    def execute(self, test, litConfig):
        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED, 'Test is unsupported')
        cmd = list(self.command)
        if self.useTempInput:
            # tmp must outlive the command (it is deleted on close), so no
            # 'with' here; it is also read again when building the report.
            tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
            self.createTempInput(tmp, test)
            tmp.flush()
            cmd.append(tmp.name)
        elif hasattr(test, 'source_path'):
            cmd.append(test.source_path)
        else:
            cmd.append(test.getSourcePath())
        (out, err, exitCode) = lit.util.executeCommand(cmd)
        diags = (out + err)
        # Pass requires both a zero exit and silence on stdout/stderr.
        if (not exitCode) and (not diags.strip()):
            return (lit.Test.PASS, '')
        # Failure: build a reproduction report.
        report = ('Command: %s\n' % ' '.join([("'%s'" % a) for a in cmd]))
        if self.useTempInput:
            report += ('Temporary File: %s\n' % tmp.name)
            report += ('--\n%s--\n' % open(tmp.name).read())
        report += ('Output:\n--\n%s--' % diags)
        return (lit.Test.FAIL, report)
|
class TestingProgressDisplay(object):
    """Console reporter: prints per-test result lines and drives an optional
    progress bar as tests complete."""

    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        """Tidy the console once all tests are done."""
        if self.progressBar:
            self.progressBar.clear()
            return
        if self.opts.quiet:
            return
        if self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        """Record one finished test and print its result if appropriate."""
        self.completed += 1
        if self.opts.incremental:
            update_incremental_cache(test)
        if self.progressBar:
            fraction = float(self.completed) / self.numTests
            self.progressBar.update(fraction, test.getFullName())
        # In quiet/succinct modes only failures get a line of their own.
        interesting = test.result.code.isFailure or not (self.opts.quiet or self.opts.succinct)
        if not interesting:
            return
        if self.progressBar:
            self.progressBar.clear()
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name, self.completed, self.numTests))
        if test.result.code.isFailure and self.opts.showOutput:
            banner = '*' * 20
            print("%s TEST '%s' FAILED %s" % (banner, test.getFullName(), banner))
            print(test.result.output)
            print(banner)
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*' * 10, test.getFullName(), '*' * 10))
            for metric_name, value in sorted(test.result.metrics.items()):
                print('%s: %s ' % (metric_name, value.format()))
            print('*' * 10)
        sys.stdout.flush()
|
def write_test_results(run, lit_config, testing_time, output_path):
    """Serialize all test results from *run* (plus elapsed time and the lit
    version) as JSON to *output_path*."""
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')
    data = {}
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {'name': test.getFullName(), 'code': test.result.code.name, 'output': test.result.output, 'elapsed': test.result.elapsed}
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for (key, value) in test.result.metrics.items():
                metrics_data[key] = value.todata()
        tests_data.append(test_data)
    # 'with' replaces the original manual try/finally and guarantees close.
    with open(output_path, 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
|
def update_incremental_cache(test):
    """Touch a failing test's file so --incremental reruns it first next time."""
    if test.result.code.isFailure:
        os.utime(test.getFilePath(), None)
|
def sort_by_incremental_cache(run):
    """Sort run.tests so the most recently modified test files run first.

    Tests whose file cannot be stat'ed sort after all real files (key 0 vs
    negative mtimes).
    """
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            # Negate so the newest mtime sorts first in ascending order.
            return (- os.path.getmtime(fname))
        except OSError:
            # Narrowed from a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            return 0
    run.tests.sort(key=sortIndex)
|
def main(builtinParameters={}):
    # Command-line driver for lit: parse options, discover tests, run them,
    # and print a summary. NOTE(review): mutable default argument; benign
    # here because builtinParameters is only copied, never mutated.
    isWindows = (platform.system() == 'Windows')
    useProcessesIsDefault = (not isWindows)
    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser('usage: %prog [options] {file-or-path}')
    parser.add_option('', '--version', dest='show_version', help='Show version and exit', action='store_true', default=False)
    parser.add_option('-j', '--threads', dest='numThreads', metavar='N', help='Number of testing threads', type=int, action='store', default=None)
    parser.add_option('', '--config-prefix', dest='configPrefix', metavar='NAME', help="Prefix for 'lit' config files", action='store', default=None)
    parser.add_option('', '--param', dest='userParameters', metavar='NAME=VAL', help="Add 'NAME' = 'VAL' to the user defined parameters", type=str, action='append', default=[])
    # --- Output format options ---
    group = OptionGroup(parser, 'Output Format')
    group.add_option('-q', '--quiet', dest='quiet', help='Suppress no error output', action='store_true', default=False)
    group.add_option('-s', '--succinct', dest='succinct', help='Reduce amount of output', action='store_true', default=False)
    group.add_option('-v', '--verbose', dest='showOutput', help='Show all test output', action='store_true', default=False)
    group.add_option('-o', '--output', dest='output_path', help='Write test results to the provided path', action='store', type=str, metavar='PATH')
    group.add_option('', '--no-progress-bar', dest='useProgressBar', help='Do not use curses based progress bar', action='store_false', default=True)
    parser.add_option_group(group)
    # --- Execution options ---
    group = OptionGroup(parser, 'Test Execution')
    group.add_option('', '--path', dest='path', help='Additional paths to add to testing environment', action='append', type=str, default=[])
    group.add_option('', '--vg', dest='useValgrind', help='Run tests under valgrind', action='store_true', default=False)
    group.add_option('', '--vg-leak', dest='valgrindLeakCheck', help='Check for memory leaks under valgrind', action='store_true', default=False)
    group.add_option('', '--vg-arg', dest='valgrindArgs', metavar='ARG', help='Specify an extra argument for valgrind', type=str, action='append', default=[])
    group.add_option('', '--time-tests', dest='timeTests', help='Track elapsed wall time for each test', action='store_true', default=False)
    group.add_option('', '--no-execute', dest='noExecute', help="Don't execute any tests (assume PASS)", action='store_true', default=False)
    parser.add_option_group(group)
    # --- Selection options ---
    group = OptionGroup(parser, 'Test Selection')
    group.add_option('', '--max-tests', dest='maxTests', metavar='N', help='Maximum number of tests to run', action='store', type=int, default=None)
    group.add_option('', '--max-time', dest='maxTime', metavar='N', help='Maximum time to spend testing (in seconds)', action='store', type=float, default=None)
    group.add_option('', '--shuffle', dest='shuffle', help='Run tests in random order', action='store_true', default=False)
    group.add_option('-i', '--incremental', dest='incremental', help='Run modified and failing tests first (updates mtimes)', action='store_true', default=False)
    group.add_option('', '--filter', dest='filter', metavar='REGEX', help='Only run tests with paths matching the given regular expression', action='store', default=None)
    parser.add_option_group(group)
    # --- Debug/experimental options ---
    group = OptionGroup(parser, 'Debug and Experimental Options')
    group.add_option('', '--debug', dest='debug', help="Enable debugging (for 'lit' development)", action='store_true', default=False)
    group.add_option('', '--show-suites', dest='showSuites', help='Show discovered test suites', action='store_true', default=False)
    group.add_option('', '--show-tests', dest='showTests', help='Show all discovered tests', action='store_true', default=False)
    group.add_option('', '--use-processes', dest='useProcesses', help='Run tests in parallel with processes (not threads)', action='store_true', default=useProcessesIsDefault)
    group.add_option('', '--use-threads', dest='useProcesses', help='Run tests in parallel with threads (not processes)', action='store_false', default=useProcessesIsDefault)
    parser.add_option_group(group)
    (opts, args) = parser.parse_args()
    if opts.show_version:
        print(('lit %s' % (lit.__version__,)))
        return
    if (not args):
        parser.error('No inputs specified')
    if (opts.numThreads is None):
        # 33882624 == 0x02050200 (Python 2.5.2); older interpreters fall
        # back to a single thread.
        if (sys.hexversion >= 33882624):
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1
    inputs = args
    # Merge user-supplied --param NAME=VAL pairs over the builtin parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if ('=' not in entry):
            (name, val) = (entry, '')
        else:
            (name, val) = entry.split('=', 1)
        userParams[name] = val
    litConfig = lit.LitConfig.LitConfig(progname=os.path.basename(sys.argv[0]), path=opts.path, quiet=opts.quiet, useValgrind=opts.useValgrind, valgrindLeakCheck=opts.valgrindLeakCheck, valgrindArgs=opts.valgrindArgs, noExecute=opts.noExecute, debug=opts.debug, isWindows=isWindows, params=userParams, config_prefix=opts.configPrefix)
    run = lit.run.Run(litConfig, lit.discovery.find_tests_for_inputs(litConfig, inputs))
    # --show-suites / --show-tests: list what was discovered, then exit.
    if (opts.showSuites or opts.showTests):
        suitesAndTests = {}
        for t in run.tests:
            if (t.suite not in suitesAndTests):
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key=(lambda item: item[0].name))
        if opts.showSuites:
            print('-- Test Suites --')
            for (ts, ts_tests) in suitesAndTests:
                print((' %s - %d tests' % (ts.name, len(ts_tests))))
                print((' Source Root: %s' % ts.source_root))
                print((' Exec Root : %s' % ts.exec_root))
        if opts.showTests:
            print('-- Available Tests --')
            for (ts, ts_tests) in suitesAndTests:
                ts_tests.sort(key=(lambda test: test.path_in_suite))
                for test in ts_tests:
                    print((' %s' % (test.getFullName(),)))
        sys.exit(0)
    # Apply --filter, ordering (--shuffle / --incremental / name order),
    # and --max-tests selection.
    numTotalTests = len(run.tests)
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error(('invalid regular expression for --filter: %r' % opts.filter))
        run.tests = [t for t in run.tests if rex.search(t.getFullName())]
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key=(lambda t: t.getFullName()))
    if (opts.maxTests is not None):
        run.tests = run.tests[:opts.maxTests]
    # Never spawn more workers than there are tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)
    extra = ''
    if (len(run.tests) != numTotalTests):
        extra = (' of %d' % numTotalTests)
    header = ('-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra, opts.numThreads))
    # Pick a progress display; fall back to a simple bar when the terminal
    # does not support curses-style control.
    progressBar = None
    if (not opts.quiet):
        if (opts.succinct and opts.useProgressBar):
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)
    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime, opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()
    testing_time = (time.time() - startTime)
    if (not opts.quiet):
        print(('Testing Time: %.2fs' % (testing_time,)))
    if (opts.output_path is not None):
        write_test_results(run, litConfig, testing_time, opts.output_path)
    # Group results by result code for the summary sections below.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if (test.result.code not in byCode):
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True
    for (title, code) in (('Unexpected Passing Tests', lit.Test.XPASS), ('Failing Tests', lit.Test.FAIL), ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if (not elts):
            continue
        print(('*' * 20))
        print(('%s (%d):' % (title, len(elts))))
        for test in elts:
            print((' %s' % test.getFullName()))
        sys.stdout.write('\n')
    if (opts.timeTests and run.tests):
        test_times = [(test.getFullName(), test.result.elapsed) for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')
    # Per-code counts; quiet mode only reports failures.
    for (name, code) in (('Expected Passes ', lit.Test.PASS), ('Expected Failures ', lit.Test.XFAIL), ('Unsupported Tests ', lit.Test.UNSUPPORTED), ('Unresolved Tests ', lit.Test.UNRESOLVED), ('Unexpected Passes ', lit.Test.XPASS), ('Unexpected Failures', lit.Test.FAIL)):
        if (opts.quiet and (not code.isFailure)):
            continue
        N = len(byCode.get(code, []))
        if N:
            print((' %s: %d' % (name, N)))
    # Exit codes: 2 for harness errors, 1 for test failures, 0 for success.
    if litConfig.numErrors:
        sys.stderr.write(('\n%d error(s), exiting.\n' % litConfig.numErrors))
        sys.exit(2)
    if litConfig.numWarnings:
        sys.stderr.write(('\n%d warning(s) in tests.\n' % litConfig.numWarnings))
    if hasFailures:
        sys.exit(1)
    sys.exit(0)
|
class LockedValue(object):
    """A value guarded by a lock; read and write it through the .value property."""

    def __init__(self, value):
        self.lock = threading.Lock()
        self._value = value

    def _get_value(self):
        with self.lock:
            return self._value

    def _set_value(self, value):
        with self.lock:
            self._value = value

    value = property(_get_value, _set_value)
|
class TestProvider(object):
    """Hands out test indices to workers through a shared queue.

    The queue holds one index per test followed by one None sentinel per
    worker, so each worker sees None exactly once and stops.
    """

    def __init__(self, tests, num_jobs, queue_impl, canceled_flag):
        self.canceled_flag = canceled_flag
        self.queue = queue_impl()
        for index in range(len(tests)):
            self.queue.put(index)
        for _ in range(num_jobs):
            self.queue.put(None)

    def cancel(self):
        """Mark the run as cancelled; subsequent get() calls return None."""
        self.canceled_flag.value = 1

    def get(self):
        # After cancellation, report exhaustion immediately.
        return None if self.canceled_flag.value else self.queue.get()
|
class Tester(object):
    """Worker that pulls test indices from a provider until it is exhausted,
    reporting each result to a consumer."""

    def __init__(self, run_instance, provider, consumer):
        self.run_instance = run_instance
        self.provider = provider
        self.consumer = consumer

    def run(self):
        # The provider yields None when the run is done or cancelled.
        while True:
            index = self.provider.get()
            if index is None:
                break
            self.run_test(index)
        self.consumer.task_finished()

    def run_test(self, test_index):
        test = self.run_instance.tests[test_index]
        try:
            self.run_instance.execute_test(test)
        except KeyboardInterrupt:
            # Kill the entire process group hard; a polite unwind from a
            # worker cannot stop the other workers.
            print('\nCtrl-C detected, goodbye.')
            os.kill(0, 9)
        self.consumer.update(test_index, test)
|
class ThreadResultsConsumer(object):
    """Serializes display updates coming from multiple worker threads."""

    def __init__(self, display):
        self.display = display
        self.lock = threading.Lock()

    def update(self, test_index, test):
        with self.lock:
            self.display.update(test)

    def task_finished(self):
        # Nothing to do: threads share this process's state directly.
        pass

    def handle_results(self):
        # Results are delivered synchronously via update(); no draining needed.
        pass
|
class MultiprocessResultsConsumer(object):
    """Collects results from worker processes via a multiprocessing queue and
    replays them into the display from the parent process."""

    def __init__(self, run, display, num_jobs):
        self.run = run
        self.display = display
        self.num_jobs = num_jobs
        self.queue = multiprocessing.Queue()

    def update(self, test_index, test):
        # Only (index, result) crosses the process boundary; the Test object
        # itself stays in the worker.
        self.queue.put((test_index, test.result))

    def task_finished(self):
        # A None sentinel marks one worker's completion.
        self.queue.put(None)

    def handle_results(self):
        """Drain the queue until every worker has sent its sentinel."""
        finished = 0
        while finished != self.num_jobs:
            item = self.queue.get()
            if item is None:
                finished += 1
            else:
                index, result = item
                test = self.run.tests[index]
                test.result = result
                self.display.update(test)
|
def run_one_tester(run, provider, display):
    """Entry point for a single worker thread/process: drain the provider."""
    Tester(run, provider, display).run()
|
class Run(object):
    '\n This class represents a concrete, configured testing run.\n '
    def __init__(self, lit_config, tests):
        self.lit_config = lit_config
        self.tests = tests
    def execute_test(self, test):
        # Run one test through its configured format, normalizing the outcome
        # into a lit.Test.Result and recording elapsed wall time.
        result = None
        start_time = time.time()
        try:
            result = test.config.test_format.execute(test, self.lit_config)
            # Formats may return either a Result or a (code, output) tuple.
            if isinstance(result, tuple):
                (code, output) = result
                result = lit.Test.Result(code, output)
            elif (not isinstance(result, lit.Test.Result)):
                raise ValueError('unexpected result from test execution')
        except KeyboardInterrupt:
            raise
        except:
            # In debug mode surface the exception; otherwise record it as
            # UNRESOLVED with the traceback as output.
            if self.lit_config.debug:
                raise
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
            result = lit.Test.Result(lit.Test.UNRESOLVED, output)
        result.elapsed = (time.time() - start_time)
        test.setResult(result)
    def execute_tests(self, display, jobs, max_time=None, use_processes=False):
        '\n execute_tests(display, jobs, [max_time])\n\n Execute each of the tests in the run, using up to jobs number of\n parallel tasks, and inform the display of each individual result. The\n provided tests should be a subset of the tests available in this run\n object.\n\n If max_time is non-None, it should be a time in seconds after which to\n stop executing tests.\n\n The display object will have its update method called with each test as\n it is completed. The calls are guaranteed to be locked with respect to\n one another, but are *not* guaranteed to be called on the same thread as\n this method was invoked on.\n\n Upon completion, each test in the run will have its result\n computed. Tests which were not actually executed (for any reason) will\n be given an UNRESOLVED result.\n '
        # Prefer process-based parallelism when requested and available;
        # fall back to threads if multiprocessing setup fails.
        consumer = None
        if ((jobs != 1) and use_processes and multiprocessing):
            try:
                task_impl = multiprocessing.Process
                queue_impl = multiprocessing.Queue
                canceled_flag = multiprocessing.Value('i', 0)
                consumer = MultiprocessResultsConsumer(self, display, jobs)
            except:
                self.lit_config.note('failed to initialize multiprocessing')
                consumer = None
        if (not consumer):
            task_impl = threading.Thread
            queue_impl = queue.Queue
            canceled_flag = LockedValue(0)
            consumer = ThreadResultsConsumer(display)
        provider = TestProvider(self.tests, jobs, queue_impl, canceled_flag)
        # On Windows (win32api present) cancel the provider on console Ctrl events.
        if (win32api is not None):
            def console_ctrl_handler(type):
                provider.cancel()
                return True
            win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
        # Enforce max_time by cancelling the provider from a timer thread.
        if (max_time is not None):
            def timeout_handler():
                provider.cancel()
            timeout_timer = threading.Timer(max_time, timeout_handler)
            timeout_timer.start()
        if (jobs == 1):
            run_one_tester(self, provider, consumer)
        else:
            self._execute_tests_in_parallel(task_impl, provider, consumer, jobs)
        if (max_time is not None):
            timeout_timer.cancel()
        # Anything that never executed (cancelled/timed out) becomes UNRESOLVED.
        for test in self.tests:
            if (test.result is None):
                test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
    def _execute_tests_in_parallel(self, task_impl, provider, consumer, jobs):
        # Spawn one worker per job, collect results in this thread, then join.
        tasks = [task_impl(target=run_one_tester, args=(self, provider, consumer)) for i in range(jobs)]
        for t in tasks:
            t.start()
        consumer.handle_results()
        for t in tasks:
            t.join()
|
def detectCPUs():
    """Detects the number of CPUs on a system (best effort, >= 1).

    Cribbed from pp.
    """
    if hasattr(os, 'sysconf'):
        if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
            # Linux and most Unix variants.
            count = os.sysconf('SC_NPROCESSORS_ONLN')
            if isinstance(count, int) and count > 0:
                return count
        else:
            # BSD-style systems expose the count via sysctl.
            return int(capture(['sysctl', '-n', 'hw.ncpu']))
    if 'NUMBER_OF_PROCESSORS' in os.environ:
        # Windows.
        count = int(os.environ['NUMBER_OF_PROCESSORS'])
        if count > 0:
            return count
    # Safe default when detection fails.
    return 1
|
def mkdir_p(path):
    """mkdir_p(path) - Make the "path" directory, if it does not exist; this
    will also make directories for any missing parent directories."""
    if not path or os.path.exists(path):
        return
    parent = os.path.dirname(path)
    if parent != path:
        mkdir_p(parent)
    try:
        os.mkdir(path)
    except OSError:
        # Tolerate a concurrent creation of the same directory;
        # re-raise any other failure.
        err = sys.exc_info()[1]
        if err.errno != errno.EEXIST:
            raise
|
def capture(args, env=None):
    """capture(command) - Run the given command (or argv list) in a shell and
    return the standard output."""
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    stdout, _ = proc.communicate()
    return stdout
|
def which(command, paths=None):
    """which(command, [paths]) - Look up the given command in the paths string
    (or the PATH environment variable, if unspecified)."""
    if paths is None:
        paths = os.environ.get('PATH', '')
    # A path that already names an existing file is returned unchanged.
    if os.path.isfile(command):
        return command
    if not paths:
        paths = os.defpath
    # On Windows (';' separator) also try each PATHEXT extension.
    if os.pathsep == ';':
        extensions = os.environ.get('PATHEXT', '').split(';')
    else:
        extensions = ['']
    for directory in paths.split(os.pathsep):
        for extension in extensions:
            candidate = os.path.join(directory, command + extension)
            if os.path.exists(candidate):
                return candidate
    return None
|
def checkToolsPath(dir, tools):
    """Return True iff every name in *tools* exists inside directory *dir*."""
    return all(os.path.exists(os.path.join(dir, tool)) for tool in tools)
|
def whichTools(tools, paths):
    """Return the first directory in *paths* that contains every tool in
    *tools*, or None if no directory qualifies."""
    for candidate in paths.split(os.pathsep):
        if checkToolsPath(candidate, tools):
            return candidate
    return None
|
def printHistogram(items, title='Items'):
    # Print the slowest 20 entries of *items* ([(name, seconds), ...]) and an
    # ASCII histogram of the value distribution. Sorts *items* in place.
    items.sort(key=(lambda item: item[1]))
    maxValue = max([v for (_, v) in items])
    # Choose a "nice" bar height (5/2/2.5/1 x power of ten) yielding > 10 bins.
    power = int(math.ceil(math.log(maxValue, 10)))
    for inc in itertools.cycle((5, 2, 2.5, 1)):
        barH = (inc * (10 ** power))
        N = int(math.ceil((maxValue / barH)))
        if (N > 10):
            break
        elif (inc == 1):
            power -= 1
    # Bucket each item into one of N bins by its value.
    histo = [set() for i in range(N)]
    for (name, v) in items:
        bin = min(int(((N * v) / maxValue)), (N - 1))
        histo[bin].add(name)
    barW = 40
    hr = ('-' * (barW + 34))
    print(('\nSlowest %s:' % title))
    print(hr)
    # Last 20 entries of the ascending sort are the slowest.
    for (name, value) in items[(- 20):]:
        print(('%.2fs: %s' % (value, name)))
    print(('\n%s Times:' % title))
    print(hr)
    # Column widths: pDigits/pfDigits for the range labels, cDigits for counts.
    pDigits = int(math.ceil(math.log(maxValue, 10)))
    pfDigits = max(0, (3 - pDigits))
    if pfDigits:
        pDigits += (pfDigits + 1)
    cDigits = int(math.ceil(math.log(len(items), 10)))
    print(('[%s] :: [%s] :: [%s]' % ('Range'.center((((pDigits + 1) * 2) + 3)), 'Percentage'.center(barW), 'Count'.center(((cDigits * 2) + 1)))))
    print(hr)
    for (i, row) in enumerate(histo):
        pct = (float(len(row)) / len(items))
        w = int((barW * pct))
        print(('[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]' % (pDigits, pfDigits, (i * barH), pDigits, pfDigits, ((i + 1) * barH), ('*' * w), (' ' * (barW - w)), cDigits, len(row), cDigits, len(items))))
|
def executeCommand(command, cwd=None, env=None):
    """Run *command* in *cwd* with *env*; return (out, err, exitCode).

    Raises KeyboardInterrupt if the child died from SIGINT. kUseCloseFDs is
    a module-level flag defined elsewhere in this file.
    """
    p = subprocess.Popen(command, cwd=cwd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, close_fds=kUseCloseFDs)
    (out, err) = p.communicate()
    exitCode = p.wait()
    if exitCode == (- signal.SIGINT):
        raise KeyboardInterrupt
    # Decode as ASCII when possible, otherwise stringify. Narrowed from bare
    # 'except:' clauses, which also swallowed KeyboardInterrupt/SystemExit.
    try:
        out = str(out.decode('ascii'))
    except (UnicodeDecodeError, AttributeError):
        out = str(out)
    try:
        err = str(err.decode('ascii'))
    except (UnicodeDecodeError, AttributeError):
        err = str(err)
    return (out, err, exitCode)
|
def normalize(x):
    """Strip *x*, truncating to max_single_length chars with a ' ...' suffix
    when it is too long. (max_single_length is a module-level constant.)"""
    x = x.strip()
    if len(x) <= max_single_length:
        return x
    return x[:max_single_length] + ' ...'
|
def sample_sentences(xs, k, group_id):
    """Shuffle *xs* in place, then format up to *k* normalized sentences as
    'Group <group_id>: <text>' lines joined by newlines."""
    random.shuffle(xs)
    chosen = xs[:k]
    return '\n'.join('Group %s: %s' % (group_id, normalize(s)) for s in chosen)
|
def create_prompt_from_pos_neg_samples(positive_samples, negative_samples, k=K):
    """Build a comparison prompt: k positive samples as Group A, k negative
    samples as Group B, followed by the instruction stem."""
    block_a = sample_sentences(positive_samples, k=k, group_id='A')
    block_b = sample_sentences(negative_samples, k=k, group_id='B')
    stem = 'Compared to sentences from Group B, each sentence from Group A'
    return '%s\n\n%s\n\n%s' % (block_a, block_b, stem)
|
def describe(pos: List[str], neg: List[str], note: str='', proposer_name: str='t5ruiqi-zhong/t5proposer_0514', verifier_name: str='ruiqi-zhong/t5verifier_0514', save_folder=None):
    # End-to-end pipeline: score samples, propose hypotheses distinguishing
    # pos from neg, verify each hypothesis, and return {hypothesis: h_score}.
    # Intermediate artifacts are pickled into save_folder at each stage.
    if (save_folder is None):
        save_folder = ('end2end_jobs/' + str(random.random()))
    if (not os.path.exists(save_folder)):
        os.mkdir(save_folder)
    else:
        print(('Folder %s exists' % save_folder))
    print(('results will be saved to %s' % save_folder))
    spec = {'note': note, 'pos': pos, 'neg': neg, 'proposer_name': proposer_name, 'verifier_name': verifier_name}
    for k in ['note', 'proposer_name', 'verifier_name']:
        print(k, spec[k])
    pkl.dump(spec, open(os.path.join(save_folder, 'spec.pkl'), 'wb'))
    # Stage 1: score every sample (extreme values drive hypothesis proposal).
    extreme_vals = return_extreme_values(pos, neg)
    pkl.dump(extreme_vals, open(os.path.join(save_folder, 'get_extreme_result.pkl'), 'wb'))
    (pos2score, neg2score) = (extreme_vals['pos2score'], extreme_vals['neg2score'])
    # Stage 2: propose candidate hypotheses from the scored samples.
    proposer = init_proposer(proposer_name)
    proposed_hypotheses = proposer.propose_hypothesis(pos2score, neg2score)
    pkl.dump(proposed_hypotheses, open(os.path.join(save_folder, 'proposed_hypotheses.pkl'), 'wb'))
    # Stage 3: verify each distinct hypothesis against the raw samples.
    verifier = init_verifier(verifier_name)
    h2result = {}
    for h in set(proposed_hypotheses):
        h2result[h] = verifier.return_verification(h, pos, neg, 500)
    pkl.dump(h2result, open(os.path.join(save_folder, 'scored_hypotheses.pkl'), 'wb'))
    return {h: v['h_score'] for (h, v) in h2result.items()}
|
def sample_sentences(xs, k, group_id):
    """Shuffle *xs* in place and format up to *k* entries as
    'Group <group_id>: <text>' lines. (NOTE(review): this redefines the
    earlier sample_sentences, dropping the normalize() call.)"""
    random.shuffle(xs)
    return '\n'.join('Group %s: %s' % (group_id, sentence) for sentence in xs[:k])
|
def sort_by_score(d):
    """Return the keys of mapping *d* ordered by descending value."""
    return sorted(d.keys(), key=d.get, reverse=True)
|
def get_top_percentile(l, p, min_length=10):
    """Return roughly the top *p* percent of list *l*, but never fewer than
    *min_length* items (or the whole list, whichever is shorter)."""
    count = int((len(l) * p) / 100)
    if count < min_length:
        count = min_length
    return l[:count]
|
class Proposer():
    """Base class for hypothesis proposers: holds a model name plus a prompt
    template containing {A_block} and {B_block} slots."""

    def __init__(self, model_name, template_path):
        self.proposer_name = model_name
        self.prompt_template = open(template_path).read().strip()

    def preprocess_texts(self, x2score):
        """Normalize every text in *x2score*, ordered by descending score."""
        ordered = sort_by_score(x2score)
        return [self.normalize(text) for text in ordered]

    def create_prompt(self, A_block, B_block):
        """Fill the template's {A_block}/{B_block} slots."""
        return self.prompt_template.format(A_block=A_block, B_block=B_block)

    def propose_hypothesis(self, pos2score, neg2score, hyp_count, num_incontext_samples, temperature):
        raise NotImplementedError

    def normalize(self, x):
        raise NotImplementedError
|
class GPT3Proposer(Proposer):
    # Proposer backed by the OpenAI GPT-3 completion API.
    def __init__(self, model_name):
        super(GPT3Proposer, self).__init__(model_name, 'templates/gpt3_proposer_template.txt')
        # Token ids banned during generation via logit_bias -100.
        # NOTE(review): presumably the "Group A/B" scaffolding tokens — confirm.
        self.discouraged_toks = [4514, 8094, 33, 40798, 392, 273, 14, 11, 981, 4514, 8094, 1448, 33, 347, 1884, 40798, 290, 392, 273, 393, 14, 1220, 837, 11]
        # NOTE(review): T5TokenizerFast loaded from 'gpt2' — looks odd; verify
        # this is intentional (only used for truncation in normalize()).
        self.tok = transformers.T5TokenizerFast.from_pretrained('gpt2')
        self.hyps = {'max_tokens': 50, 'n': 1, 'top_p': 1, 'engine': self.proposer_name, 'logit_bias': {i: (- 100) for i in self.discouraged_toks}}
    def propose_hypothesis(self, pos2score, neg2score, hyp_count=90, num_incontext_samples=5, temperature=GPT3_TEMPERATURE):
        # Propose hypotheses at each configured percentile cut of the
        # score-sorted samples, splitting hyp_count evenly across cuts.
        (pos_sorted, neg_sorted) = (self.preprocess_texts(pos2score), self.preprocess_texts(neg2score))
        all_hs = []
        for percentile in percentiles:
            pos = get_top_percentile(pos_sorted, percentile)
            neg = get_top_percentile(neg_sorted, percentile)
            all_hs.extend(self.propose_w_pos_neg(pos, neg, (hyp_count // len(percentiles)), num_incontext_samples, temperature))
        return all_hs
    def propose_w_pos_neg(self, pos, neg, hyp_count, num_incontext_samples, temperature):
        # Sample hyp_count completions, retrying each up to max_try_count
        # times on API errors; Ctrl-C aborts the whole process.
        returned_hyps = []
        for _ in range(hyp_count):
            try_count = 0
            while (try_count < max_try_count):
                A_block = sample_sentences(pos, k=num_incontext_samples, group_id='A')
                B_block = sample_sentences(neg, k=num_incontext_samples, group_id='B')
                prompt = self.create_prompt(A_block, B_block)
                try:
                    response = openai.Completion.create(prompt=prompt, stop=['\n', '.'], temperature=temperature, **self.hyps)
                    h = response['choices'][0]['text'].strip()
                    returned_hyps.append(h)
                    break
                except KeyboardInterrupt:
                    exit(0)
                except Exception as e:
                    print(e)
                    try_count += 1
        return returned_hyps
    def normalize(self, x):
        # Truncate to GPT3_SAMPLE_LENGTH_LIMIT tokens via round-trip tokenization.
        return self.tok.decode(self.tok(x)['input_ids'][:GPT3_SAMPLE_LENGTH_LIMIT], skip_special_tokens=True)
|
class T5Proposer(Proposer):
    # Proposer backed by a local T5 conditional-generation checkpoint.
    def __init__(self, model_name, verbose=True):
        super(T5Proposer, self).__init__(model_name, 'templates/t5_ai2_proposer_template.txt')
        if verbose:
            print('loading model')
        # fp16 on 'device' (a module-level global defined elsewhere).
        self.model = transformers.T5ForConditionalGeneration.from_pretrained(model_name).half().to(device)
        self.model.eval()
        if verbose:
            print('loading finishes')
        self.tok = transformers.T5TokenizerFast.from_pretrained('t5-small')
        # bad_words_ids sequences banned during generation.
        self.discouraged_toks = [[298], [563], [71], [272], [952], [1531], [3, 87], [3, 6]]
    def normalize(self, x):
        # Truncate to T5_SAMPLE_LENGTH_LIMIT tokens via round-trip tokenization.
        return self.tok.decode(self.tok(x)['input_ids'][:T5_SAMPLE_LENGTH_LIMIT], skip_special_tokens=True)
    def propose_hypothesis(self, pos2score, neg2score, hyp_count=90, num_incontext_samples=5, temperature=T5_TEMPERATURE):
        # Sweep 2 ensemble methods x 3 percentile cuts x 3 ensemble sizes
        # (= 18 configurations), drawing hyp_count // 18 samples from each.
        (pos_sorted, neg_sorted) = (self.preprocess_texts(pos2score), self.preprocess_texts(neg2score))
        all_hs = []
        for ensemble_method in ['prob', 'logit']:
            for percentile in [10, 20, 100]:
                pos = get_top_percentile(pos_sorted, percentile)
                neg = get_top_percentile(neg_sorted, percentile)
                for num_prompt_ensemble in [1, 3, 5]:
                    for _ in range((hyp_count // 18)):
                        prompts = []
                        for j in range(num_prompt_ensemble):
                            A_block = sample_sentences(pos, k=num_incontext_samples, group_id='A')
                            B_block = sample_sentences(neg, k=num_incontext_samples, group_id='B')
                            prompt = self.create_prompt(A_block, B_block)
                            prompts.append(prompt)
                        hs = self.inference_on_ensemble_prompts(prompts, 1, temperature, ensemble_method)
                        all_hs.extend(hs)
        return all_hs
    def inference_on_ensemble_prompts(self, prompts, n, temperature, ensemble_method):
        # NOTE(review): ensemble_sample/ensemble_method are not standard
        # transformers generate() kwargs — presumably a patched fork; confirm.
        input_dict = self.tok(prompts, return_tensors='pt', padding=True).to(device)
        input_dict['bad_words_ids'] = self.discouraged_toks
        generated_tokens = self.model.generate(**input_dict, output_scores=True, return_dict_in_generate=True, do_sample=True, top_k=0, num_return_sequences=n, temperature=temperature, ensemble_sample=True, ensemble_method=ensemble_method)
        completions = self.tok.batch_decode(generated_tokens.sequences, skip_special_tokens=True)
        return completions[:n]
|
def init_proposer(proposer_name):
    """Factory for proposers from a prefixed name.

    't5<model>'  -> T5Proposer('<model>')
    'gpt3<eng>'  -> GPT3Proposer('<eng>')

    Raises Exception for any other prefix.
    """
    if proposer_name[:2] == 't5':
        return T5Proposer(proposer_name[2:])
    if proposer_name[:4] == 'gpt3':
        # BUG FIX: this branch previously returned T5Proposer(proposer_name[4:]),
        # which would try to load a GPT-3 engine name as a T5 checkpoint.
        return GPT3Proposer(proposer_name[4:])
    raise Exception(('Proposer %s has not been implemented' % proposer_name))
|
class ISSampler(BatchSampler):
    '\n    Sampler which alternates between live sampling iterations using BatchSampler\n    and importance sampling iterations.\n    '

    def __init__(self, algo, n_backtrack='all', n_is_pretrain=0, init_is=0, skip_is_itrs=False, hist_variance_penalty=0.0, max_is_ratio=0, ess_threshold=0):
        '\n        :type algo: BatchPolopt\n        :param n_backtrack: Number of past policies to update from\n        :param n_is_pretrain: Number of importance sampling iterations to\n        perform in beginning of training\n        :param init_is: (True/False) set initial iteration (after pretrain) an\n        importance sampling iteration\n        :param skip_is_itrs: (True/False) do not do any importance sampling\n        iterations (after pretrain)\n        :param hist_variance_penalty: penalize variance of historical policy\n        :param max_is_ratio: maximum allowed importance sampling ratio\n        :param ess_threshold: minimum effective sample size required\n        '
        self.n_backtrack = n_backtrack
        self.n_is_pretrain = n_is_pretrain
        self.skip_is_itrs = skip_is_itrs
        self.hist_variance_penalty = hist_variance_penalty
        self.max_is_ratio = max_is_ratio
        self.ess_threshold = ess_threshold
        # list of (policy distribution, paths) tuples, oldest first
        self._hist = []
        # 0/1 flag, toggled in obtain_samples(); truthy => IS iteration
        self._is_itr = init_is
        super(ISSampler, self).__init__(algo)

    @property
    def history(self):
        '\n        History of policies that have interacted with the environment and the\n        data from interaction episode(s)\n        '
        return self._hist

    def add_history(self, policy_distribution, paths):
        '\n        Store policy distribution and paths in history\n        '
        self._hist.append((policy_distribution, paths))

    def get_history_list(self, n_past='all'):
        '\n        Get list of (distribution, data) tuples from history\n        '
        if (n_past == 'all'):
            return self._hist
        # most recent n_past entries (all of them if fewer exist)
        return self._hist[(- min(n_past, len(self._hist))):]

    def obtain_samples(self, itr):
        # Pretraining phase: importance-sampled data only, no live rollouts.
        if (itr < self.n_is_pretrain):
            paths = self.obtain_is_samples(itr)
            return paths
        if (self._is_itr and (not self.skip_is_itrs)):
            paths = self.obtain_is_samples(itr)
        else:
            # live sampling via the parent BatchSampler; record the policy that
            # produced these paths so they can be reweighted later
            paths = super(ISSampler, self).obtain_samples(itr)
            if (not self.skip_is_itrs):
                self.add_history(self.algo.policy.distribution, paths)
        # alternate live <-> IS iterations
        self._is_itr = ((self._is_itr + 1) % 2)
        return paths

    def obtain_is_samples(self, itr):
        # Reweight stored paths from up to n_backtrack past policies.
        paths = []
        for (hist_policy_distribution, hist_paths) in self.get_history_list(self.n_backtrack):
            h_paths = self.sample_isweighted_paths(policy=self.algo.policy, hist_policy_distribution=hist_policy_distribution, max_samples=self.algo.batch_size, max_path_length=self.algo.max_path_length, paths=hist_paths, hist_variance_penalty=self.hist_variance_penalty, max_is_ratio=self.max_is_ratio, ess_threshold=self.ess_threshold)
            paths.extend(h_paths)
        # subsample if more than one batch worth of paths was gathered
        if (len(paths) > self.algo.batch_size):
            paths = random.sample(paths, self.algo.batch_size)
        if self.algo.whole_paths:
            return paths
        else:
            paths_truncated = parallel_sampler.truncate_paths(paths, self.algo.batch_size)
            return paths_truncated

    def sample_isweighted_paths(self, policy, hist_policy_distribution, max_samples, max_path_length=100, paths=None, randomize_draw=False, hist_variance_penalty=0.0, max_is_ratio=10, ess_threshold=0):
        """Reweight historical paths by the likelihood ratio of the current
        policy vs. the (historical) policy that generated them."""
        if (not paths):
            return []
        n_paths = len(paths)  # NOTE(review): unused
        n_samples = min(len(paths), max_samples)
        if randomize_draw:
            samples = random.sample(paths, n_samples)
        elif paths:
            if (n_samples == len(paths)):
                samples = paths
            else:
                # random contiguous slice rather than a random subset
                start = random.randint(0, (len(paths) - n_samples))
                samples = paths[start:(start + n_samples)]
        # deep copy: rewards and agent_infos are mutated below
        samples = copy.deepcopy(samples)
        if (ess_threshold > 0):
            is_weights = []
        dist1 = policy.distribution
        dist2 = hist_policy_distribution
        for path in samples:
            (_, agent_infos) = policy.get_actions(path['observations'])
            hist_agent_infos = path['agent_infos']
            if (hist_variance_penalty > 0):
                # inflate the historical std: log_std += log(1 + penalty)
                hist_agent_infos['log_std'] += log((1.0 + hist_variance_penalty))
            path['agent_infos'] = agent_infos
            loglike_p = dist1.log_likelihood(path['actions'], agent_infos)
            loglike_hp = dist2.log_likelihood(path['actions'], hist_agent_infos)
            # per-path importance ratio p_current(traj) / p_historical(traj)
            is_ratio = exp((sum(loglike_p) - sum(loglike_hp)))
            if (max_is_ratio > 0):
                # clip the ratio to limit variance
                is_ratio = min(is_ratio, max_is_ratio)
            if (ess_threshold > 0):
                is_weights.append(is_ratio)
            path['rewards'] *= is_ratio
        if ess_threshold:
            # discard the whole batch if the effective sample size is too small
            if (kong_ess(is_weights) < ess_threshold):
                return []
        return samples
|
def kong_ess(weights):
    """Kong's effective-sample-size estimate for importance weights.

    ESS = n / (1 + Var(w)).  Returns 0.0 for an empty weight list (previously
    this produced nan plus a numpy warning from var([])).
    """
    if len(weights) == 0:
        return 0.0
    return len(weights) / (1.0 + np.var(weights))
|
class VG(VariantGenerator):
    """Hyperparameter grid for the experiment sweep."""

    @variant
    def seed(self):
        return list(range(2))

    @variant
    def fast_batch_size(self):
        return [20, 50]

    @variant
    def fast_learning_rate(self):
        return [0.5, 1]

    @variant
    def meta_batch_size(self):
        return [20]

    @variant
    def meta_learning_rate(self):
        return [0.01]

    @variant
    def kl_weighting(self):
        return [0.1, 0.5]

    @variant
    def latent_dim(self):
        return [2]

    @variant
    def init_std(self):
        return [1]

    @variant
    def exp_name(self, fast_batch_size, fast_learning_rate, meta_batch_size, meta_learning_rate, kl_weighting, latent_dim, init_std, seed):
        # Encode every hyperparameter into a single experiment-name string.
        yield 'fbs_{}_flr_{}_mbs_{}_mlr_{}_kl_{}_ldim_{}_initStd_{}_seed_{}'.format(fast_batch_size, fast_learning_rate, meta_batch_size, meta_learning_rate, kl_weighting, latent_dim, init_std, seed)
|
class Algorithm(object):
    """Marker base class shared by all algorithm types; carries no behavior."""
    pass
|
class RLAlgorithm(Algorithm):
    """Abstract base for RL algorithms; concrete subclasses implement train()."""
    def train(self):
        # Subclasses must run their full training procedure here.
        raise NotImplementedError
|
class BatchSampler(BaseSampler):
    """Collects trajectory batches for a BatchPolopt algorithm through the
    parallel sampler workers."""

    def __init__(self, algo):
        """:type algo: BatchPolopt"""
        self.algo = algo

    def start_worker(self):
        # Ship the env and policy to the workers under this algorithm's scope.
        algo = self.algo
        parallel_sampler.populate_task(algo.env, algo.policy, scope=algo.scope)

    def shutdown_worker(self):
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr):
        # Sample one batch of rollouts with the current policy parameters.
        algo = self.algo
        paths = parallel_sampler.sample_paths(policy_params=algo.policy.get_param_values(), max_samples=algo.batch_size, max_path_length=algo.max_path_length, scope=algo.scope)
        if algo.whole_paths:
            return paths
        return parallel_sampler.truncate_paths(paths, algo.batch_size)
|
class BatchPolopt(RLAlgorithm):
    '\n    Base class for batch sampling-based policy optimization methods.\n    This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.\n    '

    def __init__(self, env, policy, baseline, scope=None, n_itr=500, start_itr=0, batch_size=5000, max_path_length=500, discount=0.99, gae_lambda=1, plot=False, pause_for_plot=False, center_adv=True, positive_adv=False, store_paths=False, whole_paths=True, sampler_cls=None, sampler_args=None, **kwargs):
        '\n        :param env: Environment\n        :param policy: Policy\n        :type policy: Policy\n        :param baseline: Baseline\n        :param scope: Scope for identifying the algorithm. Must be specified if running multiple algorithms\n        simultaneously, each using different environments and policies\n        :param n_itr: Number of iterations.\n        :param start_itr: Starting iteration.\n        :param batch_size: Number of samples per iteration.\n        :param max_path_length: Maximum length of a single rollout.\n        :param discount: Discount.\n        :param gae_lambda: Lambda used for generalized advantage estimation.\n        :param plot: Plot evaluation run after each iteration.\n        :param pause_for_plot: Whether to pause before contiuing when plotting.\n        :param center_adv: Whether to rescale the advantages so that they have mean 0 and standard deviation 1.\n        :param positive_adv: Whether to shift the advantages so that they are always positive. When used in\n        conjunction with center_adv the advantages will be standardized before shifting.\n        :param store_paths: Whether to save all paths data to the snapshot.\n        '
        self.env = env
        self.policy = policy
        self.baseline = baseline
        self.scope = scope
        self.n_itr = n_itr
        self.current_itr = start_itr
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        # default to plain live batch sampling when no sampler is specified
        if (sampler_cls is None):
            sampler_cls = BatchSampler
        if (sampler_args is None):
            sampler_args = dict()
        self.sampler = sampler_cls(self, **sampler_args)

    def start_worker(self):
        # Spin up sampler workers (and the plotter, if enabled).
        self.sampler.start_worker()
        if self.plot:
            plotter.init_plot(self.env, self.policy)

    def shutdown_worker(self):
        self.sampler.shutdown_worker()

    def train(self):
        # Main loop: sample -> process -> optimize -> snapshot, per iteration.
        self.start_worker()
        self.init_opt()
        for itr in range(self.current_itr, self.n_itr):
            with logger.prefix(('itr #%d | ' % itr)):
                paths = self.sampler.obtain_samples(itr)
                samples_data = self.sampler.process_samples(itr, paths)
                self.log_diagnostics(paths)
                self.optimize_policy(itr, samples_data)
                logger.log('saving snapshot...')
                params = self.get_itr_snapshot(itr, samples_data)
                # record the resume point before saving so a restored run
                # continues from the next iteration
                self.current_itr = (itr + 1)
                params['algo'] = self
                if self.store_paths:
                    params['paths'] = samples_data['paths']
                logger.save_itr_params(itr, params)
                logger.log('saved')
                logger.dump_tabular(with_prefix=False)
                if self.plot:
                    self.update_plot()
                    if self.pause_for_plot:
                        input('Plotting evaluation run: Press Enter to continue...')
        self.shutdown_worker()

    def log_diagnostics(self, paths):
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        self.baseline.log_diagnostics(paths)

    def init_opt(self):
        '\n        Initialize the optimization procedure. If using theano / cgt, this may\n        include declaring all the variables and compiling functions\n        '
        raise NotImplementedError

    def get_itr_snapshot(self, itr, samples_data):
        '\n        Returns all the data that should be saved in the snapshot for this\n        iteration.\n        '
        raise NotImplementedError

    def optimize_policy(self, itr, samples_data):
        # Subclasses perform one policy-optimization step on samples_data.
        raise NotImplementedError

    def update_plot(self):
        if self.plot:
            plotter.update_plot(self.policy, self.max_path_length)
|
def _worker_rollout_policy(G, args):
    """Worker-side helper: perturb the mean parameter vector with Gaussian
    noise, roll out one episode, and return ((params, path), increment).

    The increment counts either environment steps ('samples') or whole
    episodes ('paths'), depending on args['criterion'].
    """
    std_flat = args['sample_std'].flatten()
    mean_flat = args['cur_mean'].flatten()
    dim = len(mean_flat)
    sampled_params = np.random.standard_normal(dim) * std_flat + mean_flat
    G.policy.set_param_values(sampled_params)
    path = rollout(G.env, G.policy, args['max_path_length'])
    path['returns'] = discount_cumsum(path['rewards'], args['discount'])
    path['undiscounted_return'] = sum(path['rewards'])
    criterion = args['criterion']
    if criterion == 'samples':
        inc = len(path['rewards'])
    elif criterion == 'paths':
        inc = 1
    else:
        raise NotImplementedError
    return ((sampled_params, path), inc)
|
class CEM(RLAlgorithm, Serializable):
    """Cross-Entropy Method: iteratively refit a diagonal Gaussian over policy
    parameters to the top `best_frac` of sampled parameter vectors."""

    def __init__(self, env, policy, n_itr=500, max_path_length=500, discount=0.99, init_std=1.0, n_samples=100, batch_size=None, best_frac=0.05, extra_std=1.0, extra_decay_time=100, plot=False, **kwargs):
        '\n        :param n_itr: Number of iterations.\n        :param max_path_length: Maximum length of a single rollout.\n        :param batch_size: # of samples from trajs from param distribution, when this\n        is set, n_samples is ignored\n        :param discount: Discount.\n        :param plot: Plot evaluation run after each iteration.\n        :param init_std: Initial std for param distribution\n        :param extra_std: Decaying std added to param distribution at each iteration\n        :param extra_decay_time: Iterations that it takes to decay extra std\n        :param n_samples: #of samples from param distribution\n        :param best_frac: Best fraction of the sampled params\n        :return:\n        '
        Serializable.quick_init(self, locals())
        self.env = env
        self.policy = policy
        self.batch_size = batch_size
        self.plot = plot
        self.extra_decay_time = extra_decay_time
        self.extra_std = extra_std
        self.best_frac = best_frac
        self.n_samples = n_samples
        self.init_std = init_std
        self.discount = discount
        self.max_path_length = max_path_length
        self.n_itr = n_itr

    def train(self):
        parallel_sampler.populate_task(self.env, self.policy)
        if self.plot:
            plotter.init_plot(self.env, self.policy)
        cur_std = self.init_std
        cur_mean = self.policy.get_param_values()
        # number of elite samples kept each iteration (at least one)
        n_best = max(1, int(self.n_samples * self.best_frac))
        for itr in range(self.n_itr):
            # extra exploration noise, linearly decayed over extra_decay_time
            extra_var_mult = max(1.0 - itr / self.extra_decay_time, 0)
            sample_std = np.sqrt(np.square(cur_std) + np.square(self.extra_std) * extra_var_mult)
            if self.batch_size is None:
                # stop after a fixed number of rollouts
                criterion = 'paths'
                threshold = self.n_samples
            else:
                # stop after a fixed number of environment steps
                criterion = 'samples'
                threshold = self.batch_size
            infos = stateful_pool.singleton_pool.run_collect(_worker_rollout_policy, threshold=threshold, args=(dict(cur_mean=cur_mean, sample_std=sample_std, max_path_length=self.max_path_length, discount=self.discount, criterion=criterion),))
            xs = np.asarray([info[0] for info in infos])
            paths = [info[1] for info in infos]
            # discounted return of each sampled parameter vector
            fs = np.array([path['returns'][0] for path in paths])
            print((xs.shape, fs.shape))
            best_inds = (-fs).argsort()[:n_best]
            best_xs = xs[best_inds]
            # refit the sampling distribution to the elite set
            cur_mean = best_xs.mean(axis=0)
            cur_std = best_xs.std(axis=0)
            best_x = best_xs[0]
            logger.push_prefix(('itr #%d | ' % itr))
            logger.record_tabular('Iteration', itr)
            logger.record_tabular('CurStdMean', np.mean(cur_std))
            undiscounted_returns = np.array([path['undiscounted_return'] for path in paths])
            logger.record_tabular('AverageReturn', np.mean(undiscounted_returns))
            # BUG FIX: StdReturn was previously recorded with np.mean(...)
            logger.record_tabular('StdReturn', np.std(undiscounted_returns))
            logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
            logger.record_tabular('MinReturn', np.min(undiscounted_returns))
            logger.record_tabular('AverageDiscountedReturn', np.mean(fs))
            logger.record_tabular('AvgTrajLen', np.mean([len(path['returns']) for path in paths]))
            logger.record_tabular('NumTrajs', len(paths))
            # deploy the single best parameter vector found this iteration
            self.policy.set_param_values(best_x)
            self.env.log_diagnostics(paths)
            self.policy.log_diagnostics(paths)
            logger.save_itr_params(itr, dict(itr=itr, policy=self.policy, env=self.env, cur_mean=cur_mean, cur_std=cur_std))
            logger.dump_tabular(with_prefix=False)
            logger.pop_prefix()
            if self.plot:
                plotter.update_plot(self.policy, self.max_path_length)
        parallel_sampler.terminate_task()
|
def sample_return(G, params, max_path_length, discount):
    """Roll out one episode with the given parameter vector and annotate the
    path with its discounted and undiscounted returns."""
    G.policy.set_param_values(params)
    episode = rollout(G.env, G.policy, max_path_length)
    rewards = episode['rewards']
    episode['returns'] = discount_cumsum(rewards, discount)
    episode['undiscounted_return'] = sum(rewards)
    return episode
|
class CMAES(RLAlgorithm, Serializable):
    """Covariance Matrix Adaptation Evolution Strategy over policy parameters."""

    def __init__(self, env, policy, n_itr=500, max_path_length=500, discount=0.99, sigma0=1.0, batch_size=None, plot=False, **kwargs):
        '\n        :param n_itr: Number of iterations.\n        :param max_path_length: Maximum length of a single rollout.\n        :param batch_size: # of samples from trajs from param distribution, when this\n        is set, n_samples is ignored\n        :param discount: Discount.\n        :param plot: Plot evaluation run after each iteration.\n        :param sigma0: Initial std for param dist\n        :return:\n        '
        Serializable.quick_init(self, locals())
        self.env = env
        self.policy = policy
        self.plot = plot
        self.sigma0 = sigma0
        self.discount = discount
        self.max_path_length = max_path_length
        self.n_itr = n_itr
        self.batch_size = batch_size

    def train(self):
        """Ask CMA-ES for candidate parameter vectors, evaluate them with
        rollouts, and tell the optimizer the negated discounted returns."""
        cur_std = self.sigma0
        cur_mean = self.policy.get_param_values()
        es = cma_es_lib.CMAEvolutionStrategy(cur_mean, cur_std)
        parallel_sampler.populate_task(self.env, self.policy)
        if self.plot:
            plotter.init_plot(self.env, self.policy)
        # (cleanup: removed redundant re-assignment of cur_std / cur_mean here)
        itr = 0
        while itr < self.n_itr and not es.stop():
            if self.batch_size is None:
                # one rollout per candidate
                xs = es.ask()
                xs = np.asarray(xs)
                infos = stateful_pool.singleton_pool.run_map(sample_return, [(x, self.max_path_length, self.discount) for x in xs])
            else:
                # keep asking for small candidate batches until at least
                # batch_size environment steps have been accumulated
                cum_len = 0
                infos = []
                xss = []
                done = False
                while not done:
                    sbs = stateful_pool.singleton_pool.n_parallel * 2
                    xs = es.ask(sbs)
                    xs = np.asarray(xs)
                    xss.append(xs)
                    sinfos = stateful_pool.singleton_pool.run_map(sample_return, [(x, self.max_path_length, self.discount) for x in xs])
                    for info in sinfos:
                        infos.append(info)
                        cum_len += len(info['returns'])
                        if cum_len >= self.batch_size:
                            xs = np.concatenate(xss)
                            done = True
                            break
            # CMA-ES minimizes, so negate the discounted returns
            fs = -np.array([info['returns'][0] for info in infos])
            # the batched branch may have asked for more candidates than it
            # evaluated; drop the unevaluated tail
            xs = xs[:len(fs)]
            es.tell(xs, fs)
            logger.push_prefix(('itr #%d | ' % itr))
            logger.record_tabular('Iteration', itr)
            logger.record_tabular('CurStdMean', np.mean(cur_std))
            undiscounted_returns = np.array([info['undiscounted_return'] for info in infos])
            logger.record_tabular('AverageReturn', np.mean(undiscounted_returns))
            # BUG FIX: StdReturn was previously recorded with np.mean(...)
            logger.record_tabular('StdReturn', np.std(undiscounted_returns))
            logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
            logger.record_tabular('MinReturn', np.min(undiscounted_returns))
            # NOTE: fs is negated, so this logs the negative mean discounted
            # return (kept as original behavior)
            logger.record_tabular('AverageDiscountedReturn', np.mean(fs))
            logger.record_tabular('AvgTrajLen', np.mean([len(info['returns']) for info in infos]))
            self.env.log_diagnostics(infos)
            self.policy.log_diagnostics(infos)
            logger.save_itr_params(itr, dict(itr=itr, policy=self.policy, env=self.env))
            logger.dump_tabular(with_prefix=False)
            if self.plot:
                plotter.update_plot(self.policy, self.max_path_length)
            logger.pop_prefix()
            itr += 1
        # deploy the best solution CMA-ES found
        self.policy.set_param_values(es.result()[0])
        parallel_sampler.terminate_task()
|
def parse_update_method(update_method, **kwargs):
    """Resolve an optimizer name ('adam' or 'sgd') to a partially-applied
    lasagne update function; extra kwargs are compacted and forwarded.

    Raises NotImplementedError for any other name.
    """
    if update_method == 'adam':
        return partial(lasagne.updates.adam, **ext.compact(kwargs))
    if update_method == 'sgd':
        return partial(lasagne.updates.sgd, **ext.compact(kwargs))
    raise NotImplementedError
|
class SimpleReplayPool(object):
    """Fixed-capacity ring buffer of (observation, action, reward, terminal)
    transitions for experience replay."""

    def __init__(self, max_pool_size, observation_dim, action_dim):
        self._observation_dim = observation_dim
        self._action_dim = action_dim
        self._max_pool_size = max_pool_size
        self._observations = np.zeros((max_pool_size, observation_dim))
        self._actions = np.zeros((max_pool_size, action_dim))
        self._rewards = np.zeros(max_pool_size)
        self._terminals = np.zeros(max_pool_size, dtype='uint8')
        # ring-buffer bookkeeping: oldest slot, next write slot, current fill
        self._bottom = 0
        self._top = 0
        self._size = 0

    def add_sample(self, observation, action, reward, terminal):
        """Write one transition at the head, evicting the oldest when full."""
        slot = self._top
        self._observations[slot] = observation
        self._actions[slot] = action
        self._rewards[slot] = reward
        self._terminals[slot] = terminal
        self._top = (slot + 1) % self._max_pool_size
        if self._size >= self._max_pool_size:
            # pool is full: advance the tail past the overwritten entry
            self._bottom = (self._bottom + 1) % self._max_pool_size
        else:
            self._size += 1

    def random_batch(self, batch_size):
        """Sample `batch_size` transitions uniformly, paired with their
        successor states (`next_observations`)."""
        assert self._size > batch_size
        chosen = np.zeros(batch_size, dtype='uint64')
        successors = np.zeros(batch_size, dtype='uint64')
        filled = 0
        while filled < batch_size:
            candidate = np.random.randint(self._bottom, self._bottom + self._size) % self._max_pool_size
            # skip the index whose successor would cross the write head
            if candidate == self._size - 1 and self._size <= self._max_pool_size:
                continue
            chosen[filled] = candidate
            successors[filled] = (candidate + 1) % self._max_pool_size
            filled += 1
        return dict(observations=self._observations[chosen], actions=self._actions[chosen], rewards=self._rewards[chosen], terminals=self._terminals[chosen], next_observations=self._observations[successors])

    @property
    def size(self):
        """Number of transitions currently stored."""
        return self._size
|
class DDPG(RLAlgorithm):
    '\n    Deep Deterministic Policy Gradient.\n    '

    def __init__(self, env, policy, qf, es, batch_size=32, n_epochs=200, epoch_length=1000, min_pool_size=10000, replay_pool_size=1000000, discount=0.99, max_path_length=250, qf_weight_decay=0.0, qf_update_method='adam', qf_learning_rate=0.001, policy_weight_decay=0, policy_update_method='adam', policy_learning_rate=0.001, eval_samples=10000, soft_target=True, soft_target_tau=0.001, n_updates_per_sample=1, scale_reward=1.0, include_horizon_terminal_transitions=False, plot=False, pause_for_plot=False):
        '\n        :param env: Environment\n        :param policy: Policy\n        :param qf: Q function\n        :param es: Exploration strategy\n        :param batch_size: Number of samples for each minibatch.\n        :param n_epochs: Number of epochs. Policy will be evaluated after each epoch.\n        :param epoch_length: How many timesteps for each epoch.\n        :param min_pool_size: Minimum size of the pool to start training.\n        :param replay_pool_size: Size of the experience replay pool.\n        :param discount: Discount factor for the cumulative return.\n        :param max_path_length: Discount factor for the cumulative return.\n        :param qf_weight_decay: Weight decay factor for parameters of the Q function.\n        :param qf_update_method: Online optimization method for training Q function.\n        :param qf_learning_rate: Learning rate for training Q function.\n        :param policy_weight_decay: Weight decay factor for parameters of the policy.\n        :param policy_update_method: Online optimization method for training the policy.\n        :param policy_learning_rate: Learning rate for training the policy.\n        :param eval_samples: Number of samples (timesteps) for evaluating the policy.\n        :param soft_target_tau: Interpolation parameter for doing the soft target update.\n        :param n_updates_per_sample: Number of Q function and policy updates per new sample obtained\n        :param scale_reward: The scaling factor applied to the rewards when training\n        :param include_horizon_terminal_transitions: whether to include transitions with terminal=True because the\n        horizon was reached. This might make the Q value back up less stable for certain tasks.\n        :param plot: Whether to visualize the policy performance after each eval_interval.\n        :param pause_for_plot: Whether to pause before continuing when plotting.\n        :return:\n        '
        # NOTE(review): the `soft_target` parameter is accepted but never used
        # anywhere in this class.
        self.env = env
        self.policy = policy
        self.qf = qf
        self.es = es
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.epoch_length = epoch_length
        self.min_pool_size = min_pool_size
        self.replay_pool_size = replay_pool_size
        self.discount = discount
        self.max_path_length = max_path_length
        self.qf_weight_decay = qf_weight_decay
        self.qf_update_method = parse_update_method(qf_update_method, learning_rate=qf_learning_rate)
        self.qf_learning_rate = qf_learning_rate
        self.policy_weight_decay = policy_weight_decay
        self.policy_update_method = parse_update_method(policy_update_method, learning_rate=policy_learning_rate)
        self.policy_learning_rate = policy_learning_rate
        self.eval_samples = eval_samples
        self.soft_target_tau = soft_target_tau
        self.n_updates_per_sample = n_updates_per_sample
        self.include_horizon_terminal_transitions = include_horizon_terminal_transitions
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        # running statistics; flushed after each evaluate() call
        self.qf_loss_averages = []
        self.policy_surr_averages = []
        self.q_averages = []
        self.y_averages = []
        self.paths = []
        self.es_path_returns = []
        self.paths_samples_cnt = 0
        self.scale_reward = scale_reward
        self.opt_info = None

    def start_worker(self):
        parallel_sampler.populate_task(self.env, self.policy)
        if self.plot:
            plotter.init_plot(self.env, self.policy)

    @overrides
    def train(self):
        # Interleave environment interaction with off-policy minibatch updates.
        pool = SimpleReplayPool(max_pool_size=self.replay_pool_size, observation_dim=self.env.observation_space.flat_dim, action_dim=self.env.action_space.flat_dim)
        self.start_worker()
        self.init_opt()
        itr = 0
        path_length = 0
        path_return = 0
        terminal = False
        observation = self.env.reset()
        # behavior-policy copy used for action selection during collection
        sample_policy = pickle.loads(pickle.dumps(self.policy))
        for epoch in range(self.n_epochs):
            logger.push_prefix(('epoch #%d | ' % epoch))
            logger.log('Training started')
            for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
                if terminal:
                    # episode boundary: reset env, exploration noise and
                    # recurrent policy state; record the episode return
                    observation = self.env.reset()
                    self.es.reset()
                    sample_policy.reset()
                    self.es_path_returns.append(path_return)
                    path_length = 0
                    path_return = 0
                action = self.es.get_action(itr, observation, policy=sample_policy)
                (next_observation, reward, terminal, _) = self.env.step(action)
                path_length += 1
                path_return += reward
                if ((not terminal) and (path_length >= self.max_path_length)):
                    # horizon reached: force termination; only store this
                    # artificial terminal transition when requested
                    terminal = True
                    if self.include_horizon_terminal_transitions:
                        pool.add_sample(observation, action, (reward * self.scale_reward), terminal)
                else:
                    pool.add_sample(observation, action, (reward * self.scale_reward), terminal)
                observation = next_observation
                if (pool.size >= self.min_pool_size):
                    for update_itr in range(self.n_updates_per_sample):
                        batch = pool.random_batch(self.batch_size)
                        self.do_training(itr, batch)
                    # sync the behavior policy with the freshly updated one
                    sample_policy.set_param_values(self.policy.get_param_values())
                itr += 1
            logger.log('Training finished')
            if (pool.size >= self.min_pool_size):
                self.evaluate(epoch, pool)
                params = self.get_epoch_snapshot(epoch)
                logger.save_itr_params(epoch, params)
            logger.dump_tabular(with_prefix=False)
            logger.pop_prefix()
            if self.plot:
                self.update_plot()
                if self.pause_for_plot:
                    input('Plotting evaluation run: Press Enter to continue...')
        self.env.terminate()
        self.policy.terminate()

    def init_opt(self):
        """Build the theano training functions for the Q function and policy."""
        # frozen copies serve as slowly-updated target networks
        target_policy = pickle.loads(pickle.dumps(self.policy))
        target_qf = pickle.loads(pickle.dumps(self.qf))
        obs = self.env.observation_space.new_tensor_variable('obs', extra_dims=1)
        action = self.env.action_space.new_tensor_variable('action', extra_dims=1)
        yvar = TT.vector('ys')
        # L2 weight decay over the regularizable Q-function parameters
        qf_weight_decay_term = ((0.5 * self.qf_weight_decay) * sum([TT.sum(TT.square(param)) for param in self.qf.get_params(regularizable=True)]))
        qval = self.qf.get_qval_sym(obs, action)
        qf_loss = TT.mean(TT.square((yvar - qval)))
        qf_reg_loss = (qf_loss + qf_weight_decay_term)
        policy_weight_decay_term = ((0.5 * self.policy_weight_decay) * sum([TT.sum(TT.square(param)) for param in self.policy.get_params(regularizable=True)]))
        # maximize Q(s, pi(s)) by minimizing its negation
        policy_qval = self.qf.get_qval_sym(obs, self.policy.get_action_sym(obs), deterministic=True)
        policy_surr = (- TT.mean(policy_qval))
        policy_reg_surr = (policy_surr + policy_weight_decay_term)
        qf_updates = self.qf_update_method(qf_reg_loss, self.qf.get_params(trainable=True))
        policy_updates = self.policy_update_method(policy_reg_surr, self.policy.get_params(trainable=True))
        f_train_qf = ext.compile_function(inputs=[yvar, obs, action], outputs=[qf_loss, qval], updates=qf_updates)
        f_train_policy = ext.compile_function(inputs=[obs], outputs=policy_surr, updates=policy_updates)
        self.opt_info = dict(f_train_qf=f_train_qf, f_train_policy=f_train_policy, target_qf=target_qf, target_policy=target_policy)

    def do_training(self, itr, batch):
        """One minibatch update of the Q function and policy, followed by a
        soft (Polyak) update of the target networks."""
        (obs, actions, rewards, next_obs, terminals) = ext.extract(batch, 'observations', 'actions', 'rewards', 'next_observations', 'terminals')
        target_qf = self.opt_info['target_qf']
        target_policy = self.opt_info['target_policy']
        # Bellman targets computed from the target networks
        (next_actions, _) = target_policy.get_actions(next_obs)
        next_qvals = target_qf.get_qval(next_obs, next_actions)
        ys = (rewards + (((1.0 - terminals) * self.discount) * next_qvals))
        f_train_qf = self.opt_info['f_train_qf']
        f_train_policy = self.opt_info['f_train_policy']
        (qf_loss, qval) = f_train_qf(ys, obs, actions)
        policy_surr = f_train_policy(obs)
        # soft target updates: target <- (1 - tau) * target + tau * live
        target_policy.set_param_values(((target_policy.get_param_values() * (1.0 - self.soft_target_tau)) + (self.policy.get_param_values() * self.soft_target_tau)))
        target_qf.set_param_values(((target_qf.get_param_values() * (1.0 - self.soft_target_tau)) + (self.qf.get_param_values() * self.soft_target_tau)))
        self.qf_loss_averages.append(qf_loss)
        self.policy_surr_averages.append(policy_surr)
        self.q_averages.append(qval)
        self.y_averages.append(ys)

    def evaluate(self, epoch, pool):
        """Roll out the (deterministic) live policy, log return/Q statistics,
        and reset the running training statistics."""
        logger.log('Collecting samples for evaluation')
        paths = parallel_sampler.sample_paths(policy_params=self.policy.get_param_values(), max_samples=self.eval_samples, max_path_length=self.max_path_length)
        average_discounted_return = np.mean([special.discount_return(path['rewards'], self.discount) for path in paths])
        returns = [sum(path['rewards']) for path in paths]
        all_qs = np.concatenate(self.q_averages)
        all_ys = np.concatenate(self.y_averages)
        average_q_loss = np.mean(self.qf_loss_averages)
        average_policy_surr = np.mean(self.policy_surr_averages)
        average_action = np.mean(np.square(np.concatenate([path['actions'] for path in paths])))
        policy_reg_param_norm = np.linalg.norm(self.policy.get_param_values(regularizable=True))
        qfun_reg_param_norm = np.linalg.norm(self.qf.get_param_values(regularizable=True))
        logger.record_tabular('Epoch', epoch)
        logger.record_tabular('AverageReturn', np.mean(returns))
        logger.record_tabular('StdReturn', np.std(returns))
        logger.record_tabular('MaxReturn', np.max(returns))
        logger.record_tabular('MinReturn', np.min(returns))
        if (len(self.es_path_returns) > 0):
            # returns observed under the exploration (behavior) policy
            logger.record_tabular('AverageEsReturn', np.mean(self.es_path_returns))
            logger.record_tabular('StdEsReturn', np.std(self.es_path_returns))
            logger.record_tabular('MaxEsReturn', np.max(self.es_path_returns))
            logger.record_tabular('MinEsReturn', np.min(self.es_path_returns))
        logger.record_tabular('AverageDiscountedReturn', average_discounted_return)
        logger.record_tabular('AverageQLoss', average_q_loss)
        logger.record_tabular('AveragePolicySurr', average_policy_surr)
        logger.record_tabular('AverageQ', np.mean(all_qs))
        logger.record_tabular('AverageAbsQ', np.mean(np.abs(all_qs)))
        logger.record_tabular('AverageY', np.mean(all_ys))
        logger.record_tabular('AverageAbsY', np.mean(np.abs(all_ys)))
        logger.record_tabular('AverageAbsQYDiff', np.mean(np.abs((all_qs - all_ys))))
        logger.record_tabular('AverageAction', average_action)
        logger.record_tabular('PolicyRegParamNorm', policy_reg_param_norm)
        logger.record_tabular('QFunRegParamNorm', qfun_reg_param_norm)
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        # reset running statistics for the next epoch
        self.qf_loss_averages = []
        self.policy_surr_averages = []
        self.q_averages = []
        self.y_averages = []
        self.es_path_returns = []

    def update_plot(self):
        if self.plot:
            plotter.update_plot(self.policy, self.max_path_length)

    def get_epoch_snapshot(self, epoch):
        return dict(env=self.env, epoch=epoch, qf=self.qf, policy=self.policy, target_qf=self.opt_info['target_qf'], target_policy=self.opt_info['target_policy'], es=self.es)
|
class ERWR(VPG, Serializable):
    '\n    Episodic Reward Weighted Regression [1]_\n\n    Notes\n    -----\n    This does not implement the original RwR [2]_ that deals with "immediate reward problems" since\n    it doesn\'t find solutions that optimize for temporally delayed rewards.\n\n    .. [1] Kober, Jens, and Jan R. Peters. "Policy search for motor primitives in robotics." Advances in neural information processing systems. 2009.\n    .. [2] Peters, Jan, and Stefan Schaal. "Using reward-weighted regression for reinforcement learning of task space control." Approximate Dynamic Programming and Reinforcement Learning, 2007. ADPRL 2007. IEEE International Symposium on. IEEE, 2007.\n    '
    def __init__(self, optimizer=None, optimizer_args=None, positive_adv=None, **kwargs):
        Serializable.quick_init(self, locals())
        # default to an L-BFGS optimizer when none is supplied
        if (optimizer is None):
            if (optimizer_args is None):
                optimizer_args = dict()
            optimizer = LbfgsOptimizer(**optimizer_args)
        # positive_adv defaults to True here (reward-weighted regression needs
        # nonnegative weights), overriding the parent's default
        super(ERWR, self).__init__(optimizer=optimizer, positive_adv=(True if (positive_adv is None) else positive_adv), **kwargs)
|
class NOP(BatchPolopt):
    '\n    NOP (no optimization performed) policy search algorithm\n    '
    def __init__(self, **kwargs):
        super(NOP, self).__init__(**kwargs)
    @overrides
    def init_opt(self):
        # nothing to set up: this algorithm never optimizes the policy
        pass
    @overrides
    def optimize_policy(self, itr, samples_data):
        # intentionally a no-op
        pass
    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        # nothing worth snapshotting
        return dict()
|
class NPO(BatchPolopt):
    """
    Natural Policy Optimization.

    Maximizes an importance-weighted surrogate objective subject to a bound
    on the mean KL divergence between the old and new policies; the
    constrained step itself is delegated to ``self.optimizer``.
    """
    def __init__(self, optimizer=None, optimizer_args=None, step_size=0.01, truncate_local_is_ratio=None, **kwargs):
        # Default to a penalized L-BFGS optimizer when none is supplied.
        if (optimizer is None):
            if (optimizer_args is None):
                optimizer_args = dict()
            optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
        self.optimizer = optimizer
        # Trust-region size: upper bound on the mean KL constraint.
        self.step_size = step_size
        # Optional cap applied to the per-sample likelihood ratio (None = no cap).
        self.truncate_local_is_ratio = truncate_local_is_ratio
        super(NPO, self).__init__(**kwargs)

    @overrides
    def init_opt(self):
        """Build the symbolic surrogate loss and KL constraint, then register
        them with the optimizer. Returns an empty dict."""
        is_recurrent = int(self.policy.recurrent)
        # Recurrent policies consume inputs with an extra leading time dimension.
        obs_var = self.env.observation_space.new_tensor_variable('obs', extra_dims=(1 + is_recurrent))
        action_var = self.env.action_space.new_tensor_variable('action', extra_dims=(1 + is_recurrent))
        advantage_var = ext.new_tensor('advantage', ndim=(1 + is_recurrent), dtype=theano.config.floatX)
        dist = self.policy.distribution
        # Placeholders for the behavior (old) policy's distribution parameters.
        old_dist_info_vars = {k: ext.new_tensor(('old_%s' % k), ndim=(2 + is_recurrent), dtype=theano.config.floatX) for k in dist.dist_info_keys}
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
        state_info_vars = {k: ext.new_tensor(k, ndim=(2 + is_recurrent), dtype=theano.config.floatX) for k in self.policy.state_info_keys}
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
        if is_recurrent:
            # Mask distinguishing real timesteps from padding.
            valid_var = TT.matrix('valid')
        else:
            valid_var = None
        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        lr = dist.likelihood_ratio_sym(action_var, old_dist_info_vars, dist_info_vars)
        if (self.truncate_local_is_ratio is not None):
            lr = TT.minimum(self.truncate_local_is_ratio, lr)
        if is_recurrent:
            # Average only over valid (unpadded) timesteps.
            mean_kl = (TT.sum((kl * valid_var)) / TT.sum(valid_var))
            surr_loss = ((- TT.sum(((lr * advantage_var) * valid_var))) / TT.sum(valid_var))
        else:
            mean_kl = TT.mean(kl)
            # Negated because the optimizer minimizes.
            surr_loss = (- TT.mean((lr * advantage_var)))
        input_list = (([obs_var, action_var, advantage_var] + state_info_vars_list) + old_dist_info_vars_list)
        if is_recurrent:
            input_list.append(valid_var)
        self.optimizer.update_opt(loss=surr_loss, target=self.policy, leq_constraint=(mean_kl, self.step_size), inputs=input_list, constraint_name='mean_kl')
        return dict()

    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one constrained policy update and log loss/KL statistics."""
        all_input_values = tuple(ext.extract(samples_data, 'observations', 'actions', 'advantages'))
        agent_infos = samples_data['agent_infos']
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        # Order must match the input_list assembled in init_opt().
        all_input_values += (tuple(state_info_list) + tuple(dist_info_list))
        if self.policy.recurrent:
            all_input_values += (samples_data['valids'],)
        loss_before = self.optimizer.loss(all_input_values)
        mean_kl_before = self.optimizer.constraint_val(all_input_values)
        self.optimizer.optimize(all_input_values)
        mean_kl = self.optimizer.constraint_val(all_input_values)
        loss_after = self.optimizer.loss(all_input_values)
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        logger.record_tabular('MeanKLBefore', mean_kl_before)
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('dLoss', (loss_before - loss_after))
        return dict()

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Objects to persist for iteration ``itr``."""
        return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)
|
class PPO(NPO, Serializable):
    """
    Penalized Policy Optimization.
    """

    def __init__(self, optimizer=None, optimizer_args=None, **kwargs):
        Serializable.quick_init(self, locals())
        if optimizer is None:
            args = dict() if optimizer_args is None else optimizer_args
            optimizer = PenaltyLbfgsOptimizer(**args)
        super(PPO, self).__init__(optimizer=optimizer, **kwargs)
|
class REPS(BatchPolopt, Serializable):
    """
    Relative Entropy Policy Search (REPS)

    References
    ----------
    [1] J. Peters, K. Mulling, and Y. Altun, "Relative Entropy Policy Search,"
        Artif. Intell., pp. 1607-1612, 2008.
    """

    def __init__(self, epsilon=0.5, L2_reg_dual=0.0, L2_reg_loss=0.0, max_opt_itr=50, optimizer=scipy.optimize.fmin_l_bfgs_b, **kwargs):
        """
        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: Module path to the optimizer. It must support the
            same interface as scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None

    @overrides
    def init_opt(self):
        """Compile the policy loss, the REPS dual, their gradients, and a KL
        monitor as Theano functions (stored in ``self.opt_info``)."""
        is_recurrent = int(self.policy.recurrent)
        # Dual variables: temperature eta and value-feature weights v.
        self.param_eta = 15.0
        # _features() emits [obs, obs^2, t, t^2, t^3, 1] per step, hence
        # 2 * obs_dim + 4 weights.
        self.param_v = np.random.rand(((self.env.observation_space.flat_dim * 2) + 4))
        obs_var = self.env.observation_space.new_tensor_variable('obs', extra_dims=(1 + is_recurrent))
        action_var = self.env.action_space.new_tensor_variable('action', extra_dims=(1 + is_recurrent))
        rewards = ext.new_tensor('rewards', ndim=(1 + is_recurrent), dtype=theano.config.floatX)
        feat_diff = ext.new_tensor('feat_diff', ndim=(2 + is_recurrent), dtype=theano.config.floatX)
        param_v = TT.vector('param_v')
        param_eta = TT.scalar('eta')
        valid_var = TT.matrix('valid')
        state_info_vars = {k: ext.new_tensor(k, ndim=(2 + is_recurrent), dtype=theano.config.floatX) for k in self.policy.state_info_keys}
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        dist = self.policy.distribution
        logli = dist.log_likelihood_sym(action_var, dist_info_vars)
        # delta_v = r + v^T (phi(s') - phi(s)): per-step error in the dual.
        delta_v = (rewards + TT.dot(feat_diff, param_v))
        # exp() arguments are shifted by their max for numerical stability.
        if is_recurrent:
            loss = ((- TT.sum(((logli * TT.exp(((delta_v / param_eta) - TT.max((delta_v / param_eta))))) * valid_var))) / TT.sum(valid_var))
        else:
            loss = (- TT.mean((logli * TT.exp(((delta_v / param_eta) - TT.max((delta_v / param_eta)))))))
        # Optional L2 penalty on the policy's regularizable parameters.
        reg_params = self.policy.get_params(regularizable=True)
        loss += ((self.L2_reg_loss * TT.sum([TT.mean(TT.square(param)) for param in reg_params])) / len(reg_params))
        loss_grad = TT.grad(loss, self.policy.get_params(trainable=True))
        if is_recurrent:
            recurrent_vars = [valid_var]
        else:
            recurrent_vars = []
        input = ((([rewards, obs_var, feat_diff, action_var] + state_info_vars_list) + recurrent_vars) + [param_eta, param_v])
        f_loss = ext.compile_function(inputs=input, outputs=loss)
        f_loss_grad = ext.compile_function(inputs=input, outputs=loss_grad)
        # KL(old || new) monitor, reported after each update.
        old_dist_info_vars = {k: ext.new_tensor(('old_%s' % k), ndim=(2 + is_recurrent), dtype=theano.config.floatX) for k in dist.dist_info_keys}
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
        if is_recurrent:
            mean_kl = (TT.sum((dist.kl_sym(old_dist_info_vars, dist_info_vars) * valid_var)) / TT.sum(valid_var))
        else:
            mean_kl = TT.mean(dist.kl_sym(old_dist_info_vars, dist_info_vars))
        f_kl = ext.compile_function(inputs=((([obs_var, action_var] + state_info_vars_list) + old_dist_info_vars_list) + recurrent_vars), outputs=mean_kl)
        # REPS dual: eta*epsilon + eta*log E[exp(delta_v/eta)], with the same
        # max-shift for stability (added back as eta*max(...)).
        if is_recurrent:
            dual = (((param_eta * self.epsilon) + (param_eta * TT.log((TT.sum((TT.exp(((delta_v / param_eta) - TT.max((delta_v / param_eta)))) * valid_var)) / TT.sum(valid_var))))) + (param_eta * TT.max((delta_v / param_eta))))
        else:
            dual = (((param_eta * self.epsilon) + (param_eta * TT.log(TT.mean(TT.exp(((delta_v / param_eta) - TT.max((delta_v / param_eta)))))))) + (param_eta * TT.max((delta_v / param_eta))))
        # Regularize both large and tiny eta.
        dual += (self.L2_reg_dual * (TT.square(param_eta) + TT.square((1 / param_eta))))
        dual_grad = TT.grad(cost=dual, wrt=[param_eta, param_v])
        f_dual = ext.compile_function(inputs=((([rewards, feat_diff] + state_info_vars_list) + recurrent_vars) + [param_eta, param_v]), outputs=dual)
        f_dual_grad = ext.compile_function(inputs=((([rewards, feat_diff] + state_info_vars_list) + recurrent_vars) + [param_eta, param_v]), outputs=dual_grad)
        self.opt_info = dict(f_loss_grad=f_loss_grad, f_loss=f_loss, f_dual=f_dual, f_dual_grad=f_dual_grad, f_kl=f_kl)

    def _features(self, path):
        """Per-step dual features: clipped obs, squared obs, scaled time
        powers (t, t^2, t^3), and a bias term."""
        o = np.clip(path['observations'], (- 10), 10)
        l = len(path['rewards'])
        al = (np.arange(l).reshape((- 1), 1) / 100.0)
        return np.concatenate([o, (o ** 2), al, (al ** 2), (al ** 3), np.ones((l, 1))], axis=1)

    @overrides
    def optimize_policy(self, itr, samples_data):
        """Alternate between optimizing the dual (eta, v) and the policy."""
        rewards = samples_data['rewards']
        actions = samples_data['actions']
        observations = samples_data['observations']
        agent_infos = samples_data['agent_infos']
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        if self.policy.recurrent:
            recurrent_vals = [samples_data['valids']]
        else:
            recurrent_vals = []
        # Successive feature differences per path; a zero row is appended so
        # the terminal step's difference is phi(s_T) subtracted to zero.
        feat_diff = []
        for path in samples_data['paths']:
            feats = self._features(path)
            feats = np.vstack([feats, np.zeros(feats.shape[1])])
            feat_diff.append((feats[1:] - feats[:(- 1)]))
        if self.policy.recurrent:
            max_path_length = max([len(path['advantages']) for path in samples_data['paths']])
            feat_diff = np.array([tensor_utils.pad_tensor(fd, max_path_length) for fd in feat_diff])
        else:
            feat_diff = np.vstack(feat_diff)
        f_dual = self.opt_info['f_dual']
        f_dual_grad = self.opt_info['f_dual_grad']

        def eval_dual(input):
            # input packs [eta, v...] as one flat vector for the optimizer.
            param_eta = input[0]
            param_v = input[1:]
            val = f_dual(*((([rewards, feat_diff] + state_info_list) + recurrent_vals) + [param_eta, param_v]))
            return val.astype(np.float64)

        def eval_dual_grad(input):
            param_eta = input[0]
            param_v = input[1:]
            grad = f_dual_grad(*((([rewards, feat_diff] + state_info_list) + recurrent_vals) + [param_eta, param_v]))
            # Fix: np.float was removed in NumPy 1.20+; the builtin float is
            # the documented replacement and behaves identically here.
            eta_grad = float(grad[0])
            v_grad = grad[1]
            return np.hstack([eta_grad, v_grad])

        x0 = np.hstack([self.param_eta, self.param_v])
        # eta must stay positive; v is unconstrained.
        bounds = [((- np.inf), np.inf) for _ in x0]
        bounds[0] = (0.0, np.inf)
        logger.log('optimizing dual')
        eta_before = x0[0]
        dual_before = eval_dual(x0)
        (params_ast, _, _) = self.optimizer(func=eval_dual, x0=x0, fprime=eval_dual_grad, bounds=bounds, maxiter=self.max_opt_itr, disp=0)
        dual_after = eval_dual(params_ast)
        self.param_eta = params_ast[0]
        self.param_v = params_ast[1:]
        # Policy optimization with the updated dual variables held fixed.
        cur_params = self.policy.get_param_values(trainable=True)
        f_loss = self.opt_info['f_loss']
        f_loss_grad = self.opt_info['f_loss_grad']
        input = ((([rewards, observations, feat_diff, actions] + state_info_list) + recurrent_vals) + [self.param_eta, self.param_v])

        def eval_loss(params):
            self.policy.set_param_values(params, trainable=True)
            val = f_loss(*input)
            return val.astype(np.float64)

        def eval_loss_grad(params):
            self.policy.set_param_values(params, trainable=True)
            grad = f_loss_grad(*input)
            flattened_grad = tensor_utils.flatten_tensors(list(map(np.asarray, grad)))
            return flattened_grad.astype(np.float64)

        loss_before = eval_loss(cur_params)
        logger.log('optimizing policy')
        (params_ast, _, _) = self.optimizer(func=eval_loss, x0=cur_params, fprime=eval_loss_grad, disp=0, maxiter=self.max_opt_itr)
        loss_after = eval_loss(params_ast)
        f_kl = self.opt_info['f_kl']
        mean_kl = f_kl(*((([observations, actions] + state_info_list) + dist_info_list) + recurrent_vals)).astype(np.float64)
        logger.log(('eta %f -> %f' % (eta_before, self.param_eta)))
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        logger.record_tabular('DualBefore', dual_before)
        logger.record_tabular('DualAfter', dual_after)
        logger.record_tabular('MeanKL', mean_kl)

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Objects to persist for iteration ``itr``."""
        return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)
|
class TNPG(NPO):
    """
    Truncated Natural Policy Gradient.
    """

    def __init__(self, optimizer=None, optimizer_args=None, **kwargs):
        if optimizer is None:
            # Default to a single backtracking step; caller args override.
            default_args = dict(max_backtracks=1)
            if optimizer_args is None:
                merged = default_args
            else:
                merged = dict(default_args, **optimizer_args)
            optimizer = ConjugateGradientOptimizer(**merged)
        super(TNPG, self).__init__(optimizer=optimizer, **kwargs)
|
class TRPO(NPO):
    """
    Trust Region Policy Optimization
    """

    def __init__(self, optimizer=None, optimizer_args=None, **kwargs):
        if optimizer is None:
            args = dict() if optimizer_args is None else optimizer_args
            optimizer = ConjugateGradientOptimizer(**args)
        super(TRPO, self).__init__(optimizer=optimizer, **kwargs)
|
def center_advantages(advantages):
    """Standardize advantages: zero mean, (approximately) unit std.

    A small epsilon guards against division by zero for constant inputs.
    """
    mean = np.mean(advantages)
    std = advantages.std() + 1e-08
    return (advantages - mean) / std
|
def shift_advantages_to_positive(advantages):
    """Translate advantages so the smallest value is a tiny positive number."""
    lowest = np.min(advantages)
    return advantages - lowest + 1e-08
|
def sign(x):
    """Return +1.0 where ``x >= 0`` and -1.0 where ``x < 0``.

    Unlike ``np.sign``, zero maps to +1.0. Works elementwise on arrays.
    """
    positive = 1.0 * (x >= 0)
    negative = 1.0 * (x < 0)
    return positive - negative
|
class ReplayPool(Serializable):
    """
    A utility class for experience replay.
    The code is adapted from https://github.com/spragunr/deep_q_rl

    Implemented as a fixed-capacity circular buffer: ``top`` is the next
    write position, ``bottom`` the index of the oldest stored entry, and
    ``size`` the current fill level.
    """
    def __init__(self, observation_shape, action_dim, max_steps, observation_dtype=np.float32, action_dtype=np.float32, concat_observations=False, concat_length=1, rng=None):
        """Construct a ReplayPool.

        Arguments:
            observation_shape - tuple indicating the shape of the observation
            action_dim - dimension of the action
            max_steps - capacity of the replay pool
            observation_dtype - dtype of the observation buffer
            action_dtype - dtype of the action buffer
            concat_observations - whether to concat the past few observations
                as a single one, so as to ensure the Markov property
            concat_length - length of the concatenation
            rng - optional numpy RandomState used for batch sampling
        """
        self.observation_shape = observation_shape
        self.action_dim = action_dim
        self.max_steps = max_steps
        # Pre-allocated ring buffers, one per transition component.
        self.observations = np.zeros(((max_steps,) + observation_shape), dtype=observation_dtype)
        self.actions = np.zeros((max_steps, action_dim), dtype=action_dtype)
        self.rewards = np.zeros((max_steps,), dtype=np.float32)
        self.terminals = np.zeros((max_steps,), dtype='bool')
        # Allocated lazily by the first add_sample() that passes `extra`.
        self.extras = None
        self.concat_observations = concat_observations
        self.concat_length = concat_length
        self.observation_dtype = observation_dtype
        self.action_dtype = action_dtype
        if rng:
            self.rng = rng
        else:
            self.rng = np.random.RandomState()
        if (not concat_observations):
            assert (concat_length == 1), 'concat_length must be set to 1 if not concatenating observations'
        self.bottom = 0
        self.top = 0
        self.size = 0
        # NOTE(review): `self` is passed as an extra positional argument;
        # presumably Serializable records the constructor args — confirm.
        super(ReplayPool, self).__init__(self, observation_shape, action_dim, max_steps, observation_dtype, action_dtype, concat_observations, concat_length, rng)

    def __getstate__(self):
        """Extend the serialized state with the buffers and ring cursors."""
        d = super(ReplayPool, self).__getstate__()
        d['bottom'] = self.bottom
        d['top'] = self.top
        d['size'] = self.size
        d['observations'] = self.observations
        d['actions'] = self.actions
        d['rewards'] = self.rewards
        d['terminals'] = self.terminals
        d['extras'] = self.extras
        d['rng'] = self.rng
        return d

    def __setstate__(self, d):
        """Restore the buffers and cursors saved by __getstate__."""
        super(ReplayPool, self).__setstate__(d)
        (self.bottom, self.top, self.size, self.observations, self.actions, self.rewards, self.terminals, self.extras, self.rng) = extract(d, 'bottom', 'top', 'size', 'observations', 'actions', 'rewards', 'terminals', 'extras', 'rng')

    def add_sample(self, observation, action, reward, terminal, extra=None):
        """Add a time step record.

        Arguments:
            observation -- current observation
            action -- action chosen by the agent
            reward -- reward received after taking the action
            terminal -- boolean indicating whether the episode ended after
                this time step
            extra -- optional per-step array; once used it must be supplied
                on every subsequent call (and vice versa)
        """
        self.observations[self.top] = observation
        self.actions[self.top] = action
        self.rewards[self.top] = reward
        self.terminals[self.top] = terminal
        if (extra is not None):
            if (self.extras is None):
                # First extra seen: allocate its buffer, but only while empty.
                assert (self.size == 0), 'extra must be consistent'
                self.extras = np.zeros(((self.max_steps,) + extra.shape), dtype=extra.dtype)
            self.extras[self.top] = extra
        else:
            assert (self.extras is None)
        if (self.size == self.max_steps):
            # Pool is full: overwrite the oldest entry.
            self.bottom = ((self.bottom + 1) % self.max_steps)
        else:
            self.size += 1
        self.top = ((self.top + 1) % self.max_steps)

    def __len__(self):
        """Return an approximate count of stored state transitions."""
        return max(0, (self.size - self.concat_length))

    def last_concat_state(self):
        """
        Return the most recent sample (concatenated observations if needed).
        """
        if self.concat_observations:
            # take(..., mode='wrap') handles indices past the ring boundary.
            indexes = np.arange((self.top - self.concat_length), self.top)
            return self.observations.take(indexes, axis=0, mode='wrap')
        else:
            return self.observations[(self.top - 1)]

    def concat_state(self, state):
        """Return a concatenated state, using the last concat_length -
        1 stored observations, plus ``state`` as the newest frame.

        """
        if self.concat_observations:
            indexes = np.arange(((self.top - self.concat_length) + 1), self.top)
            # NOTE(review): `floatX` is assumed to be imported at module
            # level — confirm against the file's imports.
            concat_state = np.empty(((self.concat_length,) + self.observation_shape), dtype=floatX)
            concat_state[0:(self.concat_length - 1)] = self.observations.take(indexes, axis=0, mode='wrap')
            concat_state[(- 1)] = state
            return concat_state
        else:
            return state

    def random_batch(self, batch_size):
        """
        Return corresponding observations, actions, rewards, terminal status,
        and next_observations for batch_size randomly chosen state transitions.
        """
        observations = np.zeros(((batch_size, self.concat_length) + self.observation_shape), dtype=self.observation_dtype)
        actions = np.zeros((batch_size, self.action_dim), dtype=self.action_dtype)
        rewards = np.zeros((batch_size,), dtype=floatX)
        terminals = np.zeros((batch_size,), dtype='bool')
        if (self.extras is not None):
            extras = np.zeros(((batch_size,) + self.extras.shape[1:]), dtype=self.extras.dtype)
            next_extras = np.zeros(((batch_size,) + self.extras.shape[1:]), dtype=self.extras.dtype)
        else:
            extras = None
            next_extras = None
        next_observations = np.zeros(((batch_size, self.concat_length) + self.observation_shape), dtype=self.observation_dtype)
        next_actions = np.zeros((batch_size, self.action_dim), dtype=self.action_dtype)
        count = 0
        # Rejection-sample windows until the batch is full.
        while (count < batch_size):
            # Choose a start so the whole concat window lies in stored data.
            index = self.rng.randint(self.bottom, ((self.bottom + self.size) - self.concat_length))
            initial_indices = np.arange(index, (index + self.concat_length))
            transition_indices = (initial_indices + 1)
            end_index = ((index + self.concat_length) - 1)
            # Reject windows containing a terminal anywhere before the final
            # step: such a window would straddle an episode boundary.
            if np.any(self.terminals.take(initial_indices[0:(- 1)], mode='wrap')):
                continue
            observations[count] = self.observations.take(initial_indices, axis=0, mode='wrap')
            actions[count] = self.actions.take(end_index, mode='wrap')
            rewards[count] = self.rewards.take(end_index, mode='wrap')
            terminals[count] = self.terminals.take(end_index, mode='wrap')
            if (self.extras is not None):
                extras[count] = self.extras.take(end_index, axis=0, mode='wrap')
                next_extras[count] = self.extras.take(transition_indices, axis=0, mode='wrap')
            next_observations[count] = self.observations.take(transition_indices, axis=0, mode='wrap')
            next_actions[count] = self.actions.take(transition_indices, axis=0, mode='wrap')
            count += 1
        if (not self.concat_observations):
            # Drop the singleton concat axis when concatenation is disabled.
            observations = np.squeeze(observations, axis=1)
            next_observations = np.squeeze(next_observations, axis=1)
        return dict(observations=observations, actions=actions, rewards=rewards, next_observations=next_observations, next_actions=next_actions, terminals=terminals, extras=extras, next_extras=next_extras)
|
def simple_tests():
    """Exercise ReplayPool end to end with random samples, printing the
    internal buffers after every insertion.

    Seeded so the output is reproducible.
    """
    np.random.seed(222)
    dataset = ReplayPool(observation_shape=(3, 2), action_dim=1, max_steps=6, concat_observations=True, concat_length=4)
    for _ in range(10):
        img = np.random.randint(0, 256, size=(3, 2))
        action = np.random.randint(16)
        reward = np.random.random()
        terminal = False
        if (np.random.random() < 0.05):
            terminal = True
        print('img', img)
        dataset.add_sample(img, action, reward, terminal)
        print('S', dataset.observations)
        print('A', dataset.actions)
        print('R', dataset.rewards)
        # Bug fix: the pool stores flags in `terminals` (plural);
        # `dataset.terminal` raised AttributeError.
        print('T', dataset.terminals)
        print('SIZE', dataset.size)
        print()
    print('LAST CONCAT STATE', dataset.last_concat_state())
    print()
    print('BATCH', dataset.random_batch(2))
|
def speed_tests():
    """Benchmark ReplayPool insertion and batch-sampling throughput."""
    pool = ReplayPool(observation_shape=(80, 80), action_dim=1, max_steps=20000, concat_observations=True, concat_length=4)
    img = np.random.randint(0, 256, size=(80, 80))
    action = np.random.randint(16)
    reward = np.random.random()
    start = time.time()
    for _ in range(100000):
        terminal = False
        if np.random.random() < 0.05:
            terminal = True
        pool.add_sample(img, action, reward, terminal)
    print('samples per second: ', (100000 / (time.time() - start)))
    start = time.time()
    for _ in range(200):
        pool.random_batch(32)
    print('batches per second: ', (200 / (time.time() - start)))
    print(pool.last_concat_state())
|
def trivial_tests():
    """Smoke-test a tiny capacity-3 pool with three 1x2 frames."""
    pool = ReplayPool(observation_shape=(1, 2), action_dim=1, max_steps=3, concat_observations=True, concat_length=2)
    frame_a = np.array([[1, 1]], dtype='uint8')
    frame_b = np.array([[2, 2]], dtype='uint8')
    frame_c = np.array([[3, 3]], dtype='uint8')
    pool.add_sample(frame_a, 1, 1, False)
    pool.add_sample(frame_b, 2, 2, False)
    pool.add_sample(frame_c, 2, 2, True)
    print('last', pool.last_concat_state())
    print('random', pool.random_batch(1))
|
def max_size_tests():
    """Verify pool capacity does not affect the most recent concat state."""
    small = ReplayPool(observation_shape=(4, 3), action_dim=1, max_steps=10, concat_observations=True, concat_length=4, rng=np.random.RandomState(42))
    large = ReplayPool(observation_shape=(4, 3), action_dim=1, max_steps=1000, concat_observations=True, concat_length=4, rng=np.random.RandomState(42))
    for _ in range(100):
        img = np.random.randint(0, 256, size=(4, 3))
        action = np.random.randint(16)
        reward = np.random.random()
        terminal = False
        if np.random.random() < 0.05:
            terminal = True
        small.add_sample(img, action, reward, terminal)
        large.add_sample(img, action, reward, terminal)
    np.testing.assert_array_almost_equal(small.last_concat_state(), large.last_concat_state())
    print('passed')
|
def test_memory_usage_ok():
    """Soak test: add samples indefinitely while printing memory usage."""
    import memory_profiler
    pool = ReplayPool(observation_shape=(80, 80), action_dim=1, max_steps=100000, concat_observations=True, concat_length=4)
    last = time.time()
    for i in range(1000000000):
        if (i % 100000) == 0:
            print(i)
        pool.add_sample(np.random.random((80, 80)), 1, 1, False)
        if i > 200000:
            pool.random_batch(32)
        if (i % 10007) == 0:
            print(time.time() - last)
            mem_usage = memory_profiler.memory_usage(-1)
            print(len(pool), mem_usage)
            last = time.time()
|
def main():
    # Run the ReplayPool checks in order: throughput benchmark, capacity
    # equivalence, then the seeded end-to-end smoke test.
    speed_tests()
    max_size_tests()
    simple_tests()
|
class VPG(BatchPolopt, Serializable):
    """
    Vanilla Policy Gradient.

    Minimizes the negative likelihood-weighted advantage with a first-order
    optimizer; KL to the pre-update policy is compiled only for logging.
    """
    def __init__(self, env, policy, baseline, optimizer=None, optimizer_args=None, **kwargs):
        Serializable.quick_init(self, locals())
        # Default optimizer: single-epoch full-batch first-order updates.
        if (optimizer is None):
            default_args = dict(batch_size=None, max_epochs=1)
            if (optimizer_args is None):
                optimizer_args = default_args
            else:
                optimizer_args = dict(default_args, **optimizer_args)
            optimizer = FirstOrderOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.opt_info = None
        super(VPG, self).__init__(env=env, policy=policy, baseline=baseline, **kwargs)

    @overrides
    def init_opt(self):
        """Build the surrogate objective and a compiled KL monitor."""
        is_recurrent = int(self.policy.recurrent)
        # Recurrent policies consume inputs with an extra time dimension.
        obs_var = self.env.observation_space.new_tensor_variable('obs', extra_dims=(1 + is_recurrent))
        action_var = self.env.action_space.new_tensor_variable('action', extra_dims=(1 + is_recurrent))
        advantage_var = ext.new_tensor('advantage', ndim=(1 + is_recurrent), dtype=theano.config.floatX)
        dist = self.policy.distribution
        # Placeholders for the pre-update distribution parameters (KL logging).
        old_dist_info_vars = {k: ext.new_tensor(('old_%s' % k), ndim=(2 + is_recurrent), dtype=theano.config.floatX) for k in dist.dist_info_keys}
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
        if is_recurrent:
            # Mask distinguishing real timesteps from padding.
            valid_var = TT.matrix('valid')
        else:
            valid_var = None
        state_info_vars = {k: ext.new_tensor(k, ndim=(2 + is_recurrent), dtype=theano.config.floatX) for k in self.policy.state_info_keys}
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        logli = dist.log_likelihood_sym(action_var, dist_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        if is_recurrent:
            # Averages restricted to valid (unpadded) timesteps.
            surr_obj = ((- TT.sum(((logli * advantage_var) * valid_var))) / TT.sum(valid_var))
            mean_kl = (TT.sum((kl * valid_var)) / TT.sum(valid_var))
            max_kl = TT.max((kl * valid_var))
        else:
            # Negated because the optimizer minimizes.
            surr_obj = (- TT.mean((logli * advantage_var)))
            mean_kl = TT.mean(kl)
            max_kl = TT.max(kl)
        input_list = ([obs_var, action_var, advantage_var] + state_info_vars_list)
        if is_recurrent:
            input_list.append(valid_var)
        self.optimizer.update_opt(surr_obj, target=self.policy, inputs=input_list)
        # KL is not part of the objective; compile it separately for logging.
        f_kl = ext.compile_function(inputs=(input_list + old_dist_info_vars_list), outputs=[mean_kl, max_kl])
        self.opt_info = dict(f_kl=f_kl)

    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one gradient update and log loss and KL statistics."""
        logger.log('optimizing policy')
        inputs = ext.extract(samples_data, 'observations', 'actions', 'advantages')
        agent_infos = samples_data['agent_infos']
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        # Input order must match input_list assembled in init_opt().
        inputs += tuple(state_info_list)
        if self.policy.recurrent:
            inputs += (samples_data['valids'],)
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        loss_before = self.optimizer.loss(inputs)
        self.optimizer.optimize(inputs)
        loss_after = self.optimizer.loss(inputs)
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        (mean_kl, max_kl) = self.opt_info['f_kl'](*(list(inputs) + dist_info_list))
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('MaxKL', max_kl)

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Objects to persist for iteration ``itr``."""
        return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)
|
class Baseline(object):
    """Abstract interface for state-value baselines used by the batch
    policy-optimization algorithms."""

    def __init__(self, env_spec):
        self._mdp_spec = env_spec

    @property
    def algorithm_parallelized(self):
        # Subclasses supporting parallel fitting may override this.
        return False

    def get_param_values(self):
        """Return the baseline's parameters."""
        raise NotImplementedError

    def set_param_values(self, val):
        """Restore the baseline's parameters from ``val``."""
        raise NotImplementedError

    def fit(self, paths):
        """Fit the baseline to a batch of sampled paths."""
        raise NotImplementedError

    def predict(self, path):
        """Predict state values for each step of ``path``."""
        raise NotImplementedError

    @classmethod
    @autoargs.add_args
    def add_args(cls, parser):
        pass

    @classmethod
    @autoargs.new_from_args
    def new_from_args(cls, args, mdp):
        pass

    def log_diagnostics(self, paths):
        """
        Log extra information per iteration based on the collected paths
        """
        pass
|
class GaussianConvBaseline(Baseline, Parameterized, Serializable):
    """Value-function baseline backed by a Gaussian convolutional regressor."""

    def __init__(self, env_spec, subsample_factor=1.0, regressor_args=None):
        Serializable.quick_init(self, locals())
        super(GaussianConvBaseline, self).__init__(env_spec)
        regressor_args = dict() if regressor_args is None else regressor_args
        self._regressor = GaussianConvRegressor(input_shape=env_spec.observation_space.shape, output_dim=1, name='vf', **regressor_args)

    @overrides
    def fit(self, paths):
        """Regress empirical returns on observations pooled over all paths."""
        obs = np.concatenate([p['observations'] for p in paths])
        rets = np.concatenate([p['returns'] for p in paths])
        self._regressor.fit(obs, rets.reshape((-1, 1)))

    @overrides
    def predict(self, path):
        """Per-step value predictions for ``path``."""
        return self._regressor.predict(path['observations']).flatten()

    @overrides
    def get_param_values(self, **tags):
        return self._regressor.get_param_values(**tags)

    @overrides
    def set_param_values(self, flattened_params, **tags):
        self._regressor.set_param_values(flattened_params, **tags)
|
class GaussianMLPBaseline(Baseline, Parameterized, Serializable):
    """Value-function baseline backed by a Gaussian MLP regressor."""

    def __init__(self, env_spec, subsample_factor=1.0, num_seq_inputs=1, regressor_args=None):
        Serializable.quick_init(self, locals())
        super(GaussianMLPBaseline, self).__init__(env_spec)
        regressor_args = dict() if regressor_args is None else regressor_args
        # Input is the flat observation, optionally repeated num_seq_inputs times.
        self._regressor = GaussianMLPRegressor(input_shape=((env_spec.observation_space.flat_dim * num_seq_inputs),), output_dim=1, name='vf', **regressor_args)

    @overrides
    def fit(self, paths, log=True):
        """Regress empirical returns on observations pooled over all paths."""
        obs = np.concatenate([p['observations'] for p in paths])
        rets = np.concatenate([p['returns'] for p in paths])
        self._regressor.fit(obs, rets.reshape((-1, 1)), log=log)

    @overrides
    def predict(self, path):
        """Per-step value predictions for ``path``."""
        return self._regressor.predict(path['observations']).flatten()

    @overrides
    def get_param_values(self, **tags):
        return self._regressor.get_param_values(**tags)

    @overrides
    def set_param_values(self, flattened_params, **tags):
        self._regressor.set_param_values(flattened_params, **tags)
|
class LinearFeatureBaseline(Baseline):
    """Linear value-function baseline fit by ridge-regularized least squares
    on hand-crafted observation/time features."""

    def __init__(self, env_spec, reg_coeff=1e-05):
        # Consistency fix: initialize the Baseline base class like the other
        # baseline implementations do (previously env_spec was dropped).
        super(LinearFeatureBaseline, self).__init__(env_spec)
        self._coeffs = None  # stays None until fit() has run
        self._reg_coeff = reg_coeff

    @overrides
    def get_param_values(self, **tags):
        return self._coeffs

    @overrides
    def set_param_values(self, val, **tags):
        self._coeffs = val

    def _features(self, path):
        """Feature map per step: clipped obs, squared obs, scaled time powers
        (t, t^2, t^3), and a constant bias term."""
        o = np.clip(path['observations'], (- 10), 10)
        l = len(path['rewards'])
        al = (np.arange(l).reshape((- 1), 1) / 100.0)
        return np.concatenate([o, (o ** 2), al, (al ** 2), (al ** 3), np.ones((l, 1))], axis=1)

    @overrides
    def fit(self, paths, **kwargs):
        """Solve the regularized normal equations for the feature weights,
        escalating the ridge coefficient if the solution comes back NaN
        (i.e. the system is too ill-conditioned)."""
        featmat = np.concatenate([self._features(path) for path in paths])
        returns = np.concatenate([path['returns'] for path in paths])
        reg_coeff = self._reg_coeff
        for _ in range(5):
            self._coeffs = np.linalg.lstsq(
                (featmat.T.dot(featmat) + (reg_coeff * np.identity(featmat.shape[1]))),
                featmat.T.dot(returns))[0]
            if (not np.any(np.isnan(self._coeffs))):
                break
            reg_coeff *= 10

    @overrides
    def predict(self, path):
        """Predicted values for each step; zeros before the first fit."""
        if (self._coeffs is None):
            return np.zeros(len(path['rewards']))
        return self._features(path).dot(self._coeffs)
|
class ZeroBaseline(Baseline):
    """Trivial baseline that always predicts zero value."""

    def __init__(self, env_spec):
        pass

    @overrides
    def get_param_values(self, **kwargs):
        # Nothing to save.
        return None

    @overrides
    def set_param_values(self, val, **kwargs):
        # Nothing to restore.
        pass

    @overrides
    def fit(self, paths, **kwargs):
        # Nothing to fit.
        pass

    @overrides
    def predict(self, path):
        """One zero per reward in the path."""
        return np.zeros_like(path['rewards'])
|
def get_full_output(layer_or_layers, inputs=None, **kwargs):
    """
    Computes the output of the network at one or more given layers.
    Optionally, you can define the input(s) to propagate through the network
    instead of using the input variable(s) associated with the network's
    input layer(s).

    Unlike plain ``get_output``, layers may implement
    ``get_full_output_for`` returning an ``(output, extra)`` pair; the
    extras are collected per layer and returned alongside the outputs.

    Parameters
    ----------
    layer_or_layers : Layer or list
        the :class:`Layer` instance for which to compute the output
        expressions, or a list of :class:`Layer` instances.

    inputs : None, Theano expression, numpy array, or dict
        If None, uses the input variables associated with the
        :class:`InputLayer` instances.
        If a Theano expression, this defines the input for a single
        :class:`InputLayer` instance. Will throw a ValueError if there
        are multiple :class:`InputLayer` instances.
        If a numpy array, this will be wrapped as a Theano constant
        and used just like a Theano expression.
        If a dictionary, any :class:`Layer` instance (including the
        input layers) can be mapped to a Theano expression or numpy
        array to use instead of its regular output.

    Returns
    -------
    (output, extras) : tuple
        the output of the given layer(s) for the given network input, plus
        a dict mapping each layer that produced extra outputs to them.

    Notes
    -----
    Depending on your network architecture, `get_output([l1, l2])` may
    be crucially different from `[get_output(l1), get_output(l2)]`. Only
    the former ensures that the output expressions depend on the same
    intermediate expressions. For example, when `l1` and `l2` depend on
    a common dropout layer, the former will use the same dropout mask for
    both, while the latter will use two different dropout masks.
    """
    from lasagne.layers.input import InputLayer
    from lasagne.layers.base import MergeLayer
    # Layers given explicit inputs are treated as roots of the graph walk.
    treat_as_input = (list(inputs.keys()) if isinstance(inputs, dict) else [])
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # Seed the expression map with each input layer's own input variable.
    all_outputs = dict(((layer, layer.input_var) for layer in all_layers if (isinstance(layer, InputLayer) and (layer not in treat_as_input))))
    extra_outputs = dict()
    if isinstance(inputs, dict):
        # Caller-specified overrides for arbitrary layers.
        all_outputs.update(((layer, utils.as_theano_expression(expr)) for (layer, expr) in list(inputs.items())))
    elif (inputs is not None):
        # A single expression feeds every input layer.
        for input_layer in all_outputs:
            all_outputs[input_layer] = utils.as_theano_expression(inputs)
    # Propagate expressions through the remaining layers in order.
    for layer in all_layers:
        if (layer not in all_outputs):
            try:
                if isinstance(layer, MergeLayer):
                    layer_inputs = [all_outputs[input_layer] for input_layer in layer.input_layers]
                else:
                    layer_inputs = all_outputs[layer.input_layer]
            except KeyError:
                raise ValueError(('get_output() was called without giving an input expression for the free-floating layer %r. Please call it with a dictionary mapping this layer to an input expression.' % layer))
            # Layers may expose get_full_output_for to emit (output, extra).
            if hasattr(layer, 'get_full_output_for'):
                (output, extra) = layer.get_full_output_for(layer_inputs, **kwargs)
                all_outputs[layer] = output
                extra_outputs[layer] = extra
            else:
                all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
    try:
        # A list of layers yields a list of outputs ...
        return ([all_outputs[layer] for layer in layer_or_layers], extra_outputs)
    except TypeError:
        # ... a single (non-iterable) layer yields its single output.
        return (all_outputs[layer_or_layers], extra_outputs)
|
def get_output(layer_or_layers, inputs=None, **kwargs):
    """Return only the layer output(s) from :func:`get_full_output`,
    discarding the extra per-layer outputs."""
    outputs, _extras = get_full_output(layer_or_layers, inputs, **kwargs)
    return outputs
|
class ParamLayer(L.Layer):
    """Layer that ignores its input values and broadcasts a (optionally
    learnable) parameter vector of size ``num_units`` across the input's
    leading axes."""

    def __init__(self, incoming, num_units, param=lasagne.init.Constant(0.0), trainable=True, **kwargs):
        super(ParamLayer, self).__init__(incoming, **kwargs)
        self.num_units = num_units
        # Single shared parameter vector, optionally frozen via `trainable`.
        self.param = self.add_param(param, (num_units,), name='param', trainable=trainable)

    def get_output_shape_for(self, input_shape):
        # Keep every axis but the last, which becomes num_units.
        return input_shape[:-1] + (self.num_units,)

    def get_output_for(self, input, **kwargs):
        n_axes = input.ndim
        # View the parameter as (1, ..., 1, num_units) so it can be tiled.
        broadcast_shape = (1,) * (n_axes - 1) + (self.num_units,)
        param_view = TT.reshape(self.param, broadcast_shape)
        # Repeat along all leading axes to match the input's batch shape.
        reps = TT.concatenate([input.shape[:-1], [1]])
        return TT.tile(param_view, reps, ndim=n_axes)
|
class OpLayer(L.MergeLayer):
    """Wrap an arbitrary symbolic operation as a lasagne merge layer.

    ``op`` receives the incoming layer's output (plus the outputs of any
    ``extras`` layers); ``shape_op`` maps the corresponding input shapes to
    the output shape.
    """

    def __init__(self, incoming, op, shape_op=(lambda x: x), extras=None, **kwargs):
        inputs = [incoming] + (extras if extras is not None else [])
        super(OpLayer, self).__init__(inputs, **kwargs)
        self.op = op
        self.shape_op = shape_op
        self.incomings = inputs

    def get_output_shape_for(self, input_shapes):
        return self.shape_op(*input_shapes)

    def get_output_for(self, inputs, **kwargs):
        return self.op(*inputs)
|
class BatchNormLayer(L.Layer):
    """Batch normalization layer (Ioffe & Szegedy, 2015, arXiv:1502.03167).

    Normalizes the input to zero mean and unit variance over the axes in
    `axes` and then applies a learned affine transform:

        y = gamma * (x - mean) / sqrt(var + epsilon) + beta

    With ``deterministic=False`` (training) the statistics of the current
    mini-batch are used and exponential moving averages of them are updated
    as a side effect of the graph; with ``deterministic=True`` (testing)
    the stored averages are used instead. Note that instead of the variance
    the layer stores ``sqrt(var + epsilon)`` in ``self.std``.

    Parameters
    ----------
    incoming : Layer instance or shape tuple
        The layer feeding into this one.
    axes : 'auto', int or tuple of int
        Axes to normalize over; 'auto' means all axes except the second.
    epsilon : scalar
        Small constant added to the variance for numerical stability.
    alpha : scalar
        Coefficient of the exponential moving averages.
    mode : str
        Stored but not read by this implementation (kept for API
        compatibility).
    beta, gamma : initializer or None
        Learnable shift/scale; ``None`` fixes them to 0 / 1 respectively.
    mean, std : initializer
        Initial values for the stored average statistics.

    The ``get_output`` keyword arguments ``batch_norm_use_averages`` and
    ``batch_norm_update_averages`` override the behavior implied by
    ``deterministic``.
    """
    def __init__(self, incoming, axes='auto', epsilon=0.0001, alpha=0.1, mode='low_mem', beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1), mean=lasagne.init.Constant(0), std=lasagne.init.Constant(1), **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)
        if (axes == 'auto'):
            # Normalize over the batch axis and all trailing (spatial) axes,
            # keeping only axis 1 (the feature axis).
            axes = ((0,) + tuple(range(2, len(self.input_shape))))
        elif isinstance(axes, int):
            axes = (axes,)
        self.axes = axes
        self.epsilon = epsilon
        self.alpha = alpha
        self.mode = mode
        # Parameter shape: one value per element of every non-normalized axis.
        shape = [size for (axis, size) in enumerate(self.input_shape) if (axis not in self.axes)]
        if any(((size is None) for size in shape)):
            raise ValueError('BatchNormLayer needs specified input sizes for all axes not normalized over.')
        if (beta is None):
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta', trainable=True, regularizable=False)
        if (gamma is None):
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma', trainable=True, regularizable=False)
        # Running statistics are stored as non-trainable parameters.
        self.mean = self.add_param(mean, shape, 'mean', trainable=False, regularizable=False)
        self.std = self.add_param(std, shape, 'std', trainable=False, regularizable=False)
    def get_output_for(self, input, deterministic=False, **kwargs):
        # Mini-batch statistics over the normalized axes.
        input_mean = input.mean(self.axes)
        input_std = TT.sqrt((input.var(self.axes) + self.epsilon))
        # Decide whether to normalize with the stored averages or with the
        # current batch statistics (overridable via kwargs).
        use_averages = kwargs.get('batch_norm_use_averages', deterministic)
        if use_averages:
            mean = self.mean
            std = self.std
        else:
            mean = input_mean
            std = input_std
        update_averages = kwargs.get('batch_norm_update_averages', (not deterministic))
        if update_averages:
            # Update the stored averages as a side effect of the graph:
            # clone the shared variables, attach default updates, and make
            # the output depend on the clones.
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_std = theano.clone(self.std, share_inputs=False)
            running_mean.default_update = (((1 - self.alpha) * running_mean) + (self.alpha * input_mean))
            running_std.default_update = (((1 - self.alpha) * running_std) + (self.alpha * input_std))
            # Adding 0 * clone leaves the value unchanged while forcing the
            # default updates to be applied.
            mean += (0 * running_mean)
            std += (0 * running_std)
        # Broadcast parameters/statistics back to the input dimensionality.
        param_axes = iter(list(range((input.ndim - len(self.axes)))))
        pattern = [('x' if (input_axis in self.axes) else next(param_axes)) for input_axis in range(input.ndim)]
        beta = (0 if (self.beta is None) else self.beta.dimshuffle(pattern))
        gamma = (1 if (self.gamma is None) else self.gamma.dimshuffle(pattern))
        mean = mean.dimshuffle(pattern)
        std = std.dimshuffle(pattern)
        normalized = (((input - mean) * (gamma * TT.inv(std))) + beta)
        return normalized
|
def batch_norm(layer, **kwargs):
    """Convenience wrapper that inserts batch normalization behind `layer`.

    Steals the layer's nonlinearity (if any), drops its bias (redundant
    with the learned shift), stacks a :class:`BatchNormLayer` on top, and
    finally re-applies the stolen nonlinearity via a ``NonlinearityLayer``.
    The given `layer` is modified in place; the topmost resulting layer is
    returned.
    """
    stolen_nonlinearity = getattr(layer, 'nonlinearity', None)
    if stolen_nonlinearity is not None:
        # Normalization must happen before the nonlinearity.
        layer.nonlinearity = lasagne.nonlinearities.identity
    if getattr(layer, 'b', None) is not None:
        # The bias is redundant with BatchNormLayer's beta parameter.
        del layer.params[layer.b]
        layer.b = None
    stacked = BatchNormLayer(layer, **kwargs)
    if stolen_nonlinearity is not None:
        stacked = L.NonlinearityLayer(stacked, stolen_nonlinearity)
    return stacked
|
class LasagnePowered(Parameterized):
    """Mixin for objects whose parameters live in a lasagne layer graph."""

    def __init__(self, output_layers):
        self._output_layers = output_layers
        super(LasagnePowered, self).__init__()

    @property
    def output_layers(self):
        return self._output_layers

    @overrides
    def get_params_internal(self, **tags):
        # Collect the parameters of every layer reachable from the outputs.
        return L.get_all_params(L.concat(self._output_layers), **tags)
|
def wrapped_conv(*args, **kwargs):
    """Convolution wrapper that prefers cuDNN and falls back to conv2d.

    Strips the keyword arguments that ``dnn_conv`` does not accept, and
    requires ``filter_flip`` to be present and truthy. On CPU, or if the
    cuDNN call fails, it falls back to ``theano.tensor.nnet.conv2d`` with
    the original arguments.
    """
    dnn_kwargs = dict(kwargs)
    dnn_kwargs.pop('image_shape', None)
    dnn_kwargs.pop('filter_shape', None)
    # filter_flip must be set (and is removed) before calling dnn_conv.
    assert dnn_kwargs.pop('filter_flip', False)
    (input, W, input_shape, get_W_shape) = args
    if theano.config.device == 'cpu':
        return theano.tensor.nnet.conv2d(*args, **kwargs)
    try:
        return theano.sandbox.cuda.dnn.dnn_conv(input.astype('float32'), W.astype('float32'), **dnn_kwargs)
    except Exception:
        # Best-effort: any cuDNN failure degrades to the reference conv.
        print('falling back to default conv2d')
        return theano.tensor.nnet.conv2d(*args, **kwargs)
|
class MLP(LasagnePowered, Serializable):
    """Fully-connected feed-forward network built from lasagne DenseLayers."""

    def __init__(self, output_dim, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=LI.GlorotUniform(), hidden_b_init=LI.Constant(0.0), output_W_init=LI.GlorotUniform(), output_b_init=LI.Constant(0.0), name=None, input_var=None, input_layer=None, input_shape=None, batch_norm=False):
        # Snapshot constructor args for serialization before anything else.
        Serializable.quick_init(self, locals())
        prefix = '' if name is None else name + '_'
        if input_layer is not None:
            in_layer = input_layer
        else:
            in_layer = L.InputLayer(shape=(None,) + input_shape, input_var=input_var)
        self._layers = [in_layer]
        current = in_layer
        for idx, hidden_size in enumerate(hidden_sizes):
            current = L.DenseLayer(current, num_units=hidden_size, nonlinearity=hidden_nonlinearity, name='%shidden_%d' % (prefix, idx), W=hidden_W_init, b=hidden_b_init)
            if batch_norm:
                current = L.batch_norm(current)
            self._layers.append(current)
        out_layer = L.DenseLayer(current, num_units=output_dim, nonlinearity=output_nonlinearity, name='%soutput' % (prefix,), W=output_W_init, b=output_b_init)
        self._layers.append(out_layer)
        self._l_in = in_layer
        self._l_out = out_layer
        self._output = L.get_output(out_layer)
        LasagnePowered.__init__(self, [out_layer])

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    @property
    def layers(self):
        return self._layers

    @property
    def output(self):
        return self._output
|
class GRULayer(L.Layer):
    """Gated recurrent unit (GRU) layer.

    Update equations (@ = matmul, * = elementwise):

        reset gate:   r(t) = f_r(x(t) @ W_xr + h(t-1) @ W_hr + b_r)
        update gate:  u(t) = f_u(x(t) @ W_xu + h(t-1) @ W_hu + b_u)
        cell gate:    c(t) = f_c(x(t) @ W_xc + r(t) * (h(t-1) @ W_hc) + b_c)
        hidden state: h(t) = (1 - u(t)) * h(t-1) + u(t) * c(t)

    The reset, update, and cell vectors share the hidden state's
    dimensionality (``num_units``). Input is treated as
    (batch, steps, ...); trailing feature axes are flattened.
    """
    def __init__(self, incoming, num_units, hidden_nonlinearity, gate_nonlinearity=LN.sigmoid, name=None, W_init=LI.GlorotUniform(), b_init=LI.Constant(0.0), hidden_init=LI.Constant(0.0), hidden_init_trainable=True):
        # None nonlinearities mean "identity".
        if (hidden_nonlinearity is None):
            hidden_nonlinearity = LN.identity
        if (gate_nonlinearity is None):
            gate_nonlinearity = LN.identity
        super(GRULayer, self).__init__(incoming, name=name)
        # Flatten everything after (batch, step) into one feature axis.
        input_shape = self.input_shape[2:]
        input_dim = ext.flatten_shape_dim(input_shape)
        # (Optionally trainable) initial hidden state shared across the batch.
        self.h0 = self.add_param(hidden_init, (num_units,), name='h0', trainable=hidden_init_trainable, regularizable=False)
        # Reset gate parameters.
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name='W_xr')
        self.W_hr = self.add_param(W_init, (num_units, num_units), name='W_hr')
        self.b_r = self.add_param(b_init, (num_units,), name='b_r', regularizable=False)
        # Update gate parameters.
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name='W_xu')
        self.W_hu = self.add_param(W_init, (num_units, num_units), name='W_hu')
        self.b_u = self.add_param(b_init, (num_units,), name='b_u', regularizable=False)
        # Cell gate parameters.
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name='W_xc')
        self.W_hc = self.add_param(W_init, (num_units, num_units), name='W_hc')
        self.b_c = self.add_param(b_init, (num_units,), name='b_c', regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
    def step(self, x, hprev):
        # One GRU transition: x is (batch, input_dim), hprev is
        # (batch, num_units); returns the next hidden state.
        r = self.gate_nonlinearity(((x.dot(self.W_xr) + hprev.dot(self.W_hr)) + self.b_r))
        u = self.gate_nonlinearity(((x.dot(self.W_xu) + hprev.dot(self.W_hu)) + self.b_u))
        c = self.nonlinearity(((x.dot(self.W_xc) + (r * hprev.dot(self.W_hc))) + self.b_c))
        h = (((1 - u) * hprev) + (u * c))
        # Cast keeps scan's output dtype consistent with floatX.
        return h.astype(theano.config.floatX)
    def get_step_layer(self, l_in, l_prev_hidden):
        # Expose a single transition as a layer (used for stepwise rollout).
        return GRUStepLayer(incomings=[l_in, l_prev_hidden], gru_layer=self)
    def get_output_shape_for(self, input_shape):
        (n_batch, n_steps) = input_shape[:2]
        return (n_batch, n_steps, self.num_units)
    def get_output_for(self, input, **kwargs):
        n_batches = input.shape[0]
        n_steps = input.shape[1]
        # Flatten trailing feature axes: (batch, steps, input_dim).
        input = TT.reshape(input, (n_batches, n_steps, (- 1)))
        # Broadcast the initial hidden state over the batch.
        h0s = TT.tile(TT.reshape(self.h0, (1, self.num_units)), (n_batches, 1))
        # scan iterates over the leading axis, so put steps first.
        shuffled_input = input.dimshuffle(1, 0, 2)
        (hs, _) = theano.scan(fn=self.step, sequences=[shuffled_input], outputs_info=h0s)
        # Back to (batch, steps, num_units).
        shuffled_hs = hs.dimshuffle(1, 0, 2)
        return shuffled_hs
|
class GRUStepLayer(L.MergeLayer):
    """Single-transition view of a :class:`GRULayer`.

    Takes [x(t), h(t-1)] as incoming layers and produces h(t), sharing all
    parameters with the wrapped GRU layer.
    """

    def __init__(self, incomings, gru_layer, name=None):
        super(GRUStepLayer, self).__init__(incomings, name)
        self._gru_layer = gru_layer

    def get_params(self, **tags):
        # Delegate: this layer owns no parameters of its own.
        return self._gru_layer.get_params(**tags)

    def get_output_shape_for(self, input_shapes):
        # `input_shapes` is a list with one shape tuple per incoming layer.
        # The previous code used the whole first shape tuple as the batch
        # size, producing a malformed nested shape; the batch size is the
        # first entry of the step input's shape.
        n_batch = input_shapes[0][0]
        return (n_batch, self._gru_layer.num_units)

    def get_output_for(self, inputs, **kwargs):
        (x, hprev) = inputs
        n_batch = x.shape[0]
        # Flatten trailing feature axes before the GRU transition.
        x = x.reshape((n_batch, -1))
        return self._gru_layer.step(x, hprev)
|
class GRUNetwork(object):
    """Recurrent network: a GRU over sequences plus a shared readout layer.

    Builds both a sequence-level output layer and the single-step layers
    needed to roll the recurrence forward one transition at a time; the
    step readout reuses the sequence readout's weights.
    """

    def __init__(self, input_shape, output_dim, hidden_dim, hidden_nonlinearity=LN.rectify, output_nonlinearity=None, name=None, input_var=None, input_layer=None):
        if input_layer is not None:
            seq_input = input_layer
        else:
            seq_input = L.InputLayer(shape=(None, None) + input_shape, input_var=input_var, name='input')
        # Inputs for the one-step rollout graph.
        step_input = L.InputLayer(shape=(None,) + input_shape)
        step_prev_hidden = L.InputLayer(shape=(None, hidden_dim))
        gru = GRULayer(seq_input, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, hidden_init_trainable=False)
        # Flatten (batch, steps, hidden) so one DenseLayer can read it out.
        gru_flat = L.ReshapeLayer(gru, shape=(-1, hidden_dim))
        readout_flat = L.DenseLayer(gru_flat, num_units=output_dim, nonlinearity=output_nonlinearity)
        # Un-flatten the readout back to (batch, steps, output_dim).
        seq_output = OpLayer(readout_flat, op=(lambda flat_output, l_input: flat_output.reshape((l_input.shape[0], l_input.shape[1], (- 1)))), shape_op=(lambda flat_output_shape, l_input_shape: (l_input_shape[0], l_input_shape[1], flat_output_shape[(- 1)])), extras=[seq_input])
        step_hidden = gru.get_step_layer(step_input, step_prev_hidden)
        # The step readout shares W and b with the sequence readout.
        step_output = L.DenseLayer(step_hidden, num_units=output_dim, nonlinearity=output_nonlinearity, W=readout_flat.W, b=readout_flat.b)
        self._l_in = seq_input
        self._hid_init_param = gru.h0
        self._l_gru = gru
        self._l_out = seq_output
        self._l_step_input = step_input
        self._l_step_prev_hidden = step_prev_hidden
        self._l_step_hidden = step_hidden
        self._l_step_output = step_output

    @property
    def input_layer(self):
        return self._l_in

    @property
    def input_var(self):
        return self._l_in.input_var

    @property
    def output_layer(self):
        return self._l_out

    @property
    def step_input_layer(self):
        return self._l_step_input

    @property
    def step_prev_hidden_layer(self):
        return self._l_step_prev_hidden

    @property
    def step_hidden_layer(self):
        return self._l_step_hidden

    @property
    def step_output_layer(self):
        return self._l_step_output

    @property
    def hid_init_param(self):
        return self._hid_init_param
|
class ConvNetwork(object):
    """Convolutional network: conv stack, dense stack, dense output layer.

    2- and 3-axis input shapes are accepted through a flat input layer that
    is reshaped internally; a 2-axis shape is treated as a single-channel
    image.
    """

    def __init__(self, input_shape, output_dim, hidden_sizes, conv_filters, conv_filter_sizes, conv_strides, conv_pads, hidden_W_init=LI.GlorotUniform(), hidden_b_init=LI.Constant(0.0), output_W_init=LI.GlorotUniform(), output_b_init=LI.Constant(0.0), hidden_nonlinearity=LN.rectify, output_nonlinearity=LN.softmax, name=None, input_var=None):
        prefix = '' if name is None else name + '_'
        if len(input_shape) == 3:
            # Flat input, reshaped to the given (C, H, W).
            in_layer = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var)
            current = L.reshape(in_layer, ([0],) + input_shape)
        elif len(input_shape) == 2:
            # Flat input treated as a single-channel (1, H, W) image.
            in_layer = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var)
            input_shape = (1,) + input_shape
            current = L.reshape(in_layer, ([0],) + input_shape)
        else:
            in_layer = L.InputLayer(shape=(None,) + input_shape, input_var=input_var)
            current = in_layer
        conv_specs = zip(conv_filters, conv_filter_sizes, conv_strides, conv_pads)
        for idx, (n_filters, filter_size, stride, pad) in enumerate(conv_specs):
            current = L.Conv2DLayer(current, num_filters=n_filters, filter_size=filter_size, stride=(stride, stride), pad=pad, nonlinearity=hidden_nonlinearity, name='%sconv_hidden_%d' % (prefix, idx), convolution=wrapped_conv)
        for idx, hidden_size in enumerate(hidden_sizes):
            current = L.DenseLayer(current, num_units=hidden_size, nonlinearity=hidden_nonlinearity, name='%shidden_%d' % (prefix, idx), W=hidden_W_init, b=hidden_b_init)
        out_layer = L.DenseLayer(current, num_units=output_dim, nonlinearity=output_nonlinearity, name='%soutput' % (prefix,), W=output_W_init, b=output_b_init)
        self._l_in = in_layer
        self._l_out = out_layer
        self._input_var = in_layer.input_var

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    @property
    def input_var(self):
        return self._l_in.input_var
|
@contextmanager
def suppress_params_loading():
    """Context manager that disables parameter loading in ``__setstate__``.

    While the context is active, the module-level ``load_params`` flag is
    False, so deserialized Parameterized objects skip restoring their saved
    parameter values.
    """
    global load_params
    load_params = False
    try:
        yield
    finally:
        # Restore the flag even if the body raises, so one failed
        # deserialization cannot permanently disable parameter loading.
        load_params = True
|
class Parameterized(object):
    """Base class for objects exposing cached, tag-filtered parameter lists.

    Subclasses implement :meth:`get_params_internal`; results (and derived
    dtypes/shapes) are memoized per unique tag combination.
    """

    def __init__(self):
        self._cached_params = {}
        self._cached_param_dtypes = {}
        self._cached_param_shapes = {}

    @staticmethod
    def _tag_key(tags):
        # Canonical, hashable cache key: tag items sorted by tag name.
        return tuple(sorted(tags.items(), key=lambda item: item[0]))

    def get_params_internal(self, **tags):
        """Uncached lookup of the parameters matching `tags` (subclass hook)."""
        raise NotImplementedError

    def get_params(self, **tags):
        """Return the parameters matching `tags` (e.g. 'trainable'), cached."""
        key = self._tag_key(tags)
        if key not in self._cached_params:
            self._cached_params[key] = self.get_params_internal(**tags)
        return self._cached_params[key]

    def get_param_dtypes(self, **tags):
        key = self._tag_key(tags)
        if key not in self._cached_param_dtypes:
            self._cached_param_dtypes[key] = [p.get_value(borrow=True).dtype for p in self.get_params(**tags)]
        return self._cached_param_dtypes[key]

    def get_param_shapes(self, **tags):
        key = self._tag_key(tags)
        if key not in self._cached_param_shapes:
            self._cached_param_shapes[key] = [p.get_value(borrow=True).shape for p in self.get_params(**tags)]
        return self._cached_param_shapes[key]

    def get_param_values(self, **tags):
        return flatten_tensors([p.get_value(borrow=True) for p in self.get_params(**tags)])

    def set_param_values(self, flattened_params, **tags):
        # 'debug' is consumed here so it never reaches the tag filters.
        debug = tags.pop('debug', False)
        values = unflatten_tensors(flattened_params, self.get_param_shapes(**tags))
        for param, dtype, value in zip(self.get_params(**tags), self.get_param_dtypes(**tags), values):
            param.set_value(value.astype(dtype))
            if debug:
                print('setting value of %s' % param.name)

    def flat_to_params(self, flattened_params, **tags):
        return unflatten_tensors(flattened_params, self.get_param_shapes(**tags))

    def __getstate__(self):
        d = Serializable.__getstate__(self)
        d['params'] = self.get_param_values()
        return d

    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            self.set_param_values(d['params'])
|
class Serializable(object):
    """Mixin that records constructor arguments for pickling/cloning.

    ``quick_init`` snapshots the arguments a subclass ``__init__`` was
    called with; ``__getstate__``/``__setstate__`` then serialize the
    object as "re-call the constructor with those arguments".
    """

    def __init__(self, *args, **kwargs):
        self.__args = args
        self.__kwargs = kwargs

    def quick_init(self, locals_):
        """Record the calling ``__init__``'s arguments from its ``locals()``.

        Invoke first thing inside the subclass constructor, e.g.
        ``Serializable.quick_init(self, locals())``. Idempotent.
        """
        if getattr(self, '_serializable_initialized', False):
            return
        # getfullargspec replaces the deprecated getargspec, which was
        # removed in Python 3.11; `varkw` is the new name for `keywords`.
        spec = inspect.getfullargspec(self.__init__)
        # Skip 'self' (the first declared argument).
        in_order_args = [locals_[arg] for arg in spec.args][1:]
        varargs = locals_[spec.varargs] if spec.varargs else tuple()
        kwargs = locals_[spec.varkw] if spec.varkw else dict()
        self.__args = tuple(in_order_args) + varargs
        self.__kwargs = kwargs
        setattr(self, '_serializable_initialized', True)

    def __getstate__(self):
        return {'__args': self.__args, '__kwargs': self.__kwargs}

    def __setstate__(self, d):
        # Rebuild by re-invoking the constructor with the recorded args.
        in_order_args = inspect.getfullargspec(self.__init__).args[1:]
        out = type(self)(**dict(zip(in_order_args, d['__args']), **d['__kwargs']))
        self.__dict__.update(out.__dict__)

    @classmethod
    def clone(cls, obj, **kwargs):
        """Return a copy of `obj` with some constructor kwargs overridden."""
        assert isinstance(obj, Serializable)
        d = obj.__getstate__()
        d['__kwargs'] = dict(d['__kwargs'], **kwargs)
        out = type(obj).__new__(type(obj))
        out.__setstate__(d)
        return out
|
class Distribution(object):
    """Abstract interface for probability distributions with both symbolic
    (theano) and numeric (numpy) operations."""

    @property
    def dim(self):
        """Dimensionality of a single sample."""
        raise NotImplementedError

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL divergence of the two distributions."""
        raise NotImplementedError

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL divergence of the two distributions."""
        raise NotImplementedError

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        raise NotImplementedError

    def entropy(self, dist_info):
        raise NotImplementedError

    def log_likelihood_sym(self, x_var, dist_info_vars):
        raise NotImplementedError

    def likelihood_sym(self, x_var, dist_info_vars):
        # Default implementation: exponentiate the symbolic log-likelihood.
        return TT.exp(self.log_likelihood_sym(x_var, dist_info_vars))

    def log_likelihood(self, xs, dist_info):
        raise NotImplementedError

    @property
    def dist_info_keys(self):
        """Keys expected in the `dist_info` dictionaries."""
        raise NotImplementedError
|
class Bernoulli(Distribution):
    """Factorized Bernoulli distribution parameterized by 'p' (per-component
    success probabilities); TINY guards every log/division against zeros."""

    def __init__(self, dim):
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL divergence, summed over the component axis."""
        old_p = old_dist_info_vars['p']
        new_p = new_dist_info_vars['p']
        on = old_p * (TT.log(old_p + TINY) - TT.log(new_p + TINY))
        off = (1 - old_p) * (TT.log((1 - old_p) + TINY) - TT.log((1 - new_p) + TINY))
        return TT.sum(on + off, axis=-1)

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL divergence, summed over the component axis."""
        old_p = old_dist_info['p']
        new_p = new_dist_info['p']
        on = old_p * (np.log(old_p + TINY) - np.log(new_p + TINY))
        off = (1 - old_p) * (np.log((1 - old_p) + TINY) - np.log((1 - new_p) + TINY))
        return np.sum(on + off, axis=-1)

    def sample(self, dist_info):
        p = np.asarray(dist_info['p'])
        draws = np.random.uniform(low=0.0, high=1.0, size=p.shape)
        # np.cast['int'] was removed in NumPy 2.0; astype(int) is the
        # equivalent platform-int cast.
        return (draws < p).astype(int)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        old_p = old_dist_info_vars['p']
        new_p = new_dist_info_vars['p']
        ratio_on = (x_var * new_p) / (old_p + TINY)
        ratio_off = ((1 - x_var) * (1 - new_p)) / ((1 - old_p) + TINY)
        return TT.prod(ratio_on + ratio_off, axis=-1)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        p = dist_info_vars['p']
        return TT.sum(x_var * TT.log(p + TINY) + (1 - x_var) * TT.log((1 - p) + TINY), axis=-1)

    def log_likelihood(self, xs, dist_info):
        p = dist_info['p']
        return np.sum(xs * np.log(p + TINY) + (1 - xs) * np.log((1 - p) + TINY), axis=-1)

    def entropy(self, dist_info):
        p = dist_info['p']
        return np.sum(-p * np.log(p + TINY) - (1 - p) * np.log((1 - p) + TINY), axis=-1)

    @property
    def dist_info_keys(self):
        return ['p']
|
def from_onehot(x_var):
    """Convert a batch of one-hot rows to an int32 vector of class indices.

    Rows with no nonzero entry map to index 0.
    """
    labels = np.zeros(len(x_var), dtype='int32')
    rows, cols = np.nonzero(x_var)
    labels[rows] = cols
    return labels
|
class Categorical(Distribution):
    """Categorical distribution over `dim` classes, parameterized by 'prob'
    (per-class probabilities); samples are one-hot encoded."""

    def __init__(self, dim):
        self._dim = dim
        self._srng = RandomStreams()

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL divergence of two categorical distributions."""
        p_old = old_dist_info_vars['prob']
        p_new = new_dist_info_vars['prob']
        return TT.sum(p_old * (TT.log(p_old + TINY) - TT.log(p_new + TINY)), axis=-1)

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL divergence of two categorical distributions."""
        p_old = old_dist_info['prob']
        p_new = new_dist_info['prob']
        return np.sum(p_old * (np.log(p_old + TINY) - np.log(p_new + TINY)), axis=-1)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        p_old = old_dist_info_vars['prob']
        p_new = new_dist_info_vars['prob']
        # x_var is one-hot, so the sums select the chosen class's probability.
        onehot = TT.cast(x_var, 'float32')
        selected_new = TT.sum(p_new * onehot, axis=-1)
        selected_old = TT.sum(p_old * onehot, axis=-1)
        return (selected_new + TINY) / (selected_old + TINY)

    def entropy(self, info):
        probs = info['prob']
        return -np.sum(probs * np.log(probs + TINY), axis=1)

    def entropy_sym(self, dist_info_vars):
        prob_var = dist_info_vars['prob']
        return -TT.sum(prob_var * TT.log(prob_var + TINY), axis=1)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        probs = dist_info_vars['prob']
        return TT.log(TT.sum(probs * TT.cast(x_var, 'float32'), axis=-1) + TINY)

    def log_likelihood(self, xs, dist_info):
        probs = dist_info['prob']
        n_rows = probs.shape[0]
        chosen = from_onehot(np.asarray(xs))
        return np.log(probs[np.arange(n_rows), chosen] + TINY)

    def sample_sym(self, dist_info):
        probs = dist_info['prob']
        return self._srng.multinomial(pvals=probs, dtype='uint8')

    @property
    def dist_info_keys(self):
        return ['prob']
|
class Delta(Distribution):
    """Degenerate (deterministic) distribution: all mass on a single point.

    KL and log-likelihood return None so callers can detect the degenerate
    case; the entropy of a point mass is identically 0.
    """

    @property
    def dim(self):
        return 0

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        return None

    def kl(self, old_dist_info, new_dist_info):
        return None

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        raise NotImplementedError

    def log_likelihood_sym(self, x_var, dist_info_vars):
        raise NotImplementedError

    def likelihood_sym(self, x_var, dist_info_vars):
        return TT.exp(self.log_likelihood_sym(x_var, dist_info_vars))

    def log_likelihood(self, xs, dist_info):
        return None

    def entropy(self, dist_info):
        # The class previously defined `entropy` twice; the first
        # (raising) definition was dead code shadowed by this one, which
        # is the definition that took effect. Behavior is unchanged.
        return 0

    @property
    def dist_info_keys(self):
        return None
|
class DiagonalGaussian(Distribution):
    """Multivariate Gaussian with diagonal covariance, parameterized by
    'mean' and 'log_std' entries in the dist-info dictionaries."""

    def __init__(self, dim):
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL divergence between two diagonal Gaussians."""
        mu_old = old_dist_info_vars['mean']
        logstd_old = old_dist_info_vars['log_std']
        mu_new = new_dist_info_vars['mean']
        logstd_new = new_dist_info_vars['log_std']
        std_old = TT.exp(logstd_old)
        std_new = TT.exp(logstd_new)
        # Per-component KL, then summed over the last axis.
        num = TT.square(mu_old - mu_new) + TT.square(std_old) - TT.square(std_new)
        den = 2 * TT.square(std_new) + 1e-08
        return TT.sum(num / den + logstd_new - logstd_old, axis=-1)

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL divergence between two diagonal Gaussians."""
        mu_old = old_dist_info['mean']
        logstd_old = old_dist_info['log_std']
        mu_new = new_dist_info['mean']
        logstd_new = new_dist_info['log_std']
        std_old = np.exp(logstd_old)
        std_new = np.exp(logstd_new)
        num = np.square(mu_old - mu_new) + np.square(std_old) - np.square(std_new)
        den = 2 * np.square(std_new) + 1e-08
        return np.sum(num / den + logstd_new - logstd_old, axis=-1)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        logli_new = self.log_likelihood_sym(x_var, new_dist_info_vars)
        logli_old = self.log_likelihood_sym(x_var, old_dist_info_vars)
        return TT.exp(logli_new - logli_old)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        means = dist_info_vars['mean']
        log_stds = dist_info_vars['log_std']
        # Standardized residuals.
        zs = (x_var - means) / TT.exp(log_stds)
        return -TT.sum(log_stds, axis=-1) - 0.5 * TT.sum(TT.square(zs), axis=-1) - 0.5 * means.shape[-1] * np.log(2 * np.pi)

    def sample(self, dist_info):
        means = dist_info['mean']
        log_stds = dist_info['log_std']
        noise = np.random.normal(size=means.shape)
        return noise * np.exp(log_stds) + means

    def log_likelihood(self, xs, dist_info):
        means = dist_info['mean']
        log_stds = dist_info['log_std']
        zs = (xs - means) / np.exp(log_stds)
        return -np.sum(log_stds, axis=-1) - 0.5 * np.sum(np.square(zs), axis=-1) - 0.5 * means.shape[-1] * np.log(2 * np.pi)

    def entropy(self, dist_info):
        log_stds = dist_info['log_std']
        # Entropy of N(mu, sigma^2) is log(sigma) + log(sqrt(2*pi*e)) per axis.
        return np.sum(log_stds + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    def entropy_sym(self, dist_info_var):
        log_std_var = dist_info_var['log_std']
        return TT.sum(log_std_var + TT.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    @property
    def dist_info_keys(self):
        return ['mean', 'log_std']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.