| Instruction | output_code |
|---|---|
Given the code snippet: <|code_start|> my_env = os.environ.copy()
my_env["OCF_ROOT"] = config.path.ocf_root
for k, v in params.items():
my_env["OCF_RESKEY_" + k] = v
cmd = [os.path.join(config.path.ocf_root, "resource.d", agent.ra_provider, agent.ra_type), "validate-all"]
if options.regression_tests:
print(".EXT", " ".join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=my_env)
_out, _ = p.communicate()
out = to_ascii(_out)
p.wait()
if log is True:
for msg in out.splitlines():
if msg.startswith("ERROR: "):
logger.error(msg[7:])
elif msg.startswith("WARNING: "):
logger.warning(msg[9:])
elif msg.startswith("INFO: "):
logger.info(msg[6:])
elif msg.startswith("DEBUG: "):
logger.debug(msg[7:])
else:
logger.info(msg)
return p.returncode, out
DLM_RA_SCRIPTS = """
primitive {id} ocf:pacemaker:controld \
op start timeout=90 \
<|code_end|>
, generate the next line using the imports in this file:
import os
import subprocess
import copy
import re
import glob
from lxml import etree
from . import cache
from . import constants
from . import config
from . import options
from . import userdir
from . import utils
from .utils import stdout2list, is_program, is_process, to_ascii
from .utils import os_types_list, get_stdout
from .utils import crm_msec, crm_time_cmp
from . import log
from distutils import version
and context (functions, classes, or occasionally code) from other files:
# Path: crmsh/utils.py
# def stdout2list(cmd, stderr_on=True, shell=True):
# '''
# Run a cmd, fetch output, return it as a list of lines.
# stderr_on controls whether to show output which comes on stderr.
# '''
# rc, s = get_stdout(add_sudo(cmd), stderr_on=stderr_on, shell=shell)
# if not s:
# return rc, []
# return rc, s.split('\n')
#
# def is_program(prog):
# """Is this program available?"""
# def isexec(filename):
# return os.path.isfile(filename) and os.access(filename, os.X_OK)
# for p in os.getenv("PATH").split(os.pathsep):
# f = os.path.join(p, prog)
# if isexec(f):
# return f
# return None
#
# def is_process(s):
# """
# Returns true if argument is the name of a running process.
#
# s: process name
# returns Boolean
# """
# from os.path import join, basename
# # find pids of running processes
# pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
# for pid in pids:
# try:
# cmdline = open(join('/proc', pid, 'cmdline'), 'rb').read()
# procname = basename(to_ascii(cmdline).replace('\x00', ' ').split(' ')[0])
# if procname == s:
# return True
# except EnvironmentError:
# # a process may have died since we got the list of pids
# pass
# return False
#
# def to_ascii(input_str):
# """Convert the bytes string to a ASCII string
# Usefull to remove accent (diacritics)"""
# if input_str is None:
# return input_str
# if isinstance(input_str, str):
# return input_str
# try:
# return str(input_str, 'utf-8')
# except UnicodeDecodeError:
# if config.core.debug or options.regression_tests:
# import traceback
# traceback.print_exc()
# return input_str.decode('utf-8', errors='ignore')
#
# Path: crmsh/utils.py
# def os_types_list(path):
# l = []
# for f in glob.glob(path):
# if os.access(f, os.X_OK) and os.path.isfile(f):
# a = f.split("/")
# l.append(a[-1])
# return l
#
# def get_stdout(cmd, input_s=None, stderr_on=True, shell=True, raw=False):
# '''
# Run a cmd, return stdout output.
# Optional input string "input_s".
# stderr_on controls whether to show output which comes on stderr.
# '''
# if stderr_on:
# stderr = None
# else:
# stderr = subprocess.PIPE
# if options.regression_tests:
# print(".EXT", cmd)
# proc = subprocess.Popen(cmd,
# shell=shell,
# stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,
# stderr=stderr)
# stdout_data, stderr_data = proc.communicate(input_s)
# if raw:
# return proc.returncode, stdout_data
# return proc.returncode, to_ascii(stdout_data).strip()
#
# Path: crmsh/utils.py
# def crm_msec(t):
# '''
# See lib/common/utils.c:crm_get_msec().
# '''
# convtab = {
# 'ms': (1, 1),
# 'msec': (1, 1),
# 'us': (1, 1000),
# 'usec': (1, 1000),
# '': (1000, 1),
# 's': (1000, 1),
# 'sec': (1000, 1),
# 'm': (60*1000, 1),
# 'min': (60*1000, 1),
# 'h': (60*60*1000, 1),
# 'hr': (60*60*1000, 1),
# }
# if not t:
# return -1
# r = re.match(r"\s*(\d+)\s*([a-zA-Z]+)?", t)
# if not r:
# return -1
# if not r.group(2):
# q = ''
# else:
# q = r.group(2).lower()
# try:
# mult, div = convtab[q]
# except KeyError:
# return -1
# return (int(r.group(1))*mult) // div
#
# def crm_time_cmp(a, b):
# return crm_msec(a) - crm_msec(b)
. Output only the next line. | op stop timeout=100 \ |
Predict the next line after this snippet: <|code_start|> if msg.startswith("ERROR: "):
logger.error(msg[7:])
elif msg.startswith("WARNING: "):
logger.warning(msg[9:])
elif msg.startswith("INFO: "):
logger.info(msg[6:])
elif msg.startswith("DEBUG: "):
logger.debug(msg[7:])
else:
logger.info(msg)
return p.returncode, out
DLM_RA_SCRIPTS = """
primitive {id} ocf:pacemaker:controld \
op start timeout=90 \
op stop timeout=100 \
op monitor interval=60 timeout=60"""
FILE_SYSTEM_RA_SCRIPTS = """
primitive {id} ocf:heartbeat:Filesystem \
params directory="{mnt_point}" fstype="{fs_type}" device="{device}" \
op monitor interval=20 timeout=40 \
op start timeout=60 \
op stop timeout=60"""
LVMLOCKD_RA_SCRIPTS = """
primitive {id} ocf:heartbeat:lvmlockd \
op start timeout=90 \
op stop timeout=100 \
op monitor interval=30 timeout=90"""
LVMACTIVATE_RA_SCRIPTS = """
<|code_end|>
using the current file's imports:
import os
import subprocess
import copy
import re
import glob
from lxml import etree
from . import cache
from . import constants
from . import config
from . import options
from . import userdir
from . import utils
from .utils import stdout2list, is_program, is_process, to_ascii
from .utils import os_types_list, get_stdout
from .utils import crm_msec, crm_time_cmp
from . import log
from distutils import version
and any relevant context from other files:
# Path: crmsh/utils.py
# def stdout2list(cmd, stderr_on=True, shell=True):
# '''
# Run a cmd, fetch output, return it as a list of lines.
# stderr_on controls whether to show output which comes on stderr.
# '''
# rc, s = get_stdout(add_sudo(cmd), stderr_on=stderr_on, shell=shell)
# if not s:
# return rc, []
# return rc, s.split('\n')
#
# def is_program(prog):
# """Is this program available?"""
# def isexec(filename):
# return os.path.isfile(filename) and os.access(filename, os.X_OK)
# for p in os.getenv("PATH").split(os.pathsep):
# f = os.path.join(p, prog)
# if isexec(f):
# return f
# return None
#
# def is_process(s):
# """
# Returns true if argument is the name of a running process.
#
# s: process name
# returns Boolean
# """
# from os.path import join, basename
# # find pids of running processes
# pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
# for pid in pids:
# try:
# cmdline = open(join('/proc', pid, 'cmdline'), 'rb').read()
# procname = basename(to_ascii(cmdline).replace('\x00', ' ').split(' ')[0])
# if procname == s:
# return True
# except EnvironmentError:
# # a process may have died since we got the list of pids
# pass
# return False
#
# def to_ascii(input_str):
# """Convert the bytes string to a ASCII string
# Usefull to remove accent (diacritics)"""
# if input_str is None:
# return input_str
# if isinstance(input_str, str):
# return input_str
# try:
# return str(input_str, 'utf-8')
# except UnicodeDecodeError:
# if config.core.debug or options.regression_tests:
# import traceback
# traceback.print_exc()
# return input_str.decode('utf-8', errors='ignore')
#
# Path: crmsh/utils.py
# def os_types_list(path):
# l = []
# for f in glob.glob(path):
# if os.access(f, os.X_OK) and os.path.isfile(f):
# a = f.split("/")
# l.append(a[-1])
# return l
#
# def get_stdout(cmd, input_s=None, stderr_on=True, shell=True, raw=False):
# '''
# Run a cmd, return stdout output.
# Optional input string "input_s".
# stderr_on controls whether to show output which comes on stderr.
# '''
# if stderr_on:
# stderr = None
# else:
# stderr = subprocess.PIPE
# if options.regression_tests:
# print(".EXT", cmd)
# proc = subprocess.Popen(cmd,
# shell=shell,
# stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,
# stderr=stderr)
# stdout_data, stderr_data = proc.communicate(input_s)
# if raw:
# return proc.returncode, stdout_data
# return proc.returncode, to_ascii(stdout_data).strip()
#
# Path: crmsh/utils.py
# def crm_msec(t):
# '''
# See lib/common/utils.c:crm_get_msec().
# '''
# convtab = {
# 'ms': (1, 1),
# 'msec': (1, 1),
# 'us': (1, 1000),
# 'usec': (1, 1000),
# '': (1000, 1),
# 's': (1000, 1),
# 'sec': (1000, 1),
# 'm': (60*1000, 1),
# 'min': (60*1000, 1),
# 'h': (60*60*1000, 1),
# 'hr': (60*60*1000, 1),
# }
# if not t:
# return -1
# r = re.match(r"\s*(\d+)\s*([a-zA-Z]+)?", t)
# if not r:
# return -1
# if not r.group(2):
# q = ''
# else:
# q = r.group(2).lower()
# try:
# mult, div = convtab[q]
# except KeyError:
# return -1
# return (int(r.group(1))*mult) // div
#
# def crm_time_cmp(a, b):
# return crm_msec(a) - crm_msec(b)
. Output only the next line. | primitive {id} ocf:heartbeat:LVM-activate \ |
Continue the code snippet: <|code_start|> print(".EXT", " ".join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=my_env)
_out, _ = p.communicate()
out = to_ascii(_out)
p.wait()
if log is True:
for msg in out.splitlines():
if msg.startswith("ERROR: "):
logger.error(msg[7:])
elif msg.startswith("WARNING: "):
logger.warning(msg[9:])
elif msg.startswith("INFO: "):
logger.info(msg[6:])
elif msg.startswith("DEBUG: "):
logger.debug(msg[7:])
else:
logger.info(msg)
return p.returncode, out
DLM_RA_SCRIPTS = """
primitive {id} ocf:pacemaker:controld \
op start timeout=90 \
op stop timeout=100 \
op monitor interval=60 timeout=60"""
FILE_SYSTEM_RA_SCRIPTS = """
primitive {id} ocf:heartbeat:Filesystem \
params directory="{mnt_point}" fstype="{fs_type}" device="{device}" \
op monitor interval=20 timeout=40 \
<|code_end|>
. Use current file imports:
import os
import subprocess
import copy
import re
import glob
from lxml import etree
from . import cache
from . import constants
from . import config
from . import options
from . import userdir
from . import utils
from .utils import stdout2list, is_program, is_process, to_ascii
from .utils import os_types_list, get_stdout
from .utils import crm_msec, crm_time_cmp
from . import log
from distutils import version
and context (classes, functions, or code) from other files:
# Path: crmsh/utils.py
# def stdout2list(cmd, stderr_on=True, shell=True):
# '''
# Run a cmd, fetch output, return it as a list of lines.
# stderr_on controls whether to show output which comes on stderr.
# '''
# rc, s = get_stdout(add_sudo(cmd), stderr_on=stderr_on, shell=shell)
# if not s:
# return rc, []
# return rc, s.split('\n')
#
# def is_program(prog):
# """Is this program available?"""
# def isexec(filename):
# return os.path.isfile(filename) and os.access(filename, os.X_OK)
# for p in os.getenv("PATH").split(os.pathsep):
# f = os.path.join(p, prog)
# if isexec(f):
# return f
# return None
#
# def is_process(s):
# """
# Returns true if argument is the name of a running process.
#
# s: process name
# returns Boolean
# """
# from os.path import join, basename
# # find pids of running processes
# pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
# for pid in pids:
# try:
# cmdline = open(join('/proc', pid, 'cmdline'), 'rb').read()
# procname = basename(to_ascii(cmdline).replace('\x00', ' ').split(' ')[0])
# if procname == s:
# return True
# except EnvironmentError:
# # a process may have died since we got the list of pids
# pass
# return False
#
# def to_ascii(input_str):
# """Convert the bytes string to a ASCII string
# Usefull to remove accent (diacritics)"""
# if input_str is None:
# return input_str
# if isinstance(input_str, str):
# return input_str
# try:
# return str(input_str, 'utf-8')
# except UnicodeDecodeError:
# if config.core.debug or options.regression_tests:
# import traceback
# traceback.print_exc()
# return input_str.decode('utf-8', errors='ignore')
#
# Path: crmsh/utils.py
# def os_types_list(path):
# l = []
# for f in glob.glob(path):
# if os.access(f, os.X_OK) and os.path.isfile(f):
# a = f.split("/")
# l.append(a[-1])
# return l
#
# def get_stdout(cmd, input_s=None, stderr_on=True, shell=True, raw=False):
# '''
# Run a cmd, return stdout output.
# Optional input string "input_s".
# stderr_on controls whether to show output which comes on stderr.
# '''
# if stderr_on:
# stderr = None
# else:
# stderr = subprocess.PIPE
# if options.regression_tests:
# print(".EXT", cmd)
# proc = subprocess.Popen(cmd,
# shell=shell,
# stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,
# stderr=stderr)
# stdout_data, stderr_data = proc.communicate(input_s)
# if raw:
# return proc.returncode, stdout_data
# return proc.returncode, to_ascii(stdout_data).strip()
#
# Path: crmsh/utils.py
# def crm_msec(t):
# '''
# See lib/common/utils.c:crm_get_msec().
# '''
# convtab = {
# 'ms': (1, 1),
# 'msec': (1, 1),
# 'us': (1, 1000),
# 'usec': (1, 1000),
# '': (1000, 1),
# 's': (1000, 1),
# 'sec': (1000, 1),
# 'm': (60*1000, 1),
# 'min': (60*1000, 1),
# 'h': (60*60*1000, 1),
# 'hr': (60*60*1000, 1),
# }
# if not t:
# return -1
# r = re.match(r"\s*(\d+)\s*([a-zA-Z]+)?", t)
# if not r:
# return -1
# if not r.group(2):
# q = ''
# else:
# q = r.group(2).lower()
# try:
# mult, div = convtab[q]
# except KeyError:
# return -1
# return (int(r.group(1))*mult) // div
#
# def crm_time_cmp(a, b):
# return crm_msec(a) - crm_msec(b)
. Output only the next line. | op start timeout=60 \ |
Given the code snippet: <|code_start|> 'release': release,
'version': version,
'machine': machine,
'processor': processor,
'distname': distname,
'user': get_user(),
'hostname': hostname,
'uptime': uptime[0],
'idletime': uptime[1],
'loadavg': loadavg[2] # 15 minute average
}
def disk_info():
rc, out, err = crm_script.call(['df'], shell=False)
if rc == 0:
disk_use = []
for line in out.split('\n')[1:]:
line = line.strip()
if line:
data = line.split()
if len(data) >= 6:
disk_use.append((data[5], data[4]))
return disk_use
return []
# configurations out of sync
FILES = [
<|code_end|>
, generate the next line using the imports in this file:
from builtins import str
from crmsh.report import utillib
import os
import pwd
import hashlib
import platform
import crm_script
and context (functions, classes, or occasionally code) from other files:
# Path: crmsh/report/utillib.py
# class Tempfile(object):
# def __init__(self):
# def add(self, filename):
# def drop(self):
# def add_tempfiles(filename):
# def _mkdir(directory):
# def arch_logs(logf, from_time, to_time):
# def analyze():
# def analyze_one(workdir, file_):
# def base_check():
# def booth_info():
# def check_backtraces(workdir):
# def check_crmvfy(workdir):
# def check_env():
# def check_if_log_is_empty():
# def check_logs(workdir):
# def check_permissions(workdir):
# def check_time(var, option):
# def cib_diff(file1, file2):
# def cluster_info():
# def generate_collect_functions():
# def collect_info():
# def collect_journal(from_t, to_t, outf):
# def compatibility_pcmk():
# def consolidate(workdir, f):
# def create_tempfile(time=None):
# def date():
# def diff_check(file1, file2):
# def get_distro_info():
# def dump_log(logf, from_line, to_line):
# def dump_logset(logf, from_time, to_time, outf):
# def dump_state(workdir):
# def events(destdir):
# def find_decompressor(log_file):
# def find_files(dirs, from_time, to_time):
# def find_files_all(name, path):
# def find_first_ts(data):
# def filter_lines(data, from_line, to_line):
# def finalword():
# def find_getstampproc(log_file):
# def find_getstampproc_raw(line):
# def find_log():
# def find_ssh_user():
# def findln_by_time(data, ts):
# def find_binary_for_core(corefile):
# def findbin(fname):
# def isexec(filename):
# def print_core_backtraces(flist):
# def get_cib_dir():
# def get_command_info(cmd):
# def get_command_info_timeout(cmd, timeout=5):
# def kill(process):
# def get_conf_var(option, default=None):
# def get_crm_daemon_dir():
# def get_dirname(path):
# def get_local_ip():
# def get_log_vars():
# def get_nodes():
# def get_peer_ip():
# def get_ocf_dir():
# def get_pe_state_dir():
# def get_pkg_mgr():
# def get_stamp_legacy(line):
# def get_stamp_rfc5424(line):
# def get_stamp_syslog(line):
# def get_ts(line):
# def grep(pattern, infile=None, incmd=None, flag=None):
# def grep_file(pattern, infile, flag):
# def grep_row(pattern, indata, flag):
# def head(n, indata):
# def is_conf_set(option, subsys=None):
# def is_exec(filename):
# def is_our_log(logf, from_time, to_time):
# def line_time(data_list, line_num):
# def load_ocf_dirs():
# def log_fatal(msg):
# def log_size(logf, outf):
# def make_temp_dir():
# def mktemplate(argv):
# def node_needs_pwd(node):
# def pe_to_dot(pe_file):
# def pick_compress():
# def pick_first(choice):
# def pkg_ver_deb(packages):
# def pkg_ver_pkg_info(packages):
# def pkg_ver_pkginfo(packages):
# def pkg_ver_rpm(packages):
# def pkg_versions(packages):
# def print_log(logf):
# def print_logseg(logf, from_time, to_time):
# def ra_build_info():
# def random_string(num):
# def sanitize():
# def sanitize_one(in_file):
# def parse_sanitize_rule(rule_string):
# def say_ssh_user():
# def sed_inplace(filename, pattern, repl):
# def set_env():
# def stdchannel_redirected(stdchannel, dest_filename):
# def start_slave_collector(node, arg_str):
# def str_to_bool(v):
# def tail(n, indata):
# def test_ssh_conn(addr):
# def dump_D_process():
# def lsof_ocfs2_device():
# def touch_r(src, dst):
# def ts_to_dt(timestamp):
# def txt_diff(file1, file2):
# def verify_deb(packages):
# def verify_packages(packages):
# def verify_pkg_info(packages):
# def verify_pkginfo(packages):
# def verify_rpm(packages):
# def which(prog):
# def get_open_method(infile):
# def read_from_file(infile):
# def write_to_file(tofile, data):
# def get_sensitive_key_value_list():
# def extract_sensitive_value_list(rule):
# def include_sensitive_data(data):
# def sub_sensitive_string(data):
# INVERT = False
# SHOWNUM = False
# INVERT = True
# SHOWNUM = True
. Output only the next line. | '/etc/csync2/key_hagroup', |
Using the snippet: <|code_start|># Copyright (C) 2013 Dejan Muhamedagic <dmuhamedagic@suse.de>
# See COPYING for license information.
logger = log.setup_logger(__name__)
# graphviz stuff
def _attr_str(attr_d):
return ','.join(['%s="%s"' % (k, v)
for k, v in attr_d.items()])
<|code_end|>
, determine the next line of code. You have imports:
import re
from . import config
from . import tmpfiles
from . import utils
from .ordereddict import odict
from . import log
and context (class names, function names, or code) available:
# Path: crmsh/ordereddict.py
# class OrderedDict(dict, DictMixin):
# def __init__(self, *args, **kwds):
# def clear(self):
# def __setitem__(self, key, value):
# def __delitem__(self, key):
# def __iter__(self):
# def __reversed__(self):
# def popitem(self, last=True):
# def __reduce__(self):
# def keys(self):
# def __repr__(self):
# def copy(self):
# def fromkeys(cls, iterable, value=None):
# def __eq__(self, other):
# def __ne__(self, other):
. Output only the next line. | def _quoted(name): |
Given snippet: <|code_start|> #print out
out = self._parse('location loc-1 thing rule role=slave -inf: #uname eq madrid')
self.assertEqual(out.get('id'), 'loc-1')
self.assertEqual(out.get('rsc'), 'thing')
self.assertEqual(out.get('score'), None)
out = self._parse('location l { a:foo b:bar }')
self.assertFalse(out)
@mock.patch('logging.Logger.error')
def test_colocation(self, mock_error):
out = self._parse('colocation col-1 inf: foo:master ( bar wiz sequential=yes )')
self.assertEqual(out.get('id'), 'col-1')
self.assertEqual(['foo', 'bar', 'wiz'], out.xpath('//resource_ref/@id'))
self.assertEqual([], out.xpath('//resource_set[@name="sequential"]/@value'))
out = self._parse(
'colocation col-1 -20: foo:Master ( bar wiz ) ( zip zoo ) node-attribute="fiz"')
self.assertEqual(out.get('id'), 'col-1')
self.assertEqual(out.get('score'), '-20')
self.assertEqual(['foo', 'bar', 'wiz', 'zip', 'zoo'], out.xpath('//resource_ref/@id'))
self.assertEqual(['fiz'], out.xpath('//@node-attribute'))
out = self._parse('colocation col-1 0: a:master b')
self.assertEqual(out.get('id'), 'col-1')
out = self._parse('colocation col-1 10: ) bar wiz')
self.assertFalse(out)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from unittest import mock
from builtins import zip
from crmsh import parse
from crmsh.utils import lines2cli
from crmsh.xmlutil import xml_tostring
from lxml import etree
import mock
import unittest
import shlex
and context:
# Path: crmsh/parse.py
# def parse(self, cmd):
# "Called by do_parse(). Raises ParseError if parsing fails."
# raise NotImplementedError
#
# Path: crmsh/utils.py
# def lines2cli(s):
# '''
# Convert a string into a list of lines. Replace continuation
# characters. Strip white space, left and right. Drop empty lines.
# '''
# cl = []
# l = s.split('\n')
# cum = []
# for p in l:
# p = p.strip()
# if p.endswith('\\'):
# p = p.rstrip('\\')
# cum.append(p)
# else:
# cum.append(p)
# cl.append(''.join(cum).strip())
# cum = []
# if cum: # in case s ends with backslash
# cl.append(''.join(cum))
# return [x for x in cl if x]
#
# Path: crmsh/xmlutil.py
# def xml_tostring(*args, **kwargs):
# """
# Python 2/3 conversion utility:
# etree.tostring returns a bytestring, but
# we need actual Python strings.
# """
# return etree.tostring(*args, **kwargs).decode('utf-8')
which might include code, classes, or functions. Output only the next line. | out = self._parse('colocation col-1 10: ( bar wiz') |
Using the snippet: <|code_start|> #self.assertTrue(['sequential', 'false'] in out.resources[0][1])
self.assertEqual(out.get('id'), 'o1')
out = self._parse('order o1 Mandatory: A B C sequential=true')
self.assertEqual(1, len(out.xpath('/rsc_order/resource_set')))
#self.assertTrue(['sequential', 'true'] not in out.resources[0][1])
self.assertEqual(out.get('id'), 'o1')
out = self._parse('order c_apache_1 Mandatory: apache:start ip_1')
self.assertEqual(out.get('id'), 'c_apache_1')
out = self._parse('order c_apache_2 Mandatory: apache:start ip_1 ip_2 ip_3')
self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
self.assertEqual(out.get('id'), 'c_apache_2')
out = self._parse('order o1 Serialize: A ( B C )')
self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
self.assertEqual(out.get('id'), 'o1')
out = self._parse('order o1 Serialize: A ( B C ) symmetrical=false')
self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
self.assertEqual(out.get('id'), 'o1')
self.assertEqual(['false'], out.xpath('//@symmetrical'))
out = self._parse('order o1 Serialize: A ( B C ) symmetrical=true')
self.assertEqual(2, len(out.xpath('/rsc_order/resource_set')))
self.assertEqual(out.get('id'), 'o1')
self.assertEqual(['true'], out.xpath('//@symmetrical'))
inp = 'colocation rsc_colocation-master INFINITY: [ vip-master vip-rep sequential=true ] [ msPostgresql:Master sequential=true ]'
<|code_end|>
, determine the next line of code. You have imports:
from unittest import mock
from builtins import zip
from crmsh import parse
from crmsh.utils import lines2cli
from crmsh.xmlutil import xml_tostring
from lxml import etree
import mock
import unittest
import shlex
and context (class names, function names, or code) available:
# Path: crmsh/parse.py
# def parse(self, cmd):
# "Called by do_parse(). Raises ParseError if parsing fails."
# raise NotImplementedError
#
# Path: crmsh/utils.py
# def lines2cli(s):
# '''
# Convert a string into a list of lines. Replace continuation
# characters. Strip white space, left and right. Drop empty lines.
# '''
# cl = []
# l = s.split('\n')
# cum = []
# for p in l:
# p = p.strip()
# if p.endswith('\\'):
# p = p.rstrip('\\')
# cum.append(p)
# else:
# cum.append(p)
# cl.append(''.join(cum).strip())
# cum = []
# if cum: # in case s ends with backslash
# cl.append(''.join(cum))
# return [x for x in cl if x]
#
# Path: crmsh/xmlutil.py
# def xml_tostring(*args, **kwargs):
# """
# Python 2/3 conversion utility:
# etree.tostring returns a bytestring, but
# we need actual Python strings.
# """
# return etree.tostring(*args, **kwargs).decode('utf-8')
. Output only the next line. | out = self._parse(inp) |
Based on the snippet: <|code_start|>
@mock.patch('logging.Logger.error')
def test_acl(self, mock_error):
out = self._parse('role user-1 error')
self.assertFalse(out)
out = self._parse('user user-1 role:user-1')
self.assertNotEqual(out, False)
out = self._parse("role bigdb_admin " +
"write meta:bigdb:target-role " +
"write meta:bigdb:is-managed " +
"write location:bigdb " +
"read ref:bigdb")
self.assertEqual(4, len(out))
# new type of acls
out = self._parse("acl_target foo a")
self.assertEqual('acl_target', out.tag)
self.assertEqual('foo', out.get('id'))
self.assertEqual(['a'], out.xpath('./role/@id'))
out = self._parse("acl_target foo a b")
self.assertEqual('acl_target', out.tag)
self.assertEqual('foo', out.get('id'))
self.assertEqual(['a', 'b'], out.xpath('./role/@id'))
out = self._parse("acl_target foo a b c")
self.assertEqual('acl_target', out.tag)
self.assertEqual('foo', out.get('id'))
<|code_end|>
, predict the immediate next line with the help of imports:
from unittest import mock
from builtins import zip
from crmsh import parse
from crmsh.utils import lines2cli
from crmsh.xmlutil import xml_tostring
from lxml import etree
import mock
import unittest
import shlex
and context (classes, functions, sometimes code) from other files:
# Path: crmsh/parse.py
# def parse(self, cmd):
# "Called by do_parse(). Raises ParseError if parsing fails."
# raise NotImplementedError
#
# Path: crmsh/utils.py
# def lines2cli(s):
# '''
# Convert a string into a list of lines. Replace continuation
# characters. Strip white space, left and right. Drop empty lines.
# '''
# cl = []
# l = s.split('\n')
# cum = []
# for p in l:
# p = p.strip()
# if p.endswith('\\'):
# p = p.rstrip('\\')
# cum.append(p)
# else:
# cum.append(p)
# cl.append(''.join(cum).strip())
# cum = []
# if cum: # in case s ends with backslash
# cl.append(''.join(cum))
# return [x for x in cl if x]
#
# Path: crmsh/xmlutil.py
# def xml_tostring(*args, **kwargs):
# """
# Python 2/3 conversion utility:
# etree.tostring returns a bytestring, but
# we need actual Python strings.
# """
# return etree.tostring(*args, **kwargs).decode('utf-8')
. Output only the next line. | self.assertEqual(['a', 'b', 'c'], out.xpath('./role/@id')) |
Here is a snippet: <|code_start|> "ptest": "ptest",
"simulate": "crm_simulate",
}
meta_progs = ("crmd", "pengine", "stonithd", "cib")
meta_progs_20 = ("pacemaker-controld", "pacemaker-schedulerd", "pacemaker-fenced", "pacemaker-based")
# elide these properties from tab completion
crmd_metadata_do_not_complete = ("dc-version",
"cluster-infrastructure",
"crmd-integration-timeout",
"crmd-finalization-timeout",
"expected-quorum-votes")
extra_cluster_properties = ("dc-version",
"cluster-infrastructure",
"last-lrm-refresh",
"cluster-name")
pcmk_version = "" # set later
container_type = ["docker", "rkt"]
container_helptxt = {
"docker": {
"image": """image:(string)
Docker image tag(required)""",
"replicas": """replicas:(integer)
Default:Value of masters if that is positive, else 1
A positive integer specifying the number of container instances to launch""",
"replicas-per-host": """replicas-per-host:(integer)
<|code_end|>
. Write the next line using the current file imports:
from .ordereddict import odict
and context from other files:
# Path: crmsh/ordereddict.py
# class OrderedDict(dict, DictMixin):
# def __init__(self, *args, **kwds):
# def clear(self):
# def __setitem__(self, key, value):
# def __delitem__(self, key):
# def __iter__(self):
# def __reversed__(self):
# def popitem(self, last=True):
# def __reduce__(self):
# def keys(self):
# def __repr__(self):
# def copy(self):
# def fromkeys(cls, iterable, value=None):
# def __eq__(self, other):
# def __ne__(self, other):
, which may include functions, classes, or code. Output only the next line. | Default:1 |
Given the code snippet: <|code_start|> followed by some {{foo}}.{{wiz}}
and then some at the end"""
assert """Here's a line of text
followed by another line
followed by some a.b
and then some at the end""" == handles.parse(t, {'foo': "a", 'wiz': "b"})
def test_weird_chars():
t = "{{foo#_bar}}"
assert "hello" == handles.parse(t, {'foo#_bar': 'hello'})
t = "{{_foo$bar_}}"
assert "hello" == handles.parse(t, {'_foo$bar_': 'hello'})
def test_conditional():
t = """{{#foo}}before{{foo:bar}}after{{/foo}}"""
assert "beforehelloafter" == handles.parse(t, {'foo': {'bar': 'hello'}})
assert "" == handles.parse(t, {'faa': {'bar': 'hello'}})
t = """{{#cond}}before{{foo:bar}}after{{/cond}}"""
assert "beforehelloafter" == handles.parse(t, {'foo': {'bar': 'hello'}, 'cond': True})
assert "" == handles.parse(t, {'foo': {'bar': 'hello'}, 'cond': False})
def test_iteration():
t = """{{#foo}}!{{foo:bar}}!{{/foo}}"""
assert "!hello!!there!" == handles.parse(t, {'foo': [{'bar': 'hello'}, {'bar': 'there'}]})
<|code_end|>
, generate the next line using the imports in this file:
from crmsh import handles
and context (functions, classes, or occasionally code) from other files:
# Path: crmsh/handles.py
# class value(object):
# def __init__(self, obj, value):
# def __call__(self):
# def __repr__(self):
# def __str__(self):
# def _join(d1, d2):
# def _resolve(path, context, strict):
# def _push(path, value, context):
# def _textify(obj):
# def _parse(template, context, strict):
# def parse(template, values, strict=False):
. Output only the next line. | def test_result(): |
Here is a snippet: <|code_start|># Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
# Copyright (C) 2013-2016 Kristoffer Gronlund <kgronlund@suse.com>
# See COPYING for license information.
logger = log.setup_logger(__name__)
logger_utils = log.LoggerUtils(logger)
_NVPAIR_RE = re.compile(r'([^=@$][^=]*)=(.*)$')
_NVPAIR_ID_RE = re.compile(r'\$([^:=]+)(?::(.+))?=(.*)$')
_NVPAIR_REF_RE = re.compile(r'@([^:]+)(?::(.+))?$')
_NVPAIR_KEY_RE = re.compile(r'([^:=]+)$', re.IGNORECASE)
_IDENT_RE = re.compile(r'([a-z0-9_#$-][^=]*)$', re.IGNORECASE)
_DISPATCH_RE = re.compile(r'[a-z0-9_]+$', re.IGNORECASE)
_DESC_RE = re.compile(r'description=(.+)$', re.IGNORECASE)
_ATTR_RE = re.compile(r'\$?([^=]+)=(.*)$')
_ALERT_PATH_RE = re.compile(r'(.*)$')
<|code_end|>
. Write the next line using the current file imports:
import shlex
import re
import inspect
from lxml import etree
from . import constants
from .ra import disambiguate_ra_type, ra_type_validate
from . import schema
from .utils import keyword_cmp, verify_boolean, lines2cli
from .utils import get_boolean, olist, canonical_boolean
from .utils import handle_role_for_ocf_1_1
from . import xmlutil
from . import log
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from collections import defaultdict
from itertools import repeat
from .cibconfig import cib_factory
from copy import deepcopy
and context from other files:
# Path: crmsh/ra.py
# def disambiguate_ra_type(s):
# '''
# Unravel [class:[provider:]]type
# '''
# l = s.split(':')
# if not l or len(l) > 3:
# return ["", "", ""]
# if len(l) == 3:
# return l
# elif len(l) == 2:
# cl, tp = l
# else:
# cl, tp = "ocf", l[0]
# pr = pick_provider(ra_providers(tp, cl)) if cl == 'ocf' else ''
# return cl, pr, tp
#
# def ra_type_validate(s, ra_class, provider, rsc_type):
# '''
# Only ocf ra class supports providers.
# '''
# if not rsc_type:
# logger.error("bad resource type specification %s", s)
# return False
# if ra_class == "ocf":
# if not provider:
# logger.error("provider could not be determined for %s", s)
# return False
# else:
# if provider:
# logger.warning("ra class %s does not support providers", ra_class)
# return True
# return True
#
# Path: crmsh/utils.py
# def keyword_cmp(string1, string2):
# return string1.lower() == string2.lower()
#
# def verify_boolean(opt):
# return opt.lower() in ("yes", "true", "on", "1") or \
# opt.lower() in ("no", "false", "off", "0")
#
# def lines2cli(s):
# '''
# Convert a string into a list of lines. Replace continuation
# characters. Strip white space, left and right. Drop empty lines.
# '''
# cl = []
# l = s.split('\n')
# cum = []
# for p in l:
# p = p.strip()
# if p.endswith('\\'):
# p = p.rstrip('\\')
# cum.append(p)
# else:
# cum.append(p)
# cl.append(''.join(cum).strip())
# cum = []
# if cum: # in case s ends with backslash
# cl.append(''.join(cum))
# return [x for x in cl if x]
#
# Path: crmsh/utils.py
# def get_boolean(opt, dflt=False):
# if not opt:
# return dflt
# return is_boolean_true(opt)
#
# class olist(list):
# """
# Implements the 'in' operator
# in a case-insensitive manner,
# allowing "if x in olist(...)"
# """
# def __init__(self, keys):
# super(olist, self).__init__([k.lower() for k in keys])
#
# def __contains__(self, key):
# return super(olist, self).__contains__(key.lower())
#
# def append(self, key):
# super(olist, self).append(key.lower())
#
# def canonical_boolean(opt):
# return 'true' if is_boolean_true(opt) else 'false'
#
# Path: crmsh/utils.py
# def handle_role_for_ocf_1_1(value, name='role'):
# """
# * Convert role from Promoted/Unpromoted to Master/Slave if schema doesn't support OCF 1.1
# * Convert role from Master/Slave to Promoted/Unpromoted if ocf1.1 cib schema detected and OCF_1_1_SUPPORT is yes
# """
# role_names = ["role", "target-role"]
# downgrade_dict = {"Promoted": "Master", "Unpromoted": "Slave"}
# upgrade_dict = {v: k for k, v in downgrade_dict.items()}
#
# if name not in role_names:
# return value
# if value in downgrade_dict and not is_ocf_1_1_cib_schema_detected():
# logger.warning('Convert "%s" to "%s" since the current schema version is old and not upgraded yet. Please consider "%s"', value, downgrade_dict[value], constants.CIB_UPGRADE)
# return downgrade_dict[value]
# if value in upgrade_dict and is_ocf_1_1_cib_schema_detected() and config.core.OCF_1_1_SUPPORT:
# logger.info('Convert deprecated "%s" to "%s"', value, upgrade_dict[value])
# return upgrade_dict[value]
#
# return value
, which may include functions, classes, or code. Output only the next line. | _RESOURCE_RE = re.compile(r'([a-z_#$][^=]*)$', re.IGNORECASE) |
Here is a snippet: <|code_start|># Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
# Copyright (C) 2013-2016 Kristoffer Gronlund <kgronlund@suse.com>
# See COPYING for license information.
logger = log.setup_logger(__name__)
logger_utils = log.LoggerUtils(logger)
_NVPAIR_RE = re.compile(r'([^=@$][^=]*)=(.*)$')
_NVPAIR_ID_RE = re.compile(r'\$([^:=]+)(?::(.+))?=(.*)$')
_NVPAIR_REF_RE = re.compile(r'@([^:]+)(?::(.+))?$')
_NVPAIR_KEY_RE = re.compile(r'([^:=]+)$', re.IGNORECASE)
_IDENT_RE = re.compile(r'([a-z0-9_#$-][^=]*)$', re.IGNORECASE)
_DISPATCH_RE = re.compile(r'[a-z0-9_]+$', re.IGNORECASE)
_DESC_RE = re.compile(r'description=(.+)$', re.IGNORECASE)
_ATTR_RE = re.compile(r'\$?([^=]+)=(.*)$')
<|code_end|>
. Write the next line using the current file imports:
import shlex
import re
import inspect
from lxml import etree
from . import constants
from .ra import disambiguate_ra_type, ra_type_validate
from . import schema
from .utils import keyword_cmp, verify_boolean, lines2cli
from .utils import get_boolean, olist, canonical_boolean
from .utils import handle_role_for_ocf_1_1
from . import xmlutil
from . import log
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from collections import defaultdict
from itertools import repeat
from .cibconfig import cib_factory
from copy import deepcopy
and context from other files:
# Path: crmsh/ra.py
# def disambiguate_ra_type(s):
# '''
# Unravel [class:[provider:]]type
# '''
# l = s.split(':')
# if not l or len(l) > 3:
# return ["", "", ""]
# if len(l) == 3:
# return l
# elif len(l) == 2:
# cl, tp = l
# else:
# cl, tp = "ocf", l[0]
# pr = pick_provider(ra_providers(tp, cl)) if cl == 'ocf' else ''
# return cl, pr, tp
#
# def ra_type_validate(s, ra_class, provider, rsc_type):
# '''
# Only ocf ra class supports providers.
# '''
# if not rsc_type:
# logger.error("bad resource type specification %s", s)
# return False
# if ra_class == "ocf":
# if not provider:
# logger.error("provider could not be determined for %s", s)
# return False
# else:
# if provider:
# logger.warning("ra class %s does not support providers", ra_class)
# return True
# return True
#
# Path: crmsh/utils.py
# def keyword_cmp(string1, string2):
# return string1.lower() == string2.lower()
#
# def verify_boolean(opt):
# return opt.lower() in ("yes", "true", "on", "1") or \
# opt.lower() in ("no", "false", "off", "0")
#
# def lines2cli(s):
# '''
# Convert a string into a list of lines. Replace continuation
# characters. Strip white space, left and right. Drop empty lines.
# '''
# cl = []
# l = s.split('\n')
# cum = []
# for p in l:
# p = p.strip()
# if p.endswith('\\'):
# p = p.rstrip('\\')
# cum.append(p)
# else:
# cum.append(p)
# cl.append(''.join(cum).strip())
# cum = []
# if cum: # in case s ends with backslash
# cl.append(''.join(cum))
# return [x for x in cl if x]
#
# Path: crmsh/utils.py
# def get_boolean(opt, dflt=False):
# if not opt:
# return dflt
# return is_boolean_true(opt)
#
# class olist(list):
# """
# Implements the 'in' operator
# in a case-insensitive manner,
# allowing "if x in olist(...)"
# """
# def __init__(self, keys):
# super(olist, self).__init__([k.lower() for k in keys])
#
# def __contains__(self, key):
# return super(olist, self).__contains__(key.lower())
#
# def append(self, key):
# super(olist, self).append(key.lower())
#
# def canonical_boolean(opt):
# return 'true' if is_boolean_true(opt) else 'false'
#
# Path: crmsh/utils.py
# def handle_role_for_ocf_1_1(value, name='role'):
# """
# * Convert role from Promoted/Unpromoted to Master/Slave if schema doesn't support OCF 1.1
# * Convert role from Master/Slave to Promoted/Unpromoted if ocf1.1 cib schema detected and OCF_1_1_SUPPORT is yes
# """
# role_names = ["role", "target-role"]
# downgrade_dict = {"Promoted": "Master", "Unpromoted": "Slave"}
# upgrade_dict = {v: k for k, v in downgrade_dict.items()}
#
# if name not in role_names:
# return value
# if value in downgrade_dict and not is_ocf_1_1_cib_schema_detected():
# logger.warning('Convert "%s" to "%s" since the current schema version is old and not upgraded yet. Please consider "%s"', value, downgrade_dict[value], constants.CIB_UPGRADE)
# return downgrade_dict[value]
# if value in upgrade_dict and is_ocf_1_1_cib_schema_detected() and config.core.OCF_1_1_SUPPORT:
# logger.info('Convert deprecated "%s" to "%s"', value, upgrade_dict[value])
# return upgrade_dict[value]
#
# return value
, which may include functions, classes, or code. Output only the next line. | _ALERT_PATH_RE = re.compile(r'(.*)$') |
Here is a snippet: <|code_start|># Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
# Copyright (C) 2013-2016 Kristoffer Gronlund <kgronlund@suse.com>
# See COPYING for license information.
logger = log.setup_logger(__name__)
logger_utils = log.LoggerUtils(logger)
_NVPAIR_RE = re.compile(r'([^=@$][^=]*)=(.*)$')
_NVPAIR_ID_RE = re.compile(r'\$([^:=]+)(?::(.+))?=(.*)$')
_NVPAIR_REF_RE = re.compile(r'@([^:]+)(?::(.+))?$')
_NVPAIR_KEY_RE = re.compile(r'([^:=]+)$', re.IGNORECASE)
_IDENT_RE = re.compile(r'([a-z0-9_#$-][^=]*)$', re.IGNORECASE)
_DISPATCH_RE = re.compile(r'[a-z0-9_]+$', re.IGNORECASE)
_DESC_RE = re.compile(r'description=(.+)$', re.IGNORECASE)
_ATTR_RE = re.compile(r'\$?([^=]+)=(.*)$')
_ALERT_PATH_RE = re.compile(r'(.*)$')
<|code_end|>
. Write the next line using the current file imports:
import shlex
import re
import inspect
from lxml import etree
from . import constants
from .ra import disambiguate_ra_type, ra_type_validate
from . import schema
from .utils import keyword_cmp, verify_boolean, lines2cli
from .utils import get_boolean, olist, canonical_boolean
from .utils import handle_role_for_ocf_1_1
from . import xmlutil
from . import log
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from .cibconfig import cib_factory
from collections import defaultdict
from itertools import repeat
from .cibconfig import cib_factory
from copy import deepcopy
and context from other files:
# Path: crmsh/ra.py
# def disambiguate_ra_type(s):
# '''
# Unravel [class:[provider:]]type
# '''
# l = s.split(':')
# if not l or len(l) > 3:
# return ["", "", ""]
# if len(l) == 3:
# return l
# elif len(l) == 2:
# cl, tp = l
# else:
# cl, tp = "ocf", l[0]
# pr = pick_provider(ra_providers(tp, cl)) if cl == 'ocf' else ''
# return cl, pr, tp
#
# def ra_type_validate(s, ra_class, provider, rsc_type):
# '''
# Only ocf ra class supports providers.
# '''
# if not rsc_type:
# logger.error("bad resource type specification %s", s)
# return False
# if ra_class == "ocf":
# if not provider:
# logger.error("provider could not be determined for %s", s)
# return False
# else:
# if provider:
# logger.warning("ra class %s does not support providers", ra_class)
# return True
# return True
#
# Path: crmsh/utils.py
# def keyword_cmp(string1, string2):
# return string1.lower() == string2.lower()
#
# def verify_boolean(opt):
# return opt.lower() in ("yes", "true", "on", "1") or \
# opt.lower() in ("no", "false", "off", "0")
#
# def lines2cli(s):
# '''
# Convert a string into a list of lines. Replace continuation
# characters. Strip white space, left and right. Drop empty lines.
# '''
# cl = []
# l = s.split('\n')
# cum = []
# for p in l:
# p = p.strip()
# if p.endswith('\\'):
# p = p.rstrip('\\')
# cum.append(p)
# else:
# cum.append(p)
# cl.append(''.join(cum).strip())
# cum = []
# if cum: # in case s ends with backslash
# cl.append(''.join(cum))
# return [x for x in cl if x]
#
# Path: crmsh/utils.py
# def get_boolean(opt, dflt=False):
# if not opt:
# return dflt
# return is_boolean_true(opt)
#
# class olist(list):
# """
# Implements the 'in' operator
# in a case-insensitive manner,
# allowing "if x in olist(...)"
# """
# def __init__(self, keys):
# super(olist, self).__init__([k.lower() for k in keys])
#
# def __contains__(self, key):
# return super(olist, self).__contains__(key.lower())
#
# def append(self, key):
# super(olist, self).append(key.lower())
#
# def canonical_boolean(opt):
# return 'true' if is_boolean_true(opt) else 'false'
#
# Path: crmsh/utils.py
# def handle_role_for_ocf_1_1(value, name='role'):
# """
# * Convert role from Promoted/Unpromoted to Master/Slave if schema doesn't support OCF 1.1
# * Convert role from Master/Slave to Promoted/Unpromoted if ocf1.1 cib schema detected and OCF_1_1_SUPPORT is yes
# """
# role_names = ["role", "target-role"]
# downgrade_dict = {"Promoted": "Master", "Unpromoted": "Slave"}
# upgrade_dict = {v: k for k, v in downgrade_dict.items()}
#
# if name not in role_names:
# return value
# if value in downgrade_dict and not is_ocf_1_1_cib_schema_detected():
# logger.warning('Convert "%s" to "%s" since the current schema version is old and not upgraded yet. Please consider "%s"', value, downgrade_dict[value], constants.CIB_UPGRADE)
# return downgrade_dict[value]
# if value in upgrade_dict and is_ocf_1_1_cib_schema_detected() and config.core.OCF_1_1_SUPPORT:
# logger.info('Convert deprecated "%s" to "%s"', value, upgrade_dict[value])
# return upgrade_dict[value]
#
# return value
, which may include functions, classes, or code. Output only the next line. | _RESOURCE_RE = re.compile(r'([a-z_#$][^=]*)$', re.IGNORECASE) |
Next line prediction: <|code_start|> @mock.patch('crmsh.crash_test.utils.crmshutils.get_stdout_stderr')
def test_check_node_status(self, mock_run, mock_error):
output = """
1084783297 15sp2-1 member
1084783193 15sp2-2 lost
"""
mock_run.return_value = (0, output, None)
res = utils.check_node_status("15sp2-2", "member")
self.assertEqual(res, False)
res = utils.check_node_status("15sp2-1", "member")
self.assertEqual(res, True)
mock_run.assert_has_calls([
mock.call("crm_node -l"),
mock.call("crm_node -l")
])
mock_error.assert_not_called()
@mock.patch('crmsh.crash_test.utils.crmshutils.get_stdout_stderr')
def test_online_nodes_empty(self, mock_run):
mock_run.return_value = (0, "data", None)
res = utils.online_nodes()
self.assertEqual(res, [])
mock_run.assert_called_once_with("crm_mon -1")
@mock.patch('crmsh.crash_test.utils.crmshutils.get_stdout_stderr')
def test_online_nodes(self, mock_run):
output = """
Node List:
<|code_end|>
. Use current file imports:
(import os
import sys
import mock
import logging
from unittest import mock, TestCase
from crmsh.crash_test import utils, main, config)
and context including class names, function names, or small code snippets from other files:
# Path: crmsh/crash_test/utils.py
# CRED = '\033[31m'
# CYELLOW = '\033[33m'
# CGREEN = '\033[32m'
# CEND = '\033[0m'
# LEVEL = {
# "info": logging.INFO,
# "warn": logging.WARNING,
# "error": logging.ERROR
# }
# FORMAT_FLUSH = "[%(asctime)s]%(levelname)s: %(message)s"
# FORMAT_NOFLUSH = "%(timestamp)s%(levelname)s: %(message)s"
# COLORS = {
# 'WARNING': CYELLOW,
# 'INFO': CGREEN,
# 'ERROR': CRED
# }
# class MyLoggingFormatter(logging.Formatter):
# class FenceInfo(object):
# def __init__(self, flush=True):
# def format(self, record):
# def now(form="%Y/%m/%d %H:%M:%S"):
# def manage_handler(_type, keep=True):
# def msg_raw(level, msg, to_stdout=True):
# def msg_info(msg, to_stdout=True):
# def msg_warn(msg, to_stdout=True):
# def msg_error(msg, to_stdout=True):
# def json_dumps():
# def fence_enabled(self):
# def fence_action(self):
# def fence_timeout(self):
# def check_node_status(node, state):
# def online_nodes():
# def peer_node_list():
# def this_node():
# def str_to_datetime(str_time, fmt):
# def corosync_port_list():
# def get_handler(logger, _type):
# def is_root():
# def get_process_status(s):
# def _find_match_count(str1, str2):
# def is_valid_sbd(dev):
# def find_candidate_sbd(dev):
# def warning_ask(warn_string):
#
# Path: crmsh/crash_test/main.py
# class Context(object):
# class MyArgParseFormatter(RawDescriptionHelpFormatter):
# def __init__(self):
# def __setattr__(self, name, value):
# def kill_process(context):
# def split_brain(context):
# def fence_node(context):
# def __init__(self, prog):
# def parse_argument(context):
# def setup_basic_context(context):
# def run(context):
. Output only the next line. | * Online: [ 15sp2-1 15sp2-2 ] |
Using the snippet: <|code_start|> err_output = """
==Dumping header on disk {}
==Header on disk {} NOT dumped
sbd failed; please check the logs.
""".format(dev, dev)
mock_os_path_exists.return_value = True
mock_sbd_check_header.return_value = (1, "==Dumping header on disk {}".format(dev),
err_output)
res = utils.is_valid_sbd(dev)
assert res is False
mock_msg_err.assert_called_once_with(err_output)
@classmethod
@mock.patch('crmsh.utils.get_stdout_stderr')
@mock.patch('os.path.exists')
def test_is_valid_sbd_is_sbd(cls, mock_os_path_exists,
mock_sbd_check_header):
"""
Test device is not SBD device
"""
dev = "/dev/disk/by-id/scsi-device1"
std_output = """
==Dumping header on disk {}
Header version : 2.1
UUID : f4c99362-6522-46fc-8ce4-7db60aff19bb
Number of slots : 255
Sector size : 512
Timeout (watchdog) : 5
Timeout (allocate) : 2
<|code_end|>
, determine the next line of code. You have imports:
import os
import sys
import mock
import logging
from unittest import mock, TestCase
from crmsh.crash_test import utils, main, config
and context (class names, function names, or code) available:
# Path: crmsh/crash_test/utils.py
# CRED = '\033[31m'
# CYELLOW = '\033[33m'
# CGREEN = '\033[32m'
# CEND = '\033[0m'
# LEVEL = {
# "info": logging.INFO,
# "warn": logging.WARNING,
# "error": logging.ERROR
# }
# FORMAT_FLUSH = "[%(asctime)s]%(levelname)s: %(message)s"
# FORMAT_NOFLUSH = "%(timestamp)s%(levelname)s: %(message)s"
# COLORS = {
# 'WARNING': CYELLOW,
# 'INFO': CGREEN,
# 'ERROR': CRED
# }
# class MyLoggingFormatter(logging.Formatter):
# class FenceInfo(object):
# def __init__(self, flush=True):
# def format(self, record):
# def now(form="%Y/%m/%d %H:%M:%S"):
# def manage_handler(_type, keep=True):
# def msg_raw(level, msg, to_stdout=True):
# def msg_info(msg, to_stdout=True):
# def msg_warn(msg, to_stdout=True):
# def msg_error(msg, to_stdout=True):
# def json_dumps():
# def fence_enabled(self):
# def fence_action(self):
# def fence_timeout(self):
# def check_node_status(node, state):
# def online_nodes():
# def peer_node_list():
# def this_node():
# def str_to_datetime(str_time, fmt):
# def corosync_port_list():
# def get_handler(logger, _type):
# def is_root():
# def get_process_status(s):
# def _find_match_count(str1, str2):
# def is_valid_sbd(dev):
# def find_candidate_sbd(dev):
# def warning_ask(warn_string):
#
# Path: crmsh/crash_test/main.py
# class Context(object):
# class MyArgParseFormatter(RawDescriptionHelpFormatter):
# def __init__(self):
# def __setattr__(self, name, value):
# def kill_process(context):
# def split_brain(context):
# def fence_node(context):
# def __init__(self, prog):
# def parse_argument(context):
# def setup_basic_context(context):
# def run(context):
. Output only the next line. | Timeout (loop) : 1 |
Based on the snippet: <|code_start|>
def login_required(view_callable):
def check_login(request, *args, **kwargs):
if request.user.is_authenticated:
return view_callable(request, *args, **kwargs)
assert hasattr(request, 'session'), "Session middleware needed."
login_kwargs = {
'extra_context': {
REDIRECT_FIELD_NAME: request.get_full_path(),
'from_decorator': True,
},
'authentication_form': AuthForm,
}
<|code_end|>
, predict the immediate next line with the help of imports:
from functools import wraps
from django.contrib.auth import REDIRECT_FIELD_NAME
from ratelimitbackend.views import login
from .profiles.forms import AuthForm
and context (classes, functions, sometimes code) from other files:
# Path: feedhq/profiles/forms.py
# class AuthForm(AuthenticationForm):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.fields['username'].label = _('Username or Email')
. Output only the next line. | return login(request, **login_kwargs) |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
USER_AGENT = (
'FeedHQ/%s (https://github.com/feedhq/feedhq; %%s; https://github.com/'
'feedhq/feedhq/wiki/fetcher; like FeedFetcher-Google)'
) % __version__
FAVICON_FETCHER = USER_AGENT % 'favicon fetcher'
LINK_CHECKER = USER_AGENT % 'ping'
<|code_end|>
. Use current file imports:
(import datetime
import requests
from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit
from django.conf import settings
from django.core.cache import cache
from django.utils import timezone
from rache import job_details, job_key
from requests.exceptions import (ConnectionError, ConnectTimeout,
InvalidSchema, InvalidURL, MissingSchema,
ReadTimeout, TooManyRedirects)
from requests.packages.urllib3.exceptions import LocationValueError
from .. import __version__
from ..utils import get_redis_connection)
and context including class names, function names, or small code snippets from other files:
# Path: feedhq/utils.py
# def get_redis_connection():
# """
# Helper used for obtain a raw redis client.
# """
# from redis_cache.cache import pool
# client = redis.Redis(**settings.REDIS)
# client.connection_pool = pool.get_connection_pool(
# client,
# parser_class=redis.connection.HiredisParser,
# connection_pool_class=redis.ConnectionPool,
# connection_pool_class_kwargs={},
# **settings.REDIS)
# return client
. Output only the next line. | def is_feed(parsed): |
Here is a snippet: <|code_start|> FONT_PT_SANS = 'pt-sans'
FONT_UBUNTU_CONDENSED = 'ubuntu-condensed'
FONT_SOURCE_SANS_PRO = 'source-sans-pro'
FONTS = (
(
_('Serif'), (
(FONT_DROID_SERIF, 'Droid Serif'),
(FONT_GENTIUM_BASIC, 'Gentium Basic'),
(FONT_MERRIWEATHER, 'Merriweather'),
(FONT_PALATINO, _('Palatino (system font)')),
(FONT_POLY, 'Poly'),
(FONT_PT_SERIF, 'PT Serif'),
)
), (
_('Sans Serif'), (
(FONT_ABEL, 'Abel'),
(FONT_DROID_SANS, 'Droid Sans'),
(FONT_HELVETICA, _('Helvetica (system font)')),
(FONT_MULI, 'Muli'),
(FONT_OPEN_SANS, 'Open Sans'),
(FONT_PT_SANS, 'PT Sans'),
(FONT_UBUNTU_CONDENSED, 'Ubuntu Condensed'),
(FONT_SOURCE_SANS_PRO, 'Source Sans Pro'),
)
)
)
username = models.CharField(max_length=75, unique=True)
email = models.CharField(max_length=75)
<|code_end|>
. Write the next line using the current file imports:
import json
import pytz
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin,
UserManager)
from django.db import models
from django.db.models import Max
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from .. import es
from ..utils import get_redis_connection
and context from other files:
# Path: feedhq/utils.py
# def get_redis_connection():
# """
# Helper used for obtain a raw redis client.
# """
# from redis_cache.cache import pool
# client = redis.Redis(**settings.REDIS)
# client.connection_pool = pool.get_connection_pool(
# client,
# parser_class=redis.connection.HiredisParser,
# connection_pool_class=redis.ConnectionPool,
# connection_pool_class_kwargs={},
# **settings.REDIS)
# return client
, which may include functions, classes, or code. Output only the next line. | is_staff = models.BooleanField(default=False) |
Predict the next line for this snippet: <|code_start|>
logger = get_logger(__name__)
def sentry_handler(job, *exc_info):
extra = {
<|code_end|>
with the help of current file imports:
import os
from raven import Client
from rq import Connection, Queue, Worker
from structlog import get_logger
from . import SentryCommand
from ....utils import get_redis_connection
and context from other files:
# Path: feedhq/utils.py
# def get_redis_connection():
# """
# Helper used for obtain a raw redis client.
# """
# from redis_cache.cache import pool
# client = redis.Redis(**settings.REDIS)
# client.connection_pool = pool.get_connection_pool(
# client,
# parser_class=redis.connection.HiredisParser,
# connection_pool_class=redis.ConnectionPool,
# connection_pool_class_kwargs={},
# **settings.REDIS)
# return client
, which may contain function names, class names, or code. Output only the next line. | 'job_id': job.id, |
Given the code snippet: <|code_start|>
logger = structlog.get_logger(__name__)
class Command(SentryCommand):
def handle_sentry(self, **options):
r = get_redis_connection()
prefix = 'rq:job:'
keys = (
"".join(chars) for chars in product('0123456789abcdef', repeat=1)
)
delay = (
datetime.datetime.utcnow().replace(tzinfo=pytz.utc) -
datetime.timedelta(days=5)
<|code_end|>
, generate the next line using the imports in this file:
import datetime
import pytz
import structlog
from itertools import product
from dateutil import parser
from . import SentryCommand
from ....utils import get_redis_connection
and context (functions, classes, or occasionally code) from other files:
# Path: feedhq/utils.py
# def get_redis_connection():
# """
# Helper used for obtain a raw redis client.
# """
# from redis_cache.cache import pool
# client = redis.Redis(**settings.REDIS)
# client.connection_pool = pool.get_connection_pool(
# client,
# parser_class=redis.connection.HiredisParser,
# connection_pool_class=redis.ConnectionPool,
# connection_pool_class_kwargs={},
# **settings.REDIS)
# return client
. Output only the next line. | ) |
Predict the next line after this snippet: <|code_start|>
admin.autodiscover()
urlpatterns = [
url(r'^admin/rq/', include('django_rq_dashboard.urls')),
url(r'^admin/', admin.site.urls),
url(r'^subscriber/', include('django_push.subscriber.urls')),
url(r'^health/$', views.health, name='health'),
url(r'^robots.txt$', views.robots),
url(r'^humans.txt$', views.humans),
url(r'^favicon.ico$', views.favicon),
url(r'^apple-touch-icon-precomposed.png$', views.touch_icon),
url(r'^apple-touch-icon.png$', views.touch_icon),
url(r'^', include(('feedhq.reader.urls', 'reader'), namespace='reader')),
url(r'^accounts/', include('feedhq.profiles.urls')),
url(r'^', include(('feedhq.feeds.urls', 'feeds'), namespace='feeds')),
url(r'^login/$', login, {'authentication_form': AuthForm}, name='login'),
url(r'^logout/$', views.logout, name='logout'),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# We need logging to be configured late -- in the settings it's too soon for
# logging_tree to properly detect loggers.
logging.config.dictConfig(configure_logging(
debug=settings.DEBUG,
syslog=settings.LOG_SYSLOG,
silenced_loggers=settings.SILENCED_LOGGERS,
<|code_end|>
using the current file's imports:
import logging.config
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from ratelimitbackend import admin
from ratelimitbackend.views import login
from . import monkey
from . import views # noqa
from .logging import configure_logging # noqa
from .profiles.forms import AuthForm # noqa
and any relevant context from other files:
# Path: feedhq/logging.py
# def configure_logging(debug=False, syslog=False, silenced_loggers=None,
# level_overrides=None):
# if silenced_loggers is None:
# silenced_loggers = []
# if level_overrides is None:
# level_overrides = {}
# level = 'DEBUG' if debug else 'INFO'
# renderers = [
# dev.ConsoleRenderer(),
# ] if debug else [
# logstash_processor,
# processors.JSONRenderer(separators=(',', ':')),
# add_syslog_program(syslog),
# ]
# structlog_processors = [
# stdlib.filter_by_level,
# stdlib.add_logger_name,
# stdlib.add_log_level,
# fix_logger_name,
# format_request,
# ensure_event,
# stdlib.PositionalArgumentsFormatter(),
# processors.TimeStamper(fmt="ISO", key='@timestamp'),
# processors.StackInfoRenderer(),
# processors.format_exc_info,
# ] + renderers
#
# configure(
# processors=structlog_processors,
# context_class=dict,
# logger_factory=stdlib.LoggerFactory(),
# wrapper_class=stdlib.BoundLogger,
# cache_logger_on_first_use=True,
# )
#
# structlog = {'handlers': ['raw'],
# 'level': level,
# 'propagate': False}
# null = {'handlers': ['null'],
# 'propagate': False}
# loggers = {l: root(level_overrides.get(l, level))
# for l, _, _ in logging_tree.tree()[2]}
# loggers['feedhq'] = structlog
#
# for nulled_logger in silenced_loggers:
# loggers[nulled_logger] = null
#
# raw = {
# 'level': level,
# 'class': 'logging.handlers.SysLogHandler',
# 'address': '/dev/log',
# 'facility': 'local0',
# } if syslog else {
# 'level': level,
# 'class': 'logging.StreamHandler',
# }
#
# return {
# 'version': 1,
# 'level': level,
# 'handlers': {
# 'root': {
# 'level': level,
# '()': StructlogHandler,
# },
# 'raw': raw,
# 'null': {
# 'class': 'logging.NullHandler',
# },
# },
# 'loggers': loggers,
# 'root': root(level),
# }
#
# Path: feedhq/profiles/forms.py
# class AuthForm(AuthenticationForm):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.fields['username'].label = _('Username or Email')
. Output only the next line. | level_overrides=settings.LOG_LEVEL_OVERRIDES, |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
monkey.patch_html5lib()
monkey.patch_feedparser()
admin.autodiscover()
urlpatterns = [
url(r'^admin/rq/', include('django_rq_dashboard.urls')),
url(r'^admin/', admin.site.urls),
url(r'^subscriber/', include('django_push.subscriber.urls')),
url(r'^health/$', views.health, name='health'),
url(r'^robots.txt$', views.robots),
url(r'^humans.txt$', views.humans),
url(r'^favicon.ico$', views.favicon),
url(r'^apple-touch-icon-precomposed.png$', views.touch_icon),
url(r'^apple-touch-icon.png$', views.touch_icon),
url(r'^', include(('feedhq.reader.urls', 'reader'), namespace='reader')),
url(r'^accounts/', include('feedhq.profiles.urls')),
<|code_end|>
. Write the next line using the current file imports:
import logging.config
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from ratelimitbackend import admin
from ratelimitbackend.views import login
from . import monkey
from . import views # noqa
from .logging import configure_logging # noqa
from .profiles.forms import AuthForm # noqa
and context from other files:
# Path: feedhq/logging.py
# def configure_logging(debug=False, syslog=False, silenced_loggers=None,
# level_overrides=None):
# if silenced_loggers is None:
# silenced_loggers = []
# if level_overrides is None:
# level_overrides = {}
# level = 'DEBUG' if debug else 'INFO'
# renderers = [
# dev.ConsoleRenderer(),
# ] if debug else [
# logstash_processor,
# processors.JSONRenderer(separators=(',', ':')),
# add_syslog_program(syslog),
# ]
# structlog_processors = [
# stdlib.filter_by_level,
# stdlib.add_logger_name,
# stdlib.add_log_level,
# fix_logger_name,
# format_request,
# ensure_event,
# stdlib.PositionalArgumentsFormatter(),
# processors.TimeStamper(fmt="ISO", key='@timestamp'),
# processors.StackInfoRenderer(),
# processors.format_exc_info,
# ] + renderers
#
# configure(
# processors=structlog_processors,
# context_class=dict,
# logger_factory=stdlib.LoggerFactory(),
# wrapper_class=stdlib.BoundLogger,
# cache_logger_on_first_use=True,
# )
#
# structlog = {'handlers': ['raw'],
# 'level': level,
# 'propagate': False}
# null = {'handlers': ['null'],
# 'propagate': False}
# loggers = {l: root(level_overrides.get(l, level))
# for l, _, _ in logging_tree.tree()[2]}
# loggers['feedhq'] = structlog
#
# for nulled_logger in silenced_loggers:
# loggers[nulled_logger] = null
#
# raw = {
# 'level': level,
# 'class': 'logging.handlers.SysLogHandler',
# 'address': '/dev/log',
# 'facility': 'local0',
# } if syslog else {
# 'level': level,
# 'class': 'logging.StreamHandler',
# }
#
# return {
# 'version': 1,
# 'level': level,
# 'handlers': {
# 'root': {
# 'level': level,
# '()': StructlogHandler,
# },
# 'raw': raw,
# 'null': {
# 'class': 'logging.NullHandler',
# },
# },
# 'loggers': loggers,
# 'root': root(level),
# }
#
# Path: feedhq/profiles/forms.py
# class AuthForm(AuthenticationForm):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.fields['username'].label = _('Username or Email')
, which may include functions, classes, or code. Output only the next line. | url(r'^', include(('feedhq.feeds.urls', 'feeds'), namespace='feeds')), |
Predict the next line after this snippet: <|code_start|> User.query.filter_by(sid=sid).delete()
db.session.commit()
def user_from_sid(sid):
return User.query.filter_by(sid=sid).first()
def graph_all_posts():
posts = Post.query.all()
d = min(p.posted_date for p in posts)
while d < date.today():
print("%s, %d" % (d, len([p for p in posts if p.posted_date == d])))
d += timedelta(days=1)
def get_active_users(num_days=7):
users = {}
for p in Post.query.filter(Post.posted_date > date.today() - timedelta(days=num_days)).all():
if p.user_sid not in users:
users[p.user_sid] = 0
users[p.user_sid] += 1
upairs = users.items()
upairs.sort()
upairs.sort(key=lambda x:x[1], reverse=True)
print("users by # posts in last %d days:", users)
for sid,count in upairs:
print("%3d: %s" % (count, sid))
def stalk(sid, depth=1):
entries = user_from_sid(sid).posts.order_by(Post.posted_date.desc()).limit(depth)
for e in entries:
<|code_end|>
using the current file's imports:
from tsundiary import User, Post, db
from datetime import date, datetime, timedelta
and any relevant context from other files:
# Path: tsundiary/models.py
# class User(db.Model):
# class Post(db.Model):
# def verify_password(self, password):
# def set_password(self, password):
# def __init__(self, name, password, email=None, invite_key=""):
# def __repr__(self):
# def viewable_by(self, viewer, today):
# def __init__(self, user_sid, content, posted_date):
# def __repr__(self):
# def init_db():
# def ensure_db_exists():
# def populate_db():
. Output only the next line. | print(e.posted_date) |
Next line prediction: <|code_start|>
def delete_user(sid):
Post.query.filter_by(user_sid=sid).delete()
User.query.filter_by(sid=sid).delete()
db.session.commit()
def user_from_sid(sid):
return User.query.filter_by(sid=sid).first()
def graph_all_posts():
posts = Post.query.all()
d = min(p.posted_date for p in posts)
while d < date.today():
print("%s, %d" % (d, len([p for p in posts if p.posted_date == d])))
d += timedelta(days=1)
def get_active_users(num_days=7):
users = {}
for p in Post.query.filter(Post.posted_date > date.today() - timedelta(days=num_days)).all():
if p.user_sid not in users:
users[p.user_sid] = 0
users[p.user_sid] += 1
<|code_end|>
. Use current file imports:
(from tsundiary import User, Post, db
from datetime import date, datetime, timedelta)
and context including class names, function names, or small code snippets from other files:
# Path: tsundiary/models.py
# class User(db.Model):
# class Post(db.Model):
# def verify_password(self, password):
# def set_password(self, password):
# def __init__(self, name, password, email=None, invite_key=""):
# def __repr__(self):
# def viewable_by(self, viewer, today):
# def __init__(self, user_sid, content, posted_date):
# def __repr__(self):
# def init_db():
# def ensure_db_exists():
# def populate_db():
. Output only the next line. | upairs = users.items() |
Given the following code snippet before the placeholder: <|code_start|>
def delete_user(sid):
Post.query.filter_by(user_sid=sid).delete()
User.query.filter_by(sid=sid).delete()
db.session.commit()
def user_from_sid(sid):
return User.query.filter_by(sid=sid).first()
def graph_all_posts():
posts = Post.query.all()
d = min(p.posted_date for p in posts)
while d < date.today():
print("%s, %d" % (d, len([p for p in posts if p.posted_date == d])))
d += timedelta(days=1)
def get_active_users(num_days=7):
users = {}
for p in Post.query.filter(Post.posted_date > date.today() - timedelta(days=num_days)).all():
if p.user_sid not in users:
users[p.user_sid] = 0
users[p.user_sid] += 1
upairs = users.items()
<|code_end|>
, predict the next line using imports from the current file:
from tsundiary import User, Post, db
from datetime import date, datetime, timedelta
and context including class names, function names, and sometimes code from other files:
# Path: tsundiary/models.py
# class User(db.Model):
# class Post(db.Model):
# def verify_password(self, password):
# def set_password(self, password):
# def __init__(self, name, password, email=None, invite_key=""):
# def __repr__(self):
# def viewable_by(self, viewer, today):
# def __init__(self, user_sid, content, posted_date):
# def __repr__(self):
# def init_db():
# def ensure_db_exists():
# def populate_db():
. Output only the next line. | upairs.sort() |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
class MockParser(object):
def __init__(self, main_sheet, sub_sheets):
self.main_sheet = Sheet(main_sheet)
self.sub_sheets = {k: Sheet(v) for k, v in sub_sheets.items()}
def test_spreadsheetouput_base_fails():
"""The base class should fail as it is missing functionality that child
classes must implement"""
spreadsheet_output = output.SpreadsheetOutput(parser=MockParser([], {}))
with pytest.raises(NotImplementedError):
spreadsheet_output.write_sheets()
def test_blank_sheets(tmpdir):
for format_name, spreadsheet_output_class in output.FORMATS.items():
spreadsheet_output = spreadsheet_output_class(
parser=MockParser([], {}),
main_sheet_name="release",
output_name=os.path.join(
tmpdir.strpath, "release" + output.FORMATS_SUFFIX[format_name]
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import sys
import openpyxl
import pytest
from flattentool import output
from flattentool.ODSReader import ODSReader
from flattentool.sheet import Sheet
and context (classes, functions, sometimes code) from other files:
# Path: flattentool/output.py
# class SpreadsheetOutput(object):
# class XLSXOutput(SpreadsheetOutput):
# class CSVOutput(SpreadsheetOutput):
# class ODSOutput(SpreadsheetOutput):
# def __init__(
# self, parser, main_sheet_name="main", output_name="unflattened", sheet_prefix=""
# ):
# def open(self):
# def write_sheet(self, sheet_name, sheet_header, sheet_lines=None):
# def write_sheets(self):
# def close(self):
# def open(self):
# def write_sheet(self, sheet_name, sheet):
# def close(self):
# def open(self):
# def write_sheet(self, sheet_name, sheet):
# def open(self):
# def _make_cell(self, value):
# def write_sheet(self, sheet_name, sheet):
# def close(self):
# FORMATS = {"xlsx": XLSXOutput, "csv": CSVOutput, "ods": ODSOutput}
# FORMATS_SUFFIX = {
# "xlsx": ".xlsx",
# "ods": ".ods",
# "csv": "", # This is the suffix for the directory
# }
#
# Path: flattentool/ODSReader.py
# class ODSReader:
#
# # loads the file
# def __init__(self, file, clonespannedcolumns=None):
# self.clonespannedcolumns = clonespannedcolumns
# self.doc = odf.opendocument.load(file)
# self.SHEETS = OrderedDict()
# for sheet in self.doc.spreadsheet.getElementsByType(Table):
# self.readSheet(sheet)
#
# # reads a sheet in the sheet dictionary, storing each sheet as an
# # array (rows) of arrays (columns)
# def readSheet(self, sheet):
# name = sheet.getAttribute("name")
# rows = sheet.getElementsByType(TableRow)
# arrRows = []
#
# # for each row
# for row in rows:
# row_comment = "" # noqa
# arrCells = GrowingList()
# cells = row.getElementsByType(TableCell)
#
# # for each cell
# count = 0
# for cell in cells:
# # repeated value?
# repeat = cell.getAttribute("numbercolumnsrepeated")
# if not repeat:
# repeat = 1
# spanned = int(cell.getAttribute("numbercolumnsspanned") or 0)
# # clone spanned cells
# if self.clonespannedcolumns is not None and spanned > 1:
# repeat = spanned
#
# for rr in range(int(repeat)): # repeated?
# if str(cell):
# value_type = cell.attributes.get(
# (
# "urn:oasis:names:tc:opendocument:xmlns:office:1.0",
# "value-type",
# )
# )
# if value_type == "float":
# value = cell.attributes.get(
# (
# "urn:oasis:names:tc:opendocument:xmlns:office:1.0",
# "value",
# )
# )
# if "." in str(value):
# arrCells[count] = float(value)
# else:
# arrCells[count] = int(value)
# elif value_type == "date":
# date_value = cell.attributes.get(
# (
# "urn:oasis:names:tc:opendocument:xmlns:office:1.0",
# "date-value",
# )
# )
# # fromisoformat assumes microseconds appear as 3 or
# # 6 digits, whereas ods drops trailing 0s, so can
# # have 1-6 digits, so pad some extra 0s
# if "." in date_value:
# date_value = date_value.ljust(26, "0")
# arrCells[count] = datetime.fromisoformat(date_value)
# else:
# arrCells[count] = str(cell)
# count += 1
#
# arrRows.append(arrCells)
#
# self.SHEETS[name] = arrRows
#
# # returns a sheet as an array (rows) of arrays (columns)
# def getSheet(self, name):
# return self.SHEETS[name]
#
# Path: flattentool/sheet.py
# class Sheet(object):
# """
# An abstract representation of a single sheet of a spreadsheet.
#
# """
#
# def __init__(self, columns=None, root_id="", name=None):
# self.id_columns = []
# self.columns = columns if columns else []
# self.titles = {}
# self._lines = []
# self.root_id = root_id
# self.name = name
#
# @property
# def lines(self):
# return self._lines
#
# def add_field(self, field, id_field=False):
# columns = self.id_columns if id_field else self.columns
# if field not in columns:
# columns.append(field)
#
# def append(self, item):
# self.add_field(item)
#
# def __iter__(self):
# if self.root_id:
# yield self.root_id
# for column in self.id_columns:
# yield column
# for column in self.columns:
# yield column
#
# def append_line(self, flattened_dict):
# self._lines.append(flattened_dict)
. Output only the next line. | ), |
Predict the next line after this snippet: <|code_start|>
def child_to_xml(parent_el, tagname, child, toplevel=False, nsmap=None):
if hasattr(child, "items"):
child_el = dict_to_xml(child, tagname, toplevel=False, nsmap=nsmap)
if child_el is not None:
parent_el.append(child_el)
else:
if tagname.startswith("@"):
if USING_LXML and toplevel and tagname.startswith("@xmlns"):
nsmap[tagname[1:].split(":", 1)[1]] = str(child)
return
try:
attr_name = tagname[1:]
if USING_LXML and ":" in attr_name:
attr_name = (
"{"
+ nsmap.get(attr_name.split(":", 1)[0], "")
+ "}"
+ attr_name.split(":", 1)[1]
)
parent_el.attrib[attr_name] = str(child)
except ValueError as e:
warn(str(e), DataErrorWarning)
elif tagname == "text()":
parent_el.text = str(child)
else:
raise ("Everything should end with text() or an attribute!")
<|code_end|>
using the current file's imports:
from collections import OrderedDict
from warnings import warn
from flattentool.exceptions import DataErrorWarning
from flattentool.sort_xml import XMLSchemaWalker, sort_element
import lxml.etree as ET
import xml.etree.ElementTree as ET
and any relevant context from other files:
# Path: flattentool/exceptions.py
# class DataErrorWarning(UserWarning):
# """
# A warnings that indicates an error in the data, rather than the schema.
#
# """
#
# pass
#
# Path: flattentool/sort_xml.py
# class XMLSchemaWalker(object):
# """
# Class for traversing one or more XML schemas.
#
# Based on the Schema2Doc class in https://github.com/IATI/IATI-Standard-SSOT/blob/version-2.02/gen.py
# """
#
# def __init__(self, schemas):
# """
# schema -- the filename of the schema to use, e.g.
# 'iati-activities-schema.xsd'
# """
# self.trees = [ET.parse(schema) for schema in schemas]
#
# def get_schema_element(self, tag_name, name_attribute):
# """
# Return the specified element from the schema.
#
# tag_name -- the name of the tag in the schema, e.g. 'complexType'
# name_attribute -- the value of the 'name' attribute in the schema, ie.
# the name of the element/type etc. being described,
# e.g. iati-activities
# """
# for tree in self.trees:
# schema_element = tree.find(
# "xsd:{0}[@name='{1}']".format(tag_name, name_attribute),
# namespaces=namespaces,
# )
# if schema_element is not None:
# return schema_element
# return schema_element
#
# def element_loop(self, element, path):
# """
# Return information about the children of the supplied element.
# """
# a = element.attrib
# type_elements = []
# if "type" in a:
# complexType = self.get_schema_element("complexType", a["type"])
# if complexType is not None:
# type_elements = complexType.findall(
# "xsd:choice/xsd:element", namespaces=namespaces
# ) + complexType.findall(
# "xsd:sequence/xsd:element", namespaces=namespaces
# )
#
# children = (
# element.findall(
# "xsd:complexType/xsd:choice/xsd:element", namespaces=namespaces
# )
# + element.findall(
# "xsd:complexType/xsd:sequence/xsd:element", namespaces=namespaces
# )
# + element.findall(
# "xsd:complexType/xsd:all/xsd:element", namespaces=namespaces
# )
# + type_elements
# )
# child_tuples = []
# for child in children:
# a = child.attrib
# if "name" in a:
# child_tuples.append(
# (a["name"], child, None, a.get("minOccurs"), a.get("maxOccurs"))
# )
# else:
# child_tuples.append(
# (a["ref"], None, child, a.get("minOccurs"), a.get("maxOccurs"))
# )
# return child_tuples
#
# def create_schema_dict(self, parent_name, parent_element=None):
# """
# Create a nested OrderedDict representing the structure (and order!) of
# elements in the provided schema.
# """
# if parent_element is None:
# parent_element = self.get_schema_element("element", parent_name)
# if parent_element is None:
# return {}
#
# return OrderedDict(
# [
# (name, self.create_schema_dict(name, element))
# for name, element, _, _, _ in self.element_loop(parent_element, "")
# ]
# )
#
# def sort_element(element, schema_subdict):
# """
# Sort the given element's children according to the order of schema_subdict.
# """
# children = list(element)
# for child in children:
# element.remove(child)
# keys = list(schema_subdict.keys())
#
# def index_key(x):
# if x.tag in keys:
# return keys.index(x.tag)
# else:
# return len(keys) + 1
#
# for child in sorted(children, key=index_key):
# element.append(child)
# sort_element(child, schema_subdict.get(child.tag, {}))
. Output only the next line. | def dict_to_xml(data, tagname, toplevel=True, nsmap=None): |
Here is a snippet: <|code_start|> USING_LXML = True
# Note that lxml is now "required" - it's listed as a requirement in
# setup.py and is needed for the tests to pass.
# However, stdlib etree still exists as an unsupported feature.
except ImportError:
USING_LXML = False
warn("Using stdlib etree may work, but is not supported. Please install lxml.")
def sort_attributes(data):
attribs = []
other = []
for k, v in data.items():
(other, attribs)[k.startswith("@")].append((k, v))
return OrderedDict(sorted(attribs) + other)
def child_to_xml(parent_el, tagname, child, toplevel=False, nsmap=None):
if hasattr(child, "items"):
child_el = dict_to_xml(child, tagname, toplevel=False, nsmap=nsmap)
if child_el is not None:
parent_el.append(child_el)
else:
if tagname.startswith("@"):
if USING_LXML and toplevel and tagname.startswith("@xmlns"):
nsmap[tagname[1:].split(":", 1)[1]] = str(child)
return
try:
attr_name = tagname[1:]
<|code_end|>
. Write the next line using the current file imports:
from collections import OrderedDict
from warnings import warn
from flattentool.exceptions import DataErrorWarning
from flattentool.sort_xml import XMLSchemaWalker, sort_element
import lxml.etree as ET
import xml.etree.ElementTree as ET
and context from other files:
# Path: flattentool/exceptions.py
# class DataErrorWarning(UserWarning):
# """
# A warnings that indicates an error in the data, rather than the schema.
#
# """
#
# pass
#
# Path: flattentool/sort_xml.py
# class XMLSchemaWalker(object):
# """
# Class for traversing one or more XML schemas.
#
# Based on the Schema2Doc class in https://github.com/IATI/IATI-Standard-SSOT/blob/version-2.02/gen.py
# """
#
# def __init__(self, schemas):
# """
# schema -- the filename of the schema to use, e.g.
# 'iati-activities-schema.xsd'
# """
# self.trees = [ET.parse(schema) for schema in schemas]
#
# def get_schema_element(self, tag_name, name_attribute):
# """
# Return the specified element from the schema.
#
# tag_name -- the name of the tag in the schema, e.g. 'complexType'
# name_attribute -- the value of the 'name' attribute in the schema, ie.
# the name of the element/type etc. being described,
# e.g. iati-activities
# """
# for tree in self.trees:
# schema_element = tree.find(
# "xsd:{0}[@name='{1}']".format(tag_name, name_attribute),
# namespaces=namespaces,
# )
# if schema_element is not None:
# return schema_element
# return schema_element
#
# def element_loop(self, element, path):
# """
# Return information about the children of the supplied element.
# """
# a = element.attrib
# type_elements = []
# if "type" in a:
# complexType = self.get_schema_element("complexType", a["type"])
# if complexType is not None:
# type_elements = complexType.findall(
# "xsd:choice/xsd:element", namespaces=namespaces
# ) + complexType.findall(
# "xsd:sequence/xsd:element", namespaces=namespaces
# )
#
# children = (
# element.findall(
# "xsd:complexType/xsd:choice/xsd:element", namespaces=namespaces
# )
# + element.findall(
# "xsd:complexType/xsd:sequence/xsd:element", namespaces=namespaces
# )
# + element.findall(
# "xsd:complexType/xsd:all/xsd:element", namespaces=namespaces
# )
# + type_elements
# )
# child_tuples = []
# for child in children:
# a = child.attrib
# if "name" in a:
# child_tuples.append(
# (a["name"], child, None, a.get("minOccurs"), a.get("maxOccurs"))
# )
# else:
# child_tuples.append(
# (a["ref"], None, child, a.get("minOccurs"), a.get("maxOccurs"))
# )
# return child_tuples
#
# def create_schema_dict(self, parent_name, parent_element=None):
# """
# Create a nested OrderedDict representing the structure (and order!) of
# elements in the provided schema.
# """
# if parent_element is None:
# parent_element = self.get_schema_element("element", parent_name)
# if parent_element is None:
# return {}
#
# return OrderedDict(
# [
# (name, self.create_schema_dict(name, element))
# for name, element, _, _, _ in self.element_loop(parent_element, "")
# ]
# )
#
# def sort_element(element, schema_subdict):
# """
# Sort the given element's children according to the order of schema_subdict.
# """
# children = list(element)
# for child in children:
# element.remove(child)
# keys = list(schema_subdict.keys())
#
# def index_key(x):
# if x.tag in keys:
# return keys.index(x.tag)
# else:
# return len(keys) + 1
#
# for child in sorted(children, key=index_key):
# element.append(child)
# sort_element(child, schema_subdict.get(child.tag, {}))
, which may include functions, classes, or code. Output only the next line. | if USING_LXML and ":" in attr_name: |
Here is a snippet: <|code_start|>
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'codebgcolor' : '#D4D8DC',
'sidebarbgcolor' : '#183828',
'relbarbgcolor' : '#183828',
<|code_end|>
. Write the next line using the current file imports:
import sys, os
from fish.fishlib import VERSION
and context from other files:
# Path: fish/fishlib.py
# VERSION = __version__
, which may include functions, classes, or code. Output only the next line. | 'footerbgcolor' : '#183828', |
Here is a snippet: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
data = generate_data(200) # np.random.randint(2, size=(10000, n_visible))
n_visible = data.shape[1]
n_hidden = 100
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
<|code_end|>
. Write the next line using the current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may include functions, classes, or code. Output only the next line. | initial_vmap = { rbm.v: T.matrix('v') } |
Predict the next line after this snippet: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
data = generate_data(200) # np.random.randint(2, size=(10000, n_visible))
n_visible = data.shape[1]
n_hidden = 100
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
initial_vmap = { rbm.v: T.matrix('v') }
# try to calculate weight updates using CD-1 stats
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=1)
<|code_end|>
using the current file's imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and any relevant context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | umap = {} |
Using the snippet: <|code_start|> # mean_field_for_stats is a list of units for which 'mean_field' should be used to compute statistics, rather than 'sample'.
# complete units lists
visible_units = rbm.complete_units_list(visible_units)
hidden_units = rbm.complete_units_list(hidden_units)
context_units = rbm.complete_units_list(context_units)
# complete the supplied vmap
v0_vmap = rbm.complete_vmap(v0_vmap)
# extract the context vmap, because we will need to merge it into all other vmaps
context_vmap = dict((u, v0_vmap[u]) for u in context_units)
h0_activation_vmap = dict((h, h.activation(v0_vmap)) for h in hidden_units)
h0_stats_vmap, h0_gibbs_vmap = gibbs_step(rbm, v0_vmap, hidden_units, mean_field_for_stats, mean_field_for_gibbs)
# add context
h0_activation_vmap.update(context_vmap)
h0_gibbs_vmap.update(context_vmap)
h0_stats_vmap.update(context_vmap)
exp_input = [v0_vmap[u] for u in visible_units]
exp_context = [v0_vmap[u] for u in context_units]
exp_latent = [h0_gibbs_vmap[u] for u in hidden_units]
# scan requires a function that returns theano expressions, so we cannot pass vmaps in or out. annoying.
def gibbs_hvh(*args):
h0_gibbs_vmap = dict(zip(hidden_units, args))
v1_in_vmap = h0_gibbs_vmap.copy()
<|code_end|>
, determine the next line of code. You have imports:
from morb.base import Stats
import numpy as np
import theano
and context (class names, function names, or code) available:
# Path: morb/base.py
# class Stats(dict): # a stats object is just a dictionary of vmaps, but it also holds associated theano updates.
# def __init__(self, updates):
# self.theano_updates = updates
#
# def get_theano_updates(self):
# return self.theano_updates
. Output only the next line. | v1_in_vmap.update(context_vmap) # add context |
Here is a snippet: <|code_start|>emodel_train_so_far = []
edata_so_far = []
emodel_so_far = []
for epoch in range(epochs):
monitoring_data_train = [(cost, energy_data, energy_model) for cost, energy_data, energy_model in train({ rbm.v: train_set_x })]
mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
mse_train = np.mean(mses_train)
edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
edata_valid = np.mean(edata)
emodel_valid = np.mean(emodel)
# plotting
mses_train_so_far.append(mse_train)
mses_valid_so_far.append(mse_valid)
edata_so_far.append(edata_valid)
emodel_so_far.append(emodel_valid)
edata_train_so_far.append(edata_train)
emodel_train_so_far.append(emodel_train)
plt.figure(1)
plt.clf()
plt.plot(mses_train_so_far, label='train')
plt.plot(mses_valid_so_far, label='validation')
plt.title("MSE")
<|code_end|>
. Write the next line using the current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may include functions, classes, or code. Output only the next line. | plt.legend() |
Continue the code snippet: <|code_start|>precision_variables = [rbm.Wp.var, rbm.bvp.var]
umap = {}
for var in variables:
pu = var + (learning_rate/mb_size) * updaters.CDUpdater(rbm, var, s) # the learning rate is 0.001
if var in precision_variables:
pu = updaters.BoundUpdater(pu, bound=0, type='upper')
umap[var] = pu
print ">> Compiling functions..."
t = trainers.MinibatchTrainer(rbm, umap)
m = monitors.reconstruction_mse(s, rbm.v)
m_data = s['data'][rbm.v]
m_model = s['model'][rbm.v]
e_data = rbm.energy(s['data']).mean()
e_model = rbm.energy(s['model']).mean()
# train = t.compile_function(initial_vmap, mb_size=32, monitors=[m], name='train', mode=mode)
train = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, e_data, e_model], name='train', mode=mode)
evaluate = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, m_data, m_model, e_data, e_model], name='evaluate', train=False, mode=mode)
def plot_data(d):
plt.figure(5)
plt.clf()
<|code_end|>
. Use current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context (classes, functions, or code) from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | plt.imshow(d.reshape((28,28)), interpolation='gaussian') |
Here is a snippet: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# load data
print ">> Loading dataset..."
f = gzip.open('datasets/mnist.pkl.gz','rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
train_set_x, train_set_y = train_set
valid_set_x, valid_set_y = valid_set
test_set_x, test_set_y = test_set
# TODO DEBUG
train_set_x = train_set_x[:10000]
valid_set_x = valid_set_x[:1000]
<|code_end|>
. Write the next line using the current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may include functions, classes, or code. Output only the next line. | n_visible = train_set_x.shape[1] |
Predict the next line for this snippet: <|code_start|>edata_train_so_far = []
emodel_train_so_far = []
edata_so_far = []
emodel_so_far = []
for epoch in range(epochs):
monitoring_data_train = [(cost, energy_data, energy_model) for cost, energy_data, energy_model in train({ rbm.v: train_set_x })]
mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
mse_train = np.mean(mses_train)
edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
edata_valid = np.mean(edata)
emodel_valid = np.mean(emodel)
# plotting
mses_train_so_far.append(mse_train)
mses_valid_so_far.append(mse_valid)
edata_so_far.append(edata_valid)
emodel_so_far.append(emodel_valid)
edata_train_so_far.append(edata_train)
emodel_train_so_far.append(emodel_train)
plt.figure(1)
plt.clf()
plt.plot(mses_train_so_far, label='train')
plt.plot(mses_valid_so_far, label='validation')
<|code_end|>
with the help of current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may contain function names, class names, or code. Output only the next line. | plt.title("MSE") |
Predict the next line for this snippet: <|code_start|>print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
edata_train_so_far = []
emodel_train_so_far = []
edata_so_far = []
emodel_so_far = []
for epoch in range(epochs):
monitoring_data_train = [(cost, energy_data, energy_model) for cost, energy_data, energy_model in train({ rbm.v: train_set_x })]
mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
mse_train = np.mean(mses_train)
edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
edata_valid = np.mean(edata)
emodel_valid = np.mean(emodel)
# plotting
mses_train_so_far.append(mse_train)
mses_valid_so_far.append(mse_valid)
edata_so_far.append(edata_valid)
emodel_so_far.append(emodel_valid)
edata_train_so_far.append(edata_train)
emodel_train_so_far.append(emodel_train)
<|code_end|>
with the help of current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may contain function names, class names, or code. Output only the next line. | plt.figure(1) |
Given the following code snippet before the placeholder: <|code_start|>
def sample_evolution(start, ns=100): # start = start data
sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
<|code_end|>
, predict the next line using imports from the current file:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context including class names, function names, and sometimes code from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | edata_train_so_far = [] |
Given the following code snippet before the placeholder: <|code_start|> sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
edata_train_so_far = []
emodel_train_so_far = []
<|code_end|>
, predict the next line using imports from the current file:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context including class names, function names, and sometimes code from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | edata_so_far = [] |
Here is a snippet: <|code_start|> plt.figure(1)
plt.clf()
plt.plot(mses_train_so_far, label='train')
plt.plot(mses_valid_so_far, label='validation')
plt.title("MSE")
plt.legend()
plt.draw()
plt.figure(4)
plt.clf()
plt.plot(edata_so_far, label='validation / data')
plt.plot(emodel_so_far, label='validation / model')
plt.plot(edata_train_so_far, label='train / data')
plt.plot(emodel_train_so_far, label='train / model')
plt.title("energy")
plt.legend()
plt.draw()
# plot some samples
plt.figure(2)
plt.clf()
plt.imshow(vdata[0][0].reshape((28, 28)))
plt.draw()
plt.figure(3)
plt.clf()
plt.imshow(vmodel[0][0].reshape((28, 28)))
plt.draw()
print "Epoch %d" % epoch
<|code_end|>
. Write the next line using the current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may include functions, classes, or code. Output only the next line. | print "training set: MSE = %.6f, data energy = %.2f, model energy = %.2f" % (mse_train, edata_train, emodel_train) |
Predict the next line after this snippet: <|code_start|> sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
edata_train_so_far = []
emodel_train_so_far = []
<|code_end|>
using the current file's imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and any relevant context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | edata_so_far = [] |
Predict the next line for this snippet: <|code_start|>
# TRAINING
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
edata_train_so_far = []
emodel_train_so_far = []
edata_so_far = []
emodel_so_far = []
for epoch in range(epochs):
monitoring_data_train = [(cost, energy_data, energy_model) for cost, energy_data, energy_model in train({ rbm.v: train_set_x })]
mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
mse_train = np.mean(mses_train)
edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
<|code_end|>
with the help of current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may contain function names, class names, or code. Output only the next line. | edata_valid = np.mean(edata) |
Using the snippet: <|code_start|>
# tensordot = T.tensordot # use theano implementation
class FixedBiasParameters(Parameters):
# Bias fixed at -1, which is useful for some energy functions (like Gaussian with fixed variance, Beta)
def __init__(self, rbm, units, name=None):
super(FixedBiasParameters, self).__init__(rbm, [units], name=name)
self.variables = []
self.u = units
self.terms[self.u] = lambda vmap: T.constant(-1, theano.config.floatX) # T.constant is necessary so scan doesn't choke on it
def energy_term(self, vmap):
s = vmap[self.u]
return T.sum(s, axis=range(1, s.ndim)) # NO minus sign! bias is -1 so this is canceled.
# sum over all but the minibatch dimension.
class ProdParameters(Parameters):
def __init__(self, rbm, units_list, W, name=None):
super(ProdParameters, self).__init__(rbm, units_list, name=name)
assert len(units_list) == 2
self.var = W
self.variables = [self.var]
self.vu = units_list[0]
self.hu = units_list[1]
self.terms[self.vu] = lambda vmap: T.dot(vmap[self.hu], W.T)
<|code_end|>
, determine the next line of code. You have imports:
from morb.base import Parameters
from theano.tensor.nnet import conv
from morb.misc import tensordot # better tensordot implementation that can be GPU accelerated
import theano
import theano.tensor as T
and context (class names, function names, or code) available:
# Path: morb/base.py
# class Parameters(object):
# def __init__(self, rbm, units_list, name=None):
# self.rbm = rbm
# self.units_list = units_list
# self.terms = {} # terms is a dict of FUNCTIONS that take a vmap.
# self.energy_gradients = {} # a dict of FUNCTIONS that take a vmap.
# self.energy_gradient_sums = {} # a dict of FUNCTIONS that take a vmap.
# self.name = name
# self.rbm.add_parameters(self)
#
# def activation_term_for(self, units, vmap):
# return self.terms[units](vmap)
#
# def energy_gradient_for(self, variable, vmap):
# """
# Returns the energy gradient for each example in the batch.
# """
# return self.energy_gradients[variable](vmap)
#
# def energy_gradient_sum_for(self, variable, vmap):
# """
# Returns the energy gradient, summed across the minibatch dimension.
# If a fast implementation for this is available in the energy_gradient_sums
# dictionary, this will be used. Else the energy gradient will be computed
# for each example in the batch (using the implementation from the
# energy_gradients dictionary) and then summed.
#
# Take a look at the ProdParameters implementation for an example of where
# this is useful: the gradient summed over the batch can be computed more
# efficiently with a dot product.
# """
# if variable in self.energy_gradient_sums:
# return self.energy_gradient_sums[variable](vmap)
# else:
# return T.sum(self.energy_gradients[variable](vmap), axis=0)
#
# def energy_term(self, vmap):
# raise NotImplementedError("Parameters base class")
#
# def affects(self, units):
# return (units in self.units_list)
#
# def __repr__(self):
# units_names = ", ".join(("'%s'" % u.name) for u in self.units_list)
# return "<morb:Parameters '%s' affecting %s>" % (self.name, units_names)
#
# Path: morb/misc.py
# def tensordot(a, b, axes=2):
# """
# implementation of tensordot that reduces to a regular matrix product. This allows tensordot to be GPU accelerated,
# which isn't possible with the default Theano implementation (which is just a wrapper around numpy.tensordot).
# based on code from Tijmen Tieleman's gnumpy http://www.cs.toronto.edu/~tijmen/gnumpy.html
# """
# if numpy.isscalar(axes):
# # if 'axes' is a number of axes to multiply and sum over (trailing axes
# # of a, leading axes of b), we can just reshape and use dot.
# outshape = tensor.concatenate([a.shape[:a.ndim - axes], b.shape[axes:]])
# outndim = a.ndim + b.ndim - 2*axes
# a_reshaped = a.reshape((tensor.prod(a.shape[:a.ndim - axes]), tensor.prod(a.shape[a.ndim - axes:])))
# b_reshaped = b.reshape((tensor.prod(b.shape[:axes]), tensor.prod(b.shape[axes:])))
# return tensor.dot(a_reshaped, b_reshaped).reshape(outshape, ndim=outndim)
# elif len(axes) == 2:
# # if 'axes' is a pair of axis lists, we first shuffle the axes of a and
# # b to reduce this to the first case (note the recursion).
# a_other, b_other = tuple(axes[0]), tuple(axes[1])
# num_axes = len(a_other)
# a_order = tuple(x for x in tuple(xrange(a.ndim)) if x not in a_other) + a_other
# b_order = b_other + tuple(x for x in tuple(xrange(b.ndim)) if x not in b_other)
# a_shuffled = a.dimshuffle(a_order)
# b_shuffled = b.dimshuffle(b_order)
# return tensordot(a_shuffled, b_shuffled, num_axes)
# else:
# raise ValueError("Axes should be scalar valued or a list/tuple of len 2.")
. Output only the next line. | self.terms[self.hu] = lambda vmap: T.dot(vmap[self.vu], W) |
Continue the code snippet: <|code_start|># train = t.compile_function(initial_vmap, mb_size=32, monitors=[m], name='train', mode=mode)
train = t.compile_function(initial_vmap, mb_size=100, monitors=[m, m_model], name='train', mode=mode)
evaluate = t.compile_function(initial_vmap, mb_size=100, monitors=[m, m_model], name='evaluate', train=False, mode=mode)
def sample_evolution(start, ns=100): # start = start data
sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
<|code_end|>
. Use current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context, plot_data
from theano import ProfileMode
and context (classes, functions, or code) from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | epochs = 200 |
Given the code snippet: <|code_start|> sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
epochs = 200
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
mact_train_so_far = []
mact_valid_so_far = []
<|code_end|>
, generate the next line using the imports in this file:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context, plot_data
from theano import ProfileMode
and context (functions, classes, or occasionally code) from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | for epoch in range(epochs): |
Using the snippet: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# load data
print ">> Loading dataset..."
f = gzip.open('datasets/mnist.pkl.gz','rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
<|code_end|>
, determine the next line of code. You have imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context, plot_data
from theano import ProfileMode
and context (class names, function names, or code) available:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | train_set_x, train_set_y = train_set |
Predict the next line for this snippet: <|code_start|>
def sample_evolution(start, ns=100): # start = start data
sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
epochs = 200
print ">> Training for %d epochs..." % epochs
<|code_end|>
with the help of current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context, plot_data
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may contain function names, class names, or code. Output only the next line. | mses_train_so_far = [] |
Here is a snippet: <|code_start|>
def sample_evolution(start, ns=100): # start = start data
sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
epochs = 200
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
mact_train_so_far = []
<|code_end|>
. Write the next line using the current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context, plot_data
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may include functions, classes, or code. Output only the next line. | mact_valid_so_far = [] |
Predict the next line for this snippet: <|code_start|>
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# load data
print ">> Loading dataset..."
f = gzip.open('datasets/mnist.pkl.gz','rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
train_set_x, train_set_y = train_set
valid_set_x, valid_set_y = valid_set
test_set_x, test_set_y = test_set
# TODO DEBUG
# train_set_x = train_set_x[:10000]
valid_set_x = valid_set_x[:1000]
n_visible = train_set_x.shape[1]
n_hidden = 500
mb_size = 20
k = 15
<|code_end|>
with the help of current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may contain function names, class names, or code. Output only the next line. | learning_rate = 0.1 |
Next line prediction: <|code_start|>valid_set_x, valid_set_y = valid_set
test_set_x, test_set_y = test_set
# TODO DEBUG
# train_set_x = train_set_x[:10000]
valid_set_x = valid_set_x[:1000]
n_visible = train_set_x.shape[1]
n_hidden = 500
mb_size = 20
k = 15
learning_rate = 0.1
epochs = 15
print ">> Constructing RBM..."
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
initial_vmap = { rbm.v: T.matrix('v') }
persistent_vmap = { rbm.h: theano.shared(np.zeros((mb_size, n_hidden), dtype=theano.config.floatX)) }
# try to calculate weight updates using CD stats
print ">> Constructing contrastive divergence updaters..."
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=k, persistent_vmap=persistent_vmap, mean_field_for_stats=[rbm.v], mean_field_for_gibbs=[rbm.v])
umap = {}
for var in rbm.variables:
pu = var + (learning_rate / float(mb_size)) * updaters.CDUpdater(rbm, var, s)
<|code_end|>
. Use current file imports:
(import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode)
and context including class names, function names, or small code snippets from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | umap[var] = pu |
Based on the snippet: <|code_start|>edata_so_far = []
emodel_so_far = []
for epoch in range(epochs):
monitoring_data_train = [(cost, energy_data, energy_model) for cost, energy_data, energy_model in train({ rbm.v: train_set_x })]
mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
mse_train = np.mean(mses_train)
edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
edata_valid = np.mean(edata)
emodel_valid = np.mean(emodel)
# plotting
mses_train_so_far.append(mse_train)
mses_valid_so_far.append(mse_valid)
edata_so_far.append(edata_valid)
emodel_so_far.append(emodel_valid)
edata_train_so_far.append(edata_train)
emodel_train_so_far.append(emodel_train)
plt.figure(1)
plt.clf()
plt.plot(mses_train_so_far, label='train')
plt.plot(mses_valid_so_far, label='validation')
plt.title("MSE")
plt.legend()
<|code_end|>
, predict the immediate next line with the help of imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context (classes, functions, sometimes code) from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | plt.draw() |
Given the code snippet: <|code_start|>m_model = s['model'][rbm.v]
e_data = rbm.energy(s['data']).mean()
e_model = rbm.energy(s['model']).mean()
# train = t.compile_function(initial_vmap, mb_size=32, monitors=[m], name='train', mode=mode)
train = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, e_data, e_model], name='train', mode=mode)
evaluate = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, m_data, m_model, e_data, e_model], name='evaluate', train=False, mode=mode)
def plot_data(d):
plt.figure(5)
plt.clf()
plt.imshow(d.reshape((28,28)), interpolation='gaussian')
plt.draw()
def sample_evolution(start, ns=100): # start = start data
sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
<|code_end|>
, generate the next line using the imports in this file:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context (functions, classes, or occasionally code) from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | data = x[0] |
Predict the next line for this snippet: <|code_start|> edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
edata_valid = np.mean(edata)
emodel_valid = np.mean(emodel)
# plotting
mses_train_so_far.append(mse_train)
mses_valid_so_far.append(mse_valid)
edata_so_far.append(edata_valid)
emodel_so_far.append(emodel_valid)
edata_train_so_far.append(edata_train)
emodel_train_so_far.append(emodel_train)
plt.figure(1)
plt.clf()
plt.plot(mses_train_so_far, label='train')
plt.plot(mses_valid_so_far, label='validation')
plt.title("MSE")
plt.legend()
plt.draw()
plt.figure(4)
plt.clf()
plt.plot(edata_so_far, label='validation / data')
plt.plot(emodel_so_far, label='validation / model')
plt.plot(edata_train_so_far, label='train / data')
<|code_end|>
with the help of current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may contain function names, class names, or code. Output only the next line. | plt.plot(emodel_train_so_far, label='train / model') |
Given the code snippet: <|code_start|>
class SelfUpdater(Updater):
def get_update(self):
return self.variable
DecayUpdater = SelfUpdater
# weight decay: the update == the parameter values themselves
# (decay constant is taken care of by ScaleUpdater)
class MomentumUpdater(Updater):
<|code_end|>
, generate the next line using the imports in this file:
from morb.base import Updater, SumUpdater, ScaleUpdater
import samplers
import theano
import theano.tensor as T
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: morb/base.py
# class Updater(object):
# # An Updater object updates a single parameter variable. Multiple Updaters can compute updates for a single variable, which can then be aggregated by composite Updaters (like the SumUpdater)
# def __init__(self, variable, stats_list=[]):
# # variable is a single parameter variable, not a Parameters object or a list of variables.
# self.variable = variable
# self.stats_list = stats_list
# self.theano_updates = {} # some Updaters have state. Most don't, so then this is just
# # an empty dictionary. Those who do have state (like the MomentumUpdater) override
# # this variable.
#
# def get_update(self):
# raise NotImplementedError("Updater base class")
#
# def get_theano_updates(self):
# """
# gets own updates and the updates of all contained updaters (if applicable).
# """
# return self.theano_updates
#
# def _to_updater(self, e):
# """
# helper function that turns any expression into an updater
# """
# if not isinstance(e, Updater):
# eu = ExpressionUpdater(self.variable, e)
# return eu
# else:
# return e
#
# def __add__(self, p2):
# p2 = self._to_updater(p2)
# return SumUpdater([self, p2])
#
# def __sub__(self, p2):
# p2 = self._to_updater(p2)
# return self + (-p2)
#
# __radd__ = __add__
# __rsub__ = __sub__
#
# def __neg__(self):
# return ScaleUpdater(self, -1)
#
# def __mul__(self, a):
# return ScaleUpdater(self, a)
# # a is assumed to be a scalar!
#
# def __div__(self, a):
# return self * (1.0/a)
#
# __rmul__ = __mul__
# __rdiv__ = __div__
#
# class SumUpdater(Updater):
# def __init__(self, updaters):
# # assert that all updaters affect the same variable, gather stats collectors
# self.updaters = updaters
# stats_list = []
# for pu in updaters:
# if pu.variable != updaters[0].variable:
# raise RuntimeError("Cannot add Updaters that affect a different variable together")
# stats_list.extend(pu.stats_list)
# stats_list = _unique(stats_list) # we only need each Stats object once.
#
# super(SumUpdater, self).__init__(updaters[0].variable, stats_list)
#
# def get_update(self):
# return sum((pu.get_update() for pu in self.updaters), T.constant(0, theano.config.floatX))
#
# def get_theano_updates(self):
# u = {} # a sum updater has no state, so it has no theano updates of its own.
# for pu in self.updaters:
# u.update(pu.get_theano_updates())
# return u
#
# class ScaleUpdater(Updater):
# def __init__(self, pu, scaling_factor):
# super(ScaleUpdater, self).__init__(pu.variable, pu.stats_list)
# self.pu = pu
# self.scaling_factor = scaling_factor
#
# def get_update(self):
# return self.scaling_factor * self.pu.get_update()
#
# def get_theano_updates(self):
# u = {} # a scale updater has no state, so it has no theano updates of its own.
# u.update(self.pu.get_theano_updates())
# return u
. Output only the next line. | def __init__(self, pu, momentum, variable_shape): |
Given the following code snippet before the placeholder: <|code_start|>mode = None
# generate data
data = generate_data(200)
# use the predefined binary-binary RBM, which has visible units (rbm.v), hidden units (rbm.h),
# a weight matrix W connecting them (rbm.W), and visible and hidden biases (rbm.bv and rbm.bh).
n_visible = data.shape[1]
n_hidden = 100
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
initial_vmap = { rbm.v: T.matrix('v') }
# We use single-step contrastive divergence (CD-1) to train the RBM. For this, we can use
# the CDUpdater. This requires symbolic CD-1 statistics:
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=1, mean_field_for_gibbs=[rbm.v], mean_field_for_stats=[rbm.v, rbm.h])
# We create an updater for each parameter variable
umap = {}
for var in rbm.variables:
pu = var + 0.001 * updaters.CDUpdater(rbm, var, s) # the learning rate is 0.001
umap[var] = pu
# training
t = trainers.MinibatchTrainer(rbm, umap)
mse = monitors.reconstruction_mse(s, rbm.v)
train = t.compile_function(initial_vmap, mb_size=32, monitors=[mse], name='train', mode=mode)
epochs = 50
start_time = time.time()
<|code_end|>
, predict the next line using imports from the current file:
import morb
import theano
import theano.tensor as T
import numpy as np
import time
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context including class names, function names, and sometimes code from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | for epoch in range(epochs): |
Given snippet: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
data = generate_data(200)
# use the predefined binary-binary RBM, which has visible units (rbm.v), hidden units (rbm.h),
# a weight matrix W connecting them (rbm.W), and visible and hidden biases (rbm.bv and rbm.bh).
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import time
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
which might include code, classes, or functions. Output only the next line. | n_visible = data.shape[1] |
Predict the next line after this snippet: <|code_start|>
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
data = generate_data(200)
# use the predefined binary-binary RBM, which has visible units (rbm.v), hidden units (rbm.h),
# a weight matrix W connecting them (rbm.W), and visible and hidden biases (rbm.bv and rbm.bh).
n_visible = data.shape[1]
n_hidden = 100
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
initial_vmap = { rbm.v: T.matrix('v') }
# We use single-step contrastive divergence (CD-1) to train the RBM. For this, we can use
# the CDUpdater. This requires symbolic CD-1 statistics:
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=1, mean_field_for_gibbs=[rbm.v], mean_field_for_stats=[rbm.v, rbm.h])
# We create an updater for each parameter variable
umap = {}
for var in rbm.variables:
pu = var + 0.001 * updaters.CDUpdater(rbm, var, s) # the learning rate is 0.001
umap[var] = pu
# training
t = trainers.MinibatchTrainer(rbm, umap)
mse = monitors.reconstruction_mse(s, rbm.v)
train = t.compile_function(initial_vmap, mb_size=32, monitors=[mse], name='train', mode=mode)
<|code_end|>
using the current file's imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import time
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and any relevant context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | epochs = 50 |
Given the code snippet: <|code_start|>data = generate_data(200)
# use the predefined binary-binary RBM, which has visible units (rbm.v), hidden units (rbm.h),
# a weight matrix W connecting them (rbm.W), and visible and hidden biases (rbm.bv and rbm.bh).
n_visible = data.shape[1]
n_hidden = 100
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
initial_vmap = { rbm.v: T.matrix('v') }
# We use single-step contrastive divergence (CD-1) to train the RBM. For this, we can use
# the CDUpdater. This requires symbolic CD-1 statistics:
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=1, mean_field_for_gibbs=[rbm.v], mean_field_for_stats=[rbm.v, rbm.h])
# We create an updater for each parameter variable
umap = {}
for var in rbm.variables:
pu = var + 0.001 * updaters.CDUpdater(rbm, var, s) # the learning rate is 0.001
umap[var] = pu
# training
t = trainers.MinibatchTrainer(rbm, umap)
mse = monitors.reconstruction_mse(s, rbm.v)
train = t.compile_function(initial_vmap, mb_size=32, monitors=[mse], name='train', mode=mode)
epochs = 50
start_time = time.time()
for epoch in range(epochs):
print "Epoch %d" % epoch
costs = [m for m in train({ rbm.v: data })]
<|code_end|>
, generate the next line using the imports in this file:
import morb
import theano
import theano.tensor as T
import numpy as np
import time
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context (functions, classes, or occasionally code) from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | print "MSE = %.4f" % np.mean(costs) |
Based on the snippet: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
data = generate_data(200)
# use the predefined binary-binary RBM, which has visible units (rbm.v), hidden units (rbm.h),
# a weight matrix W connecting them (rbm.W), and visible and hidden biases (rbm.bv and rbm.bh).
n_visible = data.shape[1]
n_hidden = 100
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
initial_vmap = { rbm.v: T.matrix('v') }
# We use single-step contrastive divergence (CD-1) to train the RBM. For this, we can use
# the CDParamUpdater. This requires symbolic CD-1 statistics:
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=1)
# We create an updater for each parameter variable
umap = {}
<|code_end|>
, predict the immediate next line with the help of imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import time
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context (classes, functions, sometimes code) from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | for var in rbm.variables: |
Here is a snippet: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
data = generate_data(200)
# use the predefined binary-binary RBM, which has visible units (rbm.v), hidden units (rbm.h),
# a weight matrix W connecting them (rbm.W), and visible and hidden biases (rbm.bv and rbm.bh).
n_visible = data.shape[1]
n_hidden = 100
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
<|code_end|>
. Write the next line using the current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import time
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may include functions, classes, or code. Output only the next line. | initial_vmap = { rbm.v: T.matrix('v') } |
Predict the next line for this snippet: <|code_start|># mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
data = generate_data(200)
# use the predefined binary-binary RBM, which has visible units (rbm.v), hidden units (rbm.h),
# a weight matrix W connecting them (rbm.W), and visible and hidden biases (rbm.bv and rbm.bh).
n_visible = data.shape[1]
n_hidden = 100
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
initial_vmap = { rbm.v: T.matrix('v') }
# We use single-step contrastive divergence (CD-1) to train the RBM. For this, we can use
# the CDParamUpdater. This requires symbolic CD-1 statistics:
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=1)
# We create an updater for each parameter variable
umap = {}
for var in rbm.variables:
pu = var + 0.001 * updaters.CDUpdater(rbm, var, s) # the learning rate is 0.001
umap[var] = pu
# training
t = trainers.MinibatchTrainer(rbm, umap)
mse = monitors.reconstruction_mse(s, rbm.v)
free_energy = T.mean(rbm.free_energy([rbm.h], s['data'])) # take the mean over the minibatch.
train = t.compile_function(initial_vmap, mb_size=32, monitors=[mse, free_energy], name='train', mode=mode)
<|code_end|>
with the help of current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import time
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
, which may contain function names, class names, or code. Output only the next line. | epochs = 50 |
Next line prediction: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
print ">> Generating dataset..."
data = generate_data(1000) # np.random.randint(2, size=(10000, n_visible))
data_context = get_context(data)
data_train = data[:-1000, :]
data_eval = data[-1000:, :]
data_context_train = data_context[:-1000, :]
data_context_eval = data_context[-1000:, :]
n_visible = data.shape[1]
<|code_end|>
. Use current file imports:
(import morb
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode)
and context including class names, function names, or small code snippets from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | n_context = data_context.shape[1] |
Given the following code snippet before the placeholder: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
print ">> Generating dataset..."
data = generate_data(1000) # np.random.randint(2, size=(10000, n_visible))
data_context = get_context(data)
<|code_end|>
, predict the next line using imports from the current file:
import morb
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context including class names, function names, and sometimes code from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | data_train = data[:-1000, :] |
Given snippet: <|code_start|># mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
print ">> Generating dataset..."
data = generate_data(1000) # np.random.randint(2, size=(10000, n_visible))
data_context = get_context(data)
data_train = data[:-1000, :]
data_eval = data[-1000:, :]
data_context_train = data_context[:-1000, :]
data_context_eval = data_context[-1000:, :]
n_visible = data.shape[1]
n_context = data_context.shape[1]
n_hidden = 100
print ">> Constructing RBM..."
rbm = rbms.BinaryBinaryCRBM(n_visible, n_hidden, n_context)
initial_vmap = { rbm.v: T.matrix('v'), rbm.x: T.matrix('x') }
# try to calculate weight updates using CD-1 stats
print ">> Constructing contrastive divergence updaters..."
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], context_units=[rbm.x], k=1)
umap = {}
for var in rbm.variables:
pu = var + 0.0005 * updaters.CDUpdater(rbm, var, s)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and context:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
which might include code, classes, or functions. Output only the next line. | umap[var] = pu |
Predict the next line after this snippet: <|code_start|>
plt.ion()
# DEBUGGING
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# generate data
print ">> Generating dataset..."
data = generate_data(1000) # np.random.randint(2, size=(10000, n_visible))
data_context = get_context(data)
<|code_end|>
using the current file's imports:
import morb
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from morb import rbms, stats, updaters, trainers, monitors
from utils import generate_data, get_context
from theano import ProfileMode
and any relevant context from other files:
# Path: morb/rbms.py
# class BinaryBinaryRBM(RBM): # the basic RBM, with binary visibles and binary hiddens
# class BinaryBinaryCRBM(BinaryBinaryRBM):
# class GaussianBinaryRBM(RBM): # Gaussian visible units
# class LearntPrecisionGaussianBinaryRBM(RBM):
# class LearntPrecisionSeparateGaussianBinaryRBM(RBM):
# class TruncExpBinaryRBM(RBM): # RBM with truncated exponential visibles and binary hiddens
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden, n_context):
# def _initial_A(self):
# def _initial_B(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
# def _initial_W(self, nv, nh):
# def _initial_bias(self, n):
# def __init__(self, n_visible, n_hidden):
# def _initial_W(self):
# def _initial_bv(self):
# def _initial_bh(self):
#
# Path: morb/stats.py
# def gibbs_step(rbm, vmap, units_list, mean_field_for_stats=[], mean_field_for_gibbs=[]):
# def cd_stats(rbm, v0_vmap, visible_units, hidden_units, context_units=[], k=1, mean_field_for_stats=[], mean_field_for_gibbs=[], persistent_vmap=None):
# def gibbs_hvh(*args):
#
# Path: morb/updaters.py
# class SelfUpdater(Updater):
# class MomentumUpdater(Updater):
# class CDUpdater(Updater):
# class SparsityUpdater(Updater):
# class BoundUpdater(Updater):
# class GradientUpdater(Updater):
# def get_update(self):
# def __init__(self, pu, momentum, variable_shape):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, rbm, variable, stats):
# def get_update(self):
# def __init__(self, rbm, variable, sparsity_targets, stats):
# def get_update(self):
# def __init__(self, pu, bound=0, type='lower'):
# def get_update(self):
# def get_theano_updates(self):
# def __init__(self, objective, variable, theano_updates={}):
# def get_update(self):
# def get_theano_updates(self):
#
# Path: morb/trainers.py
# class MinibatchTrainer(Trainer):
# def compile_function(self, initial_vmap, monitors=[], name='func', mb_size=32, train=True, mode=None):
# def func(dmap):
# TF = theano.function([index], monitors,
# updates = updates, givens = givens, name = name, mode = mode)
#
# Path: morb/monitors.py
# def reconstruction_mse(stats, u):
# def reconstruction_error_rate(stats, u):
# def reconstruction_crossentropy(stats, u):
. Output only the next line. | data_train = data[:-1000, :] |
Based on the snippet: <|code_start|>
loop = get_loop()
@asyncio.coroutine
def run(cmd, **kwargs):
transport, protocol = yield from async_execute_process(
create_protocol(), cmd, **kwargs)
<|code_end|>
, predict the immediate next line with the help of imports:
from osrf_pycommon.process_utils import asyncio
from osrf_pycommon.process_utils.async_execute_process import async_execute_process
from osrf_pycommon.process_utils import get_loop
from .impl_aep_protocol import create_protocol
and context (classes, functions, sometimes code) from other files:
# Path: ros2_batch_job/vendor/osrf_pycommon/tests/unit/test_process_utils/impl_aep_protocol.py
# def create_protocol():
# class CustomProtocol(AsyncSubprocessProtocol):
# def __init__(self, *args, **kwargs):
# self.stdout_buffer = b""
# self.stderr_buffer = b""
# AsyncSubprocessProtocol.__init__(self, *args, **kwargs)
#
# def on_stdout_received(self, data):
# self.stdout_buffer += data
#
# def on_stderr_received(self, data):
# self.stderr_buffer += data
# return CustomProtocol
. Output only the next line. | retcode = yield from protocol.complete |
Given snippet: <|code_start|># You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
except ImportError:
# to support --cover-inclusive on Windows
if os.name not in ['nt']:
raise
def _execute_process_pty(cmd, cwd, env, shell, stderr_to_stdout=True):
stdout_master, stdout_slave = None, None
stderr_master, stderr_slave = None, None
fds_to_close = [stdout_master, stdout_slave, stderr_master, stderr_slave]
try:
stdout_master, stdout_slave = pty.openpty()
if stderr_to_stdout:
stderr_master, stderr_slave = stdout_master, stdout_slave
else:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import pty
import time
from subprocess import Popen
from subprocess import STDOUT
from .execute_process_nopty import _close_fds
from .execute_process_nopty import _yield_data
and context:
# Path: ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/process_utils/execute_process_nopty.py
# def _close_fds(fds_to_close):
# # This function is used to close (if not already closed) any fds used
# for s in fds_to_close:
# if s is None:
# continue
# try:
# os.close(s)
# except OSError as exc:
# # This could raise "OSError: [Errno 9] Bad file descriptor"
# # If it has already been closed, but that's ok
# if "Bad file descriptor" not in "{0}".format(exc):
# raise
#
# Path: ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/process_utils/execute_process_nopty.py
# def _yield_data(p, fds, left_overs, linesep, fds_to_close=None):
# # This function uses select and subprocess.Popen.poll to collect out
# # from a subprocess until it has finished, yielding it as it goes
# fds_to_close = [] if fds_to_close is None else fds_to_close
#
# def yield_to_stream(data, stream):
# if stream == fds[0]:
# return data, None, None
# else:
# return None, data, None
#
# try:
# while p.poll() is None:
# # If Windows
# if _is_windows:
# for stream in fds:
# # This will not produce the best results, but at least
# # it will function on Windows. A True IOCP implementation
# # would be required to get streaming from Windows streams.
# data = stream.readline()
# if data:
# yield yield_to_stream(data, stream)
# continue
# # Otherwise Unix
# try:
# rlist, wlist, xlist = select.select(fds, [], [])
# except select.error as exc:
# # Ignore EINTR
# try:
# errnum = exc.errno
# except AttributeError:
# errnum = exc[0]
# if errnum == errno.EINTR:
# continue
# raise
# for stream in rlist:
# left_over = left_overs[stream]
# fileno = getattr(stream, 'fileno', lambda: stream)()
# try:
# incoming = os.read(fileno, 1024)
# except OSError as exc:
# # On Linux, when using a pty, in order to get select
# # to return when the subprocess finishes, os.close
# # must be called on the slave pty fd after forking
# # the subprocess with popen. On some versions of
# # the Linux kernel this causes an Errno 5 OSError,
# # "Input/output error". Therefore, I am explicitly
# # catching and passing on this error. In my testing
# # this error does not occur repeatedly (it does not
# # become a busy wait). See:
# # http://stackoverflow.com/a/12207447/671658
# if _is_linux and "Input/output error" in "{0}".format(exc):
# continue
# raise
# if not incoming:
# # In this case, EOF has been reached, see docs for os.read
# if left_over:
# yield yield_to_stream(left_over, stream)
# continue
# data, left_over = _process_incoming_lines(incoming, left_over)
# left_overs[stream] = left_over
# yield yield_to_stream(data, stream)
# # Done
# yield None, None, p.returncode
# finally:
# # Make sure we don't leak file descriptors
# _close_fds(fds_to_close)
which might include code, classes, or functions. Output only the next line. | stderr_master, stderr_slave = pty.openpty() |
Based on the snippet: <|code_start|># Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
except ImportError:
# to support --cover-inclusive on Windows
if os.name not in ['nt']:
raise
def _execute_process_pty(cmd, cwd, env, shell, stderr_to_stdout=True):
stdout_master, stdout_slave = None, None
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import pty
import time
from subprocess import Popen
from subprocess import STDOUT
from .execute_process_nopty import _close_fds
from .execute_process_nopty import _yield_data
and context (classes, functions, sometimes code) from other files:
# Path: ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/process_utils/execute_process_nopty.py
# def _close_fds(fds_to_close):
# # This function is used to close (if not already closed) any fds used
# for s in fds_to_close:
# if s is None:
# continue
# try:
# os.close(s)
# except OSError as exc:
# # This could raise "OSError: [Errno 9] Bad file descriptor"
# # If it has already been closed, but that's ok
# if "Bad file descriptor" not in "{0}".format(exc):
# raise
#
# Path: ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/process_utils/execute_process_nopty.py
# def _yield_data(p, fds, left_overs, linesep, fds_to_close=None):
# # This function uses select and subprocess.Popen.poll to collect out
# # from a subprocess until it has finished, yielding it as it goes
# fds_to_close = [] if fds_to_close is None else fds_to_close
#
# def yield_to_stream(data, stream):
# if stream == fds[0]:
# return data, None, None
# else:
# return None, data, None
#
# try:
# while p.poll() is None:
# # If Windows
# if _is_windows:
# for stream in fds:
# # This will not produce the best results, but at least
# # it will function on Windows. A True IOCP implementation
# # would be required to get streaming from Windows streams.
# data = stream.readline()
# if data:
# yield yield_to_stream(data, stream)
# continue
# # Otherwise Unix
# try:
# rlist, wlist, xlist = select.select(fds, [], [])
# except select.error as exc:
# # Ignore EINTR
# try:
# errnum = exc.errno
# except AttributeError:
# errnum = exc[0]
# if errnum == errno.EINTR:
# continue
# raise
# for stream in rlist:
# left_over = left_overs[stream]
# fileno = getattr(stream, 'fileno', lambda: stream)()
# try:
# incoming = os.read(fileno, 1024)
# except OSError as exc:
# # On Linux, when using a pty, in order to get select
# # to return when the subprocess finishes, os.close
# # must be called on the slave pty fd after forking
# # the subprocess with popen. On some versions of
# # the Linux kernel this causes an Errno 5 OSError,
# # "Input/output error". Therefore, I am explicitly
# # catching and passing on this error. In my testing
# # this error does not occur repeatedly (it does not
# # become a busy wait). See:
# # http://stackoverflow.com/a/12207447/671658
# if _is_linux and "Input/output error" in "{0}".format(exc):
# continue
# raise
# if not incoming:
# # In this case, EOF has been reached, see docs for os.read
# if left_over:
# yield yield_to_stream(left_over, stream)
# continue
# data, left_over = _process_incoming_lines(incoming, left_over)
# left_overs[stream] = left_over
# yield yield_to_stream(data, stream)
# # Done
# yield None, None, p.returncode
# finally:
# # Make sure we don't leak file descriptors
# _close_fds(fds_to_close)
. Output only the next line. | stderr_master, stderr_slave = None, None |
Using the snippet: <|code_start|>
class BatchJob:
def __init__(self, *, python_interpreter=None):
self.run = run
self.run_history = []
self.python = sys.executable if python_interpreter is None else python_interpreter
self.python_history = []
def pre(self):
raise NotImplementedError()
def post(self):
raise NotImplementedError()
def show_env(self):
raise NotImplementedError()
def setup_env(self):
raise NotImplementedError()
def push_run(self, run_func):
self.run_history.append(self.run)
self.run = run_func
def pop_run(self):
current_run_func = self.run
if not self.run_history:
<|code_end|>
, determine the next line of code. You have imports:
import sys
from .util import run
and context (class names, function names, or code) available:
# Path: ros2_batch_job/util.py
# def run(cmd, exit_on_error=True, **kwargs):
# log("@{bf}==>@| @!{0}", fargs=(" ".join(cmd),))
# ret = _run(cmd, exit_on_error=exit_on_error, **kwargs)
# print()
# return ret
. Output only the next line. | raise RuntimeError("Called pop_run with an empty run history.") |
Predict the next line for this snippet: <|code_start|># Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
except ImportError:
# pty doesn't work on Windows, it will fail to import
# so fallback to non pty implementation
_execute_process_pty = None
try:
_basestring = basestring # Python 2
except NameError:
_basestring = str # Python 3
<|code_end|>
with the help of current file imports:
import os
import sys
from .execute_process_nopty import _execute_process_nopty
from .execute_process_pty import _execute_process_pty
from shutil import which as _which
and context from other files:
# Path: ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/process_utils/execute_process_nopty.py
# def _execute_process_nopty(cmd, cwd, env, shell, stderr_to_stdout=True):
# if stderr_to_stdout:
# p = Popen(cmd,
# stdin=PIPE, stdout=PIPE, stderr=STDOUT,
# cwd=cwd, env=env, shell=shell, close_fds=False)
# else:
# p = Popen(cmd,
# stdin=PIPE, stdout=PIPE, stderr=PIPE,
# cwd=cwd, env=env, shell=shell, close_fds=False)
#
# # Left over data from read which isn't a complete line yet
# left_overs = {p.stdout: b'', p.stderr: b''}
#
# fds = list(filter(None, [p.stdout, p.stderr]))
#
# return _yield_data(p, fds, left_overs, os.linesep)
, which may contain function names, class names, or code. Output only the next line. | def execute_process(cmd, cwd=None, env=None, shell=False, emulate_tty=False): |
Predict the next line for this snippet: <|code_start|>
def _pack_attrs(foreground, background, style):
return foreground + (background * 16) + style
def _win_reset(handle, attrs):
SetConsoleTextAttribute(handle, attrs)
return attrs
def _win_style(style, handle, attrs):
attrs_list = _unpack_attrs(attrs)
attrs_list[2] = style
attrs = _pack_attrs(*attrs_list)
SetConsoleTextAttribute(handle, attrs)
return attrs
def _win_foreground(foreground, handle, attrs):
attrs_list = _unpack_attrs(attrs)
attrs_list[0] = foreground
attrs = _pack_attrs(*attrs_list)
SetConsoleTextAttribute(handle, attrs)
return attrs
def _win_background(background, handle, attrs):
attrs_list = _unpack_attrs(attrs)
attrs_list[1] = background
<|code_end|>
with the help of current file imports:
import ctypes
import ctypes.wintypes
import os
import sys
from .ansi_re import split_by_ansi_escape_sequence
and context from other files:
# Path: ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/terminal_color/ansi_re.py
# def split_by_ansi_escape_sequence(string, include_delimiters=False):
# """
# Splits a string into a list using any ansi escape sequence as a delimiter.
#
# :param string: string to be split
# :type string: str
# :param include_delimiters: If True include matched escape sequences in
# the list (default: False)
# :type include_delimiters: bool
# :returns: list of strings, split from original string by escape sequences
# :rtype: list
# """
# global _ansi_re, _ansi_re_group
# if include_delimiters:
# return _ansi_re_group.split(string)
# return _ansi_re.split(string)
, which may contain function names, class names, or code. Output only the next line. | attrs = _pack_attrs(*attrs_list) |
Given the code snippet: <|code_start|>#!/usr/bin/env python3
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make sure we're using Python3
assert sys.version.startswith('3'), "This script is only meant to work with Python3"
# Make sure we get the local imports
this_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, this_dir)
assert ros2_batch_job.__file__.startswith(this_dir), \
<|code_end|>
, generate the next line using the imports in this file:
import os
import sys
import ros2_batch_job
from ros2_batch_job.__main__ import main
and context (functions, classes, or occasionally code) from other files:
# Path: ros2_batch_job/__main__.py
# def main(sysargv=None):
# args = get_args(sysargv=sysargv)
# blacklisted_package_names = []
# if not args.packaging:
# build_function = build_and_test
# blacklisted_package_names += [
# 'actionlib_msgs',
# 'common_interfaces',
# 'cv_bridge',
# 'opencv_tests',
# 'ros1_bridge',
# 'shape_msgs',
# 'stereo_msgs',
# 'vision_opencv',
# ]
# else:
# build_function = build_and_test_and_package
# if sys.platform in ('darwin', 'win32'):
# blacklisted_package_names += [
# 'pendulum_control',
# 'ros1_bridge',
# 'rttest',
# 'tlsf',
# 'tlsf_cpp',
# ]
#
# # There are no Windows debug packages available for PyQt5 and PySide2, so
# # python_qt_bindings can't be imported to run or test rqt_graph or
# # rqt_py_common.
# if sys.platform == 'win32' and args.cmake_build_type == 'Debug':
# blacklisted_package_names.append('rqt_graph')
# blacklisted_package_names.append('rqt_py_common')
# blacklisted_package_names.append('rqt_reconfigure')
#
# # TODO(wjwwood): remove this when a better solution is found, as
# # this is just a work around for https://github.com/ros2/build_cop/issues/161
# # If on Windows, kill any still running `colcon` processes to avoid
# # problems when trying to delete files from pip or the workspace during
# # this job.
# if sys.platform == 'win32':
# os.system('taskkill /f /im colcon.exe')
# time.sleep(2) # wait a bit to avoid a race
#
# return run(args, build_function, blacklisted_package_names=blacklisted_package_names)
. Output only the next line. | "ros2_batch_job was imported from somewhere other than the local directory of this script" |
Predict the next line for this snippet: <|code_start|> ):
loop = get_loop()
# Create the PTY's
stdout_master, stdout_slave = pty.openpty()
if stderr_to_stdout:
stderr_master, stderr_slave = stdout_master, stdout_slave
else:
stderr_master, stderr_slave = pty.openpty()
def protocol_factory():
return protocol_class(None, stdout_master, stderr_master)
# Start the subprocess
if shell is True:
transport, protocol = yield from loop.subprocess_shell(
protocol_factory, " ".join(cmd), cwd=cwd, env=env,
stdout=stdout_slave, stderr=stderr_slave, close_fds=False)
else:
transport, protocol = yield from loop.subprocess_exec(
protocol_factory, *cmd, cwd=cwd, env=env,
stdout=stdout_slave, stderr=stderr_slave, close_fds=False)
# Close our copies of the slaves,
# the child's copy of the slave remain open until it terminates
os.close(stdout_slave)
if not stderr_to_stdout:
os.close(stderr_slave)
# Create Protocol classes
class PtyStdoutProtocol(asyncio.Protocol):
<|code_end|>
with the help of current file imports:
import asyncio
import os
import pty
from ..get_loop_impl import get_loop_impl
and context from other files:
# Path: ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/process_utils/get_loop_impl.py
# def get_loop_impl(asyncio):
# global _thread_local
# if getattr(_thread_local, 'loop_has_been_setup', False):
# return asyncio.get_event_loop()
# # Setup this thread's loop and return it
# if os.name == 'nt':
# loop = asyncio.ProactorEventLoop()
# asyncio.set_event_loop(loop)
# else:
# try:
# loop = asyncio.get_event_loop()
# except AssertionError:
# loop = asyncio.new_event_loop()
# asyncio.set_event_loop(loop)
# _thread_local.loop_has_been_setup = True
# return loop
, which may contain function names, class names, or code. Output only the next line. | def connection_made(self, transport): |
Continue the code snippet: <|code_start|> # create an archive
folder_name = 'ros2-' + args.os
if args.os == 'linux' or args.os == 'osx':
if args.os == 'osx':
machine = platform.machine()
else:
machine = sys.implementation._multiarch.split('-', 1)[0]
archive_path = 'ros2-package-%s-%s.tar.bz2' % (args.os, machine)
def exclude_filter(tarinfo):
if tarinfo.isfile() and os.path.basename(tarinfo.name) == 'SOURCES.txt':
if os.path.dirname(tarinfo.name).endswith('.egg-info'):
return None # returning None will exclude it from the archive
return tarinfo
with tarfile.open(archive_path, 'w:bz2') as h:
h.add(args.installspace, arcname=folder_name, filter=exclude_filter)
elif args.os == 'windows':
archive_path = 'ros2-package-windows-%s.zip' % platform.machine()
with zipfile.ZipFile(archive_path, 'w') as zf:
for dirname, subdirs, files in os.walk(args.installspace):
arcname = os.path.join(
folder_name, os.path.relpath(dirname, start=args.installspace))
zf.write(dirname, arcname=arcname)
for filename in files:
if os.path.basename(filename) == 'SOURCES.txt':
if dirname.endswith('.egg-info'):
continue
filearcname = os.path.join(
folder_name, os.path.relpath(dirname, start=args.installspace), filename)
zf.write(os.path.join(dirname, filename), arcname=filearcname)
<|code_end|>
. Use current file imports:
import glob
import os
import platform
import shutil
import sys
import tarfile
import zipfile
from .util import info
and context (classes, functions, or code) from other files:
# Path: ros2_batch_job/util.py
# def info(*args, **kwargs):
# log("@!II>@| ", *args, **kwargs)
. Output only the next line. | else: |
Given the code snippet: <|code_start|>#!/usr/bin/python3
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version. See http://www.gnu.org/copyleft/lgpl.html for the full text
# of the license.
__author__ = 'Iftikhar Ahmad'
__copyright__ = '(c) 2012 Canonical Ltd.'
tracemalloc.start(25)
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
have_nmcli = shutil.which('nmcli')
<|code_end|>
, generate the next line using the imports in this file:
import os
import re
import shutil
import subprocess
import sys
import unittest
import tracemalloc
import dbus
import dbus.mainloop.glib
import dbusmock
from gi.repository import GLib
from dbusmock.templates.networkmanager import DeviceState
from dbusmock.templates.networkmanager import NM80211ApSecurityFlags
from dbusmock.templates.networkmanager import InfrastructureMode
from dbusmock.templates.networkmanager import NMActiveConnectionState
from dbusmock.templates.networkmanager import NMState
from dbusmock.templates.networkmanager import NMConnectivityState
from dbusmock.templates.networkmanager import (CSETTINGS_IFACE, MANAGER_IFACE,
SETTINGS_OBJ, SETTINGS_IFACE)
and context (functions, classes, or occasionally code) from other files:
# Path: dbusmock/templates/networkmanager.py
# class DeviceState:
# '''Device states
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMDeviceState
# '''
# UNKNOWN = 0
# UNMANAGED = 10
# UNAVAILABLE = 20
# DISCONNECTED = 30
# PREPARE = 40
# CONFIG = 50
# NEED_AUTH = 60
# IP_CONFIG = 70
# IP_CHECK = 80
# SECONDARIES = 90
# ACTIVATED = 100
# DEACTIVATING = 110
# FAILED = 120
#
# Path: dbusmock/templates/networkmanager.py
# class NM80211ApSecurityFlags:
# '''Security flags
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NM80211ApSecurityFlags
# '''
# NM_802_11_AP_SEC_NONE = 0x00000000
# NM_802_11_AP_SEC_PAIR_WEP40 = 0x00000001
# NM_802_11_AP_SEC_PAIR_WEP104 = 0x00000002
# NM_802_11_AP_SEC_PAIR_TKIP = 0x00000004
# NM_802_11_AP_SEC_PAIR_CCMP = 0x00000008
# NM_802_11_AP_SEC_GROUP_WEP40 = 0x00000010
# NM_802_11_AP_SEC_GROUP_WEP104 = 0x00000020
# NM_802_11_AP_SEC_GROUP_TKIP = 0x00000040
# NM_802_11_AP_SEC_GROUP_CCMP = 0x00000080
# NM_802_11_AP_SEC_KEY_MGMT_PSK = 0x00000100
# NM_802_11_AP_SEC_KEY_MGMT_802_1X = 0x00000200
#
# NAME_MAP = {
# NM_802_11_AP_SEC_KEY_MGMT_PSK: {
# 'key-mgmt': 'wpa-psk',
# 'auth-alg': 'open'
# },
# }
#
# Path: dbusmock/templates/networkmanager.py
# class InfrastructureMode:
# '''Infrastructure mode
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NM80211Mode
# '''
# NM_802_11_MODE_UNKNOWN = 0
# NM_802_11_MODE_ADHOC = 1
# NM_802_11_MODE_INFRA = 2
# NM_802_11_MODE_AP = 3
#
# NAME_MAP = {
# NM_802_11_MODE_UNKNOWN: 'unknown',
# NM_802_11_MODE_ADHOC: 'adhoc',
# NM_802_11_MODE_INFRA: 'infrastructure',
# NM_802_11_MODE_AP: 'access-point',
# }
#
# Path: dbusmock/templates/networkmanager.py
# class NMActiveConnectionState:
# '''Active connection state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMActiveConnectionState
# '''
# NM_ACTIVE_CONNECTION_STATE_UNKNOWN = 0
# NM_ACTIVE_CONNECTION_STATE_ACTIVATING = 1
# NM_ACTIVE_CONNECTION_STATE_ACTIVATED = 2
# NM_ACTIVE_CONNECTION_STATE_DEACTIVATING = 3
# NM_ACTIVE_CONNECTION_STATE_DEACTIVATED = 4
#
# Path: dbusmock/templates/networkmanager.py
# class NMState:
# '''Global state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMState
# '''
# NM_STATE_UNKNOWN = 0
# NM_STATE_ASLEEP = 10
# NM_STATE_DISCONNECTED = 20
# NM_STATE_DISCONNECTING = 30
# NM_STATE_CONNECTING = 40
# NM_STATE_CONNECTED_LOCAL = 50
# NM_STATE_CONNECTED_SITE = 60
# NM_STATE_CONNECTED_GLOBAL = 70
#
# Path: dbusmock/templates/networkmanager.py
# class NMConnectivityState:
# '''Connectvity state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMConnectivityState
# '''
# NM_CONNECTIVITY_UNKNOWN = 0
# NM_CONNECTIVITY_NONE = 1
# NM_CONNECTIVITY_PORTAL = 2
# NM_CONNECTIVITY_LIMITED = 3
# NM_CONNECTIVITY_FULL = 4
#
# Path: dbusmock/templates/networkmanager.py
# CSETTINGS_IFACE = 'org.freedesktop.NetworkManager.Settings.Connection'
#
# MANAGER_IFACE = 'org.freedesktop.NetworkManager'
#
# SETTINGS_OBJ = '/org/freedesktop/NetworkManager/Settings'
#
# SETTINGS_IFACE = 'org.freedesktop.NetworkManager.Settings'
. Output only the next line. | @unittest.skipUnless(have_nmcli, 'nmcli not installed') |
Based on the snippet: <|code_start|>#!/usr/bin/python3
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version. See http://www.gnu.org/copyleft/lgpl.html for the full text
# of the license.
__author__ = 'Iftikhar Ahmad'
__copyright__ = '(c) 2012 Canonical Ltd.'
tracemalloc.start(25)
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
have_nmcli = shutil.which('nmcli')
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import re
import shutil
import subprocess
import sys
import unittest
import tracemalloc
import dbus
import dbus.mainloop.glib
import dbusmock
from gi.repository import GLib
from dbusmock.templates.networkmanager import DeviceState
from dbusmock.templates.networkmanager import NM80211ApSecurityFlags
from dbusmock.templates.networkmanager import InfrastructureMode
from dbusmock.templates.networkmanager import NMActiveConnectionState
from dbusmock.templates.networkmanager import NMState
from dbusmock.templates.networkmanager import NMConnectivityState
from dbusmock.templates.networkmanager import (CSETTINGS_IFACE, MANAGER_IFACE,
SETTINGS_OBJ, SETTINGS_IFACE)
and context (classes, functions, sometimes code) from other files:
# Path: dbusmock/templates/networkmanager.py
# class DeviceState:
# '''Device states
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMDeviceState
# '''
# UNKNOWN = 0
# UNMANAGED = 10
# UNAVAILABLE = 20
# DISCONNECTED = 30
# PREPARE = 40
# CONFIG = 50
# NEED_AUTH = 60
# IP_CONFIG = 70
# IP_CHECK = 80
# SECONDARIES = 90
# ACTIVATED = 100
# DEACTIVATING = 110
# FAILED = 120
#
# Path: dbusmock/templates/networkmanager.py
# class NM80211ApSecurityFlags:
# '''Security flags
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NM80211ApSecurityFlags
# '''
# NM_802_11_AP_SEC_NONE = 0x00000000
# NM_802_11_AP_SEC_PAIR_WEP40 = 0x00000001
# NM_802_11_AP_SEC_PAIR_WEP104 = 0x00000002
# NM_802_11_AP_SEC_PAIR_TKIP = 0x00000004
# NM_802_11_AP_SEC_PAIR_CCMP = 0x00000008
# NM_802_11_AP_SEC_GROUP_WEP40 = 0x00000010
# NM_802_11_AP_SEC_GROUP_WEP104 = 0x00000020
# NM_802_11_AP_SEC_GROUP_TKIP = 0x00000040
# NM_802_11_AP_SEC_GROUP_CCMP = 0x00000080
# NM_802_11_AP_SEC_KEY_MGMT_PSK = 0x00000100
# NM_802_11_AP_SEC_KEY_MGMT_802_1X = 0x00000200
#
# NAME_MAP = {
# NM_802_11_AP_SEC_KEY_MGMT_PSK: {
# 'key-mgmt': 'wpa-psk',
# 'auth-alg': 'open'
# },
# }
#
# Path: dbusmock/templates/networkmanager.py
# class InfrastructureMode:
# '''Infrastructure mode
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NM80211Mode
# '''
# NM_802_11_MODE_UNKNOWN = 0
# NM_802_11_MODE_ADHOC = 1
# NM_802_11_MODE_INFRA = 2
# NM_802_11_MODE_AP = 3
#
# NAME_MAP = {
# NM_802_11_MODE_UNKNOWN: 'unknown',
# NM_802_11_MODE_ADHOC: 'adhoc',
# NM_802_11_MODE_INFRA: 'infrastructure',
# NM_802_11_MODE_AP: 'access-point',
# }
#
# Path: dbusmock/templates/networkmanager.py
# class NMActiveConnectionState:
# '''Active connection state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMActiveConnectionState
# '''
# NM_ACTIVE_CONNECTION_STATE_UNKNOWN = 0
# NM_ACTIVE_CONNECTION_STATE_ACTIVATING = 1
# NM_ACTIVE_CONNECTION_STATE_ACTIVATED = 2
# NM_ACTIVE_CONNECTION_STATE_DEACTIVATING = 3
# NM_ACTIVE_CONNECTION_STATE_DEACTIVATED = 4
#
# Path: dbusmock/templates/networkmanager.py
# class NMState:
# '''Global state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMState
# '''
# NM_STATE_UNKNOWN = 0
# NM_STATE_ASLEEP = 10
# NM_STATE_DISCONNECTED = 20
# NM_STATE_DISCONNECTING = 30
# NM_STATE_CONNECTING = 40
# NM_STATE_CONNECTED_LOCAL = 50
# NM_STATE_CONNECTED_SITE = 60
# NM_STATE_CONNECTED_GLOBAL = 70
#
# Path: dbusmock/templates/networkmanager.py
# class NMConnectivityState:
# '''Connectvity state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMConnectivityState
# '''
# NM_CONNECTIVITY_UNKNOWN = 0
# NM_CONNECTIVITY_NONE = 1
# NM_CONNECTIVITY_PORTAL = 2
# NM_CONNECTIVITY_LIMITED = 3
# NM_CONNECTIVITY_FULL = 4
#
# Path: dbusmock/templates/networkmanager.py
# CSETTINGS_IFACE = 'org.freedesktop.NetworkManager.Settings.Connection'
#
# MANAGER_IFACE = 'org.freedesktop.NetworkManager'
#
# SETTINGS_OBJ = '/org/freedesktop/NetworkManager/Settings'
#
# SETTINGS_IFACE = 'org.freedesktop.NetworkManager.Settings'
. Output only the next line. | @unittest.skipUnless(have_nmcli, 'nmcli not installed') |
Given snippet: <|code_start|>#!/usr/bin/python3
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version. See http://www.gnu.org/copyleft/lgpl.html for the full text
# of the license.
__author__ = 'Iftikhar Ahmad'
__copyright__ = '(c) 2012 Canonical Ltd.'
tracemalloc.start(25)
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
have_nmcli = shutil.which('nmcli')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import re
import shutil
import subprocess
import sys
import unittest
import tracemalloc
import dbus
import dbus.mainloop.glib
import dbusmock
from gi.repository import GLib
from dbusmock.templates.networkmanager import DeviceState
from dbusmock.templates.networkmanager import NM80211ApSecurityFlags
from dbusmock.templates.networkmanager import InfrastructureMode
from dbusmock.templates.networkmanager import NMActiveConnectionState
from dbusmock.templates.networkmanager import NMState
from dbusmock.templates.networkmanager import NMConnectivityState
from dbusmock.templates.networkmanager import (CSETTINGS_IFACE, MANAGER_IFACE,
SETTINGS_OBJ, SETTINGS_IFACE)
and context:
# Path: dbusmock/templates/networkmanager.py
# class DeviceState:
# '''Device states
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMDeviceState
# '''
# UNKNOWN = 0
# UNMANAGED = 10
# UNAVAILABLE = 20
# DISCONNECTED = 30
# PREPARE = 40
# CONFIG = 50
# NEED_AUTH = 60
# IP_CONFIG = 70
# IP_CHECK = 80
# SECONDARIES = 90
# ACTIVATED = 100
# DEACTIVATING = 110
# FAILED = 120
#
# Path: dbusmock/templates/networkmanager.py
# class NM80211ApSecurityFlags:
# '''Security flags
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NM80211ApSecurityFlags
# '''
# NM_802_11_AP_SEC_NONE = 0x00000000
# NM_802_11_AP_SEC_PAIR_WEP40 = 0x00000001
# NM_802_11_AP_SEC_PAIR_WEP104 = 0x00000002
# NM_802_11_AP_SEC_PAIR_TKIP = 0x00000004
# NM_802_11_AP_SEC_PAIR_CCMP = 0x00000008
# NM_802_11_AP_SEC_GROUP_WEP40 = 0x00000010
# NM_802_11_AP_SEC_GROUP_WEP104 = 0x00000020
# NM_802_11_AP_SEC_GROUP_TKIP = 0x00000040
# NM_802_11_AP_SEC_GROUP_CCMP = 0x00000080
# NM_802_11_AP_SEC_KEY_MGMT_PSK = 0x00000100
# NM_802_11_AP_SEC_KEY_MGMT_802_1X = 0x00000200
#
# NAME_MAP = {
# NM_802_11_AP_SEC_KEY_MGMT_PSK: {
# 'key-mgmt': 'wpa-psk',
# 'auth-alg': 'open'
# },
# }
#
# Path: dbusmock/templates/networkmanager.py
# class InfrastructureMode:
# '''Infrastructure mode
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NM80211Mode
# '''
# NM_802_11_MODE_UNKNOWN = 0
# NM_802_11_MODE_ADHOC = 1
# NM_802_11_MODE_INFRA = 2
# NM_802_11_MODE_AP = 3
#
# NAME_MAP = {
# NM_802_11_MODE_UNKNOWN: 'unknown',
# NM_802_11_MODE_ADHOC: 'adhoc',
# NM_802_11_MODE_INFRA: 'infrastructure',
# NM_802_11_MODE_AP: 'access-point',
# }
#
# Path: dbusmock/templates/networkmanager.py
# class NMActiveConnectionState:
# '''Active connection state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMActiveConnectionState
# '''
# NM_ACTIVE_CONNECTION_STATE_UNKNOWN = 0
# NM_ACTIVE_CONNECTION_STATE_ACTIVATING = 1
# NM_ACTIVE_CONNECTION_STATE_ACTIVATED = 2
# NM_ACTIVE_CONNECTION_STATE_DEACTIVATING = 3
# NM_ACTIVE_CONNECTION_STATE_DEACTIVATED = 4
#
# Path: dbusmock/templates/networkmanager.py
# class NMState:
# '''Global state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMState
# '''
# NM_STATE_UNKNOWN = 0
# NM_STATE_ASLEEP = 10
# NM_STATE_DISCONNECTED = 20
# NM_STATE_DISCONNECTING = 30
# NM_STATE_CONNECTING = 40
# NM_STATE_CONNECTED_LOCAL = 50
# NM_STATE_CONNECTED_SITE = 60
# NM_STATE_CONNECTED_GLOBAL = 70
#
# Path: dbusmock/templates/networkmanager.py
# class NMConnectivityState:
# '''Connectvity state
#
# As per https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMConnectivityState
# '''
# NM_CONNECTIVITY_UNKNOWN = 0
# NM_CONNECTIVITY_NONE = 1
# NM_CONNECTIVITY_PORTAL = 2
# NM_CONNECTIVITY_LIMITED = 3
# NM_CONNECTIVITY_FULL = 4
#
# Path: dbusmock/templates/networkmanager.py
# CSETTINGS_IFACE = 'org.freedesktop.NetworkManager.Settings.Connection'
#
# MANAGER_IFACE = 'org.freedesktop.NetworkManager'
#
# SETTINGS_OBJ = '/org/freedesktop/NetworkManager/Settings'
#
# SETTINGS_IFACE = 'org.freedesktop.NetworkManager.Settings'
which might include code, classes, or functions. Output only the next line. | @unittest.skipUnless(have_nmcli, 'nmcli not installed') |
Here is a snippet: <|code_start|>###############################################################################
#
# apogee.tools.download: download APOGEE data files
#
###############################################################################
_DR10_URL= 'http://data.sdss3.org/sas/dr10'
_DR12_URL= 'http://data.sdss3.org/sas/dr12'
_DR13_URL= 'http://data.sdss.org/sas/dr13'
_DR14_URL= 'http://data.sdss.org/sas/dr14'
_DR16_URL= 'https://data.sdss.org/sas/dr16'
_DR17_URL= 'https://data.sdss.org/sas/dr17'
_PROPRIETARY_URL= 'https://data.sdss.org/sas/apogeework'
_MAX_NTRIES= 2
_ERASESTR= " "
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
import shutil
import tempfile
import subprocess
import numpy
import astropy.io.fits as apyfits
import warnings
from apogee.tools import path
from . import read as apread
from . import read as apread
from astropy.utils.exceptions import AstropyUserWarning
and context from other files:
# Path: apogee/tools/path.py
# _APOGEE_DATA= os.getenv('SDSS_LOCAL_SAS_MIRROR')
# _APOGEE_DATA= os.getenv('APOGEE_DATA')
# _APOGEE_REDUX= os.getenv('RESULTS_VERS')
# _APOGEE_REDUX= os.getenv('APOGEE_REDUX')
# _APOGEE_ASPCAP_REDUX= os.getenv('APOGEE_ASPCAP_REDUX')
# _APOGEE_APOKASC_REDUX= os.getenv('APOGEE_APOKASC_REDUX')
# _DR10REDUX='v304'
# _DR11REDUX='v402'
# _DR12REDUX='v603'
# _DR13REDUX='l30e.2'
# _DR14REDUX='l31c.2'
# _DR16REDUX='l33'
# _DR17REDUX='dr17'
# _CURRENTREDUX='current'
# _APOGEE_REDUX= _DR12REDUX
# _APOGEE_APOKASC_REDUX= 'v7.3'
# _APOGEE_ASPCAP_REDUX= 'v0.4'
# _ASPCAP= True
# _CODEV= '1'
# if str(dr) == '10': _APOGEE_REDUX=_DR10REDUX
# elif str(dr) == '11': _APOGEE_REDUX=_DR11REDUX
# elif str(dr) == '12': _APOGEE_REDUX=_DR12REDUX
# elif str(dr) == '13': _APOGEE_REDUX=_DR13REDUX
# elif str(dr) == '14': _APOGEE_REDUX=_DR14REDUX
# elif str(dr) == '16': _APOGEE_REDUX=_DR16REDUX
# elif str(dr) == '17': _APOGEE_REDUX=_DR17REDUX
# elif str(dr) == 'current': _APOGEE_REDUX=_CURRENTREDUX
# def apallPath(visit=False):
# def allStarPath(dr=None,lite=False,_old=False,mjd=58104):
# def allVisitPath(dr=None,_old=False,mjd=58104):
# def apokascPath():
# def distPath(dr=None):
# def rcsamplePath(dr=None,_old=False):
# def astroNNPath(dr=None):
# def astroNNDistancesPath(dr=None):
# def astroNNAgesPath(dr=None):
# def obslogPath(year=None, hemisphere=None):
# def apogeeTargetDirPath(dr=None):
# def apogeePlatePath(dr=None):
# def apogeeDesignPath(dr=None):
# def apogeeFieldPath(dr=None):
# def apogeeObjectPath(field_name,dr=None):
# def aspcapStarPath(loc_id,apogee_id,telescope='apo25m',dr=None):
# def apStarPath(loc_id,apogee_id,telescope='apo25m',dr=None):
# def apVisitPath(plateid, mjd, fiberid, telescope='apo25m', dr=None):
# def modelSpecPath(lib='GK',teff=4500,logg=2.5,metals=0.,
# cfe=0.,nfe=0.,afe=0.,vmicro=2.,
# dr=None):
# def ferreModelLibraryPath(lib='GK',pca=True,sixd=True,unf=False,dr=None,
# header=False):
# def modelAtmospherePath(lib='kurucz_filled',teff=4500,logg=2.5,metals=0.,
# cfe=0.,afe=0.,vmicro=2.,dr=None):
# def linelistPath(linelist,dr=None):
# def apWavePath(chip,dr=None):
# def apLSFPath(chip,dr=None):
# def apogeeSpectroReduxDirPath(dr=None):
# def apogeeSpectroASPCAPDirPath(dr=None):
# def apogeeModelSpectroLibraryDirPath(dr=None,lib='GK'):
# def apogeeModelAtmosphereLibraryDirPath(dr=None,lib='kurucz_filled'):
# def change_dr(dr=None):
# def _default_dr():
# def _redux_dr(dr=None):
# def _py2_round(fl):
# def _modelAtmKurucz_metalsString(metals):
# def _modelAtmKurucz_cfeString(cfe,metals):
# def _modelAtmKurucz_afeString(afe,metals):
# def _modelAtmKurucz_teffString(teff):
# def _modelAtmKurucz_loggString(logg,teff):
, which may include functions, classes, or code. Output only the next line. | def allStar(dr=None,lite=False,mjd=58104): |
Using the snippet: <|code_start|>###############################################################################
# apogee.spec.lsf: Utilities to work with APOGEE LSFs
###############################################################################
try:
fitsread = fitsio.read
except ImportError:
<|code_end|>
, determine the next line of code. You have imports:
import os, os.path
import warnings
import math
import numpy
import scipy.sparse.linalg
import fitsio
import astropy.io.fits as pyfits
import apogee.tools.read as apread
import apogee.tools.path as appath
from functools import wraps
from scipy import special, interpolate, sparse, ndimage
from apogee.tools.download import _download_file
from apogee.spec.plot import apStarWavegrid
and context (class names, function names, or code) available:
# Path: apogee/tools/download.py
# def _download_file(downloadPath,filePath,dr,verbose=False,spider=False):
# sys.stdout.write('\r'+"Downloading file %s ...\r" \
# % (os.path.basename(filePath)))
# sys.stdout.flush()
# try:
# # make all intermediate directories
# os.makedirs(os.path.dirname(filePath))
# except OSError: pass
# # Safe way of downloading
# downloading= True
# interrupted= False
# file, tmp_savefilename= tempfile.mkstemp()
# os.close(file) #Easier this way
# ntries= 1
# while downloading:
# try:
# cmd= ['wget','%s' % downloadPath,
# '-O','%s' % tmp_savefilename,
# '--read-timeout=10',
# '--tries=3']
# if not verbose: cmd.append('-q')
# if spider: cmd.append('--spider')
# subprocess.check_call(cmd)
# if not spider: shutil.move(tmp_savefilename,filePath)
# downloading= False
# if interrupted:
# raise KeyboardInterrupt
# except subprocess.CalledProcessError as e:
# if not downloading: #Assume KeyboardInterrupt
# raise
# elif 'exit status 5' in str(e):
# raise IOError("Download failed because of wget SSL certification error; you can turn off SSL certification checking by setting the option\n\ncheck_certificate = off\n\nin the file $HOME/.wgetrc (create this if it does not exist)")
# elif ntries > _MAX_NTRIES:
# raise IOError('File %s does not appear to exist on the server (as %s) ...' % (os.path.basename(filePath),downloadPath))
# elif not 'exit status 4' in str(e):
# interrupted= True
# os.remove(tmp_savefilename)
# except OSError as e:
# if e.errno == os.errno.ENOENT:
# raise OSError("Automagically downloading catalogs and data files requires the wget program; please install wget and try again...")
# else:
# raise
# finally:
# if os.path.exists(tmp_savefilename):
# os.remove(tmp_savefilename)
# # Try the mirror and the data both
# if ntries % 2 == 1:
# downloadPath= downloadPath.replace('data.sdss','mirror.sdss')
# else:
# downloadPath= downloadPath.replace('mirror.sdss','data.sdss')
# ntries+= 1
# sys.stdout.write('\r'+_ERASESTR+'\r')
# sys.stdout.flush()
# return None
#
# Path: apogee/spec/plot.py
# _LAMBDASUB= 15000
# _STARTENDSKIP= 30
# def specPlotInputDecorator(func):
# def input_wrapper(*args,**kwargs):
# def waveregions(*args,**kwargs):
# def detector(*args,**kwargs):
# def windows(*args,**kwargs):
# def highres(*args,**kwargs):
# def highres2pdf(*args,**kwargs):
# def elements(elem,*args,**kwargs):
# def _mark_lines(linewavs,wavemin,wavemax,thisax,lams,spec):
# def _label_all_lines(wavemin,wavemax,thisax,lams,spec,noMolecLines=False):
# def _label_lines(elem,wavemin,wavemax,thisax,lams,spec):
. Output only the next line. | fitsread= pyfits.getdata |
Given snippet: <|code_start|>###############################################################################
# apogee.spec.lsf: Utilities to work with APOGEE LSFs
###############################################################################
try:
fitsread = fitsio.read
except ImportError:
fitsread= pyfits.getdata
_SQRTTWO= numpy.sqrt(2.)
# Load wavelength solutions
_WAVEPIX_A= apread.apWave('a',ext=2)
_WAVEPIX_B= apread.apWave('b',ext=2)
_WAVEPIX_C= apread.apWave('c',ext=2)
def convolve(wav,spec,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os, os.path
import warnings
import math
import numpy
import scipy.sparse.linalg
import fitsio
import astropy.io.fits as pyfits
import apogee.tools.read as apread
import apogee.tools.path as appath
from functools import wraps
from scipy import special, interpolate, sparse, ndimage
from apogee.tools.download import _download_file
from apogee.spec.plot import apStarWavegrid
and context:
# Path: apogee/tools/download.py
# def _download_file(downloadPath,filePath,dr,verbose=False,spider=False):
# sys.stdout.write('\r'+"Downloading file %s ...\r" \
# % (os.path.basename(filePath)))
# sys.stdout.flush()
# try:
# # make all intermediate directories
# os.makedirs(os.path.dirname(filePath))
# except OSError: pass
# # Safe way of downloading
# downloading= True
# interrupted= False
# file, tmp_savefilename= tempfile.mkstemp()
# os.close(file) #Easier this way
# ntries= 1
# while downloading:
# try:
# cmd= ['wget','%s' % downloadPath,
# '-O','%s' % tmp_savefilename,
# '--read-timeout=10',
# '--tries=3']
# if not verbose: cmd.append('-q')
# if spider: cmd.append('--spider')
# subprocess.check_call(cmd)
# if not spider: shutil.move(tmp_savefilename,filePath)
# downloading= False
# if interrupted:
# raise KeyboardInterrupt
# except subprocess.CalledProcessError as e:
# if not downloading: #Assume KeyboardInterrupt
# raise
# elif 'exit status 5' in str(e):
# raise IOError("Download failed because of wget SSL certification error; you can turn off SSL certification checking by setting the option\n\ncheck_certificate = off\n\nin the file $HOME/.wgetrc (create this if it does not exist)")
# elif ntries > _MAX_NTRIES:
# raise IOError('File %s does not appear to exist on the server (as %s) ...' % (os.path.basename(filePath),downloadPath))
# elif not 'exit status 4' in str(e):
# interrupted= True
# os.remove(tmp_savefilename)
# except OSError as e:
# if e.errno == os.errno.ENOENT:
# raise OSError("Automagically downloading catalogs and data files requires the wget program; please install wget and try again...")
# else:
# raise
# finally:
# if os.path.exists(tmp_savefilename):
# os.remove(tmp_savefilename)
# # Try the mirror and the data both
# if ntries % 2 == 1:
# downloadPath= downloadPath.replace('data.sdss','mirror.sdss')
# else:
# downloadPath= downloadPath.replace('mirror.sdss','data.sdss')
# ntries+= 1
# sys.stdout.write('\r'+_ERASESTR+'\r')
# sys.stdout.flush()
# return None
#
# Path: apogee/spec/plot.py
# _LAMBDASUB= 15000
# _STARTENDSKIP= 30
# def specPlotInputDecorator(func):
# def input_wrapper(*args,**kwargs):
# def waveregions(*args,**kwargs):
# def detector(*args,**kwargs):
# def windows(*args,**kwargs):
# def highres(*args,**kwargs):
# def highres2pdf(*args,**kwargs):
# def elements(elem,*args,**kwargs):
# def _mark_lines(linewavs,wavemin,wavemax,thisax,lams,spec):
# def _label_all_lines(wavemin,wavemax,thisax,lams,spec,noMolecLines=False):
# def _label_lines(elem,wavemin,wavemax,thisax,lams,spec):
which might include code, classes, or functions. Output only the next line. | lsf=None,xlsf=None,dxlsf=None,fiber='combo', |
Using the snippet: <|code_start|># coding: utf-8
from __future__ import division, print_function, unicode_literals, \
absolute_import
class GeneratorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = np.random.rand(100, 3) * 10 - 5
cls.df = pd.DataFrame(cls.data, columns=["x", "y", "z"])
func_dict = {"sin": "np.sin",
"sum": "lambda d: d.sum(axis=1)",
"nest": "lambda d: np.log(np.exp(d['x']))"}
cls.generator = FuncGenerator(func_dict=func_dict)
def test_describe(self):
results = self.generator.describe(self.df)
np.testing.assert_array_equal(np.sin(self.data),
results[["sin x", "sin y", "sin z"]])
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import os
import json
import numpy as np
import pandas as pd
from pymatgen import Structure
from veidt.describer.general import FuncGenerator, MultiDescriber
from veidt.describer.structural_describer import DistinctSiteProperty
and context (class names, function names, or code) available:
# Path: veidt/describer/general.py
# class FuncGenerator(Describer):
# """
# General transformer for arrays. In principle, any numerical
# operations can be done as long as each involved function has a
# NumPy.ufunc implementation, e.g., np.sin, np.exp...
# """
#
# def __init__(self, func_dict, append=True):
# """
# :param func_dict: Dict with labels as keys and stringified
# function as values. The functions arerecovered from strings
# using eval() built-in function. All functions should be
# pointing to a NumPy.ufunc since the calculations will be
# performed on array-like objects. For functions implemented
# elsewhere other than in NumPy, e.g., functions in
# scipy.special, please make sure the module is imported.
# :param append: Whether return the full DataFrame with inputs.
# Default to True.
# """
# self.func_dict = func_dict
# self.append = append
#
# def describe(self, df):
# """
# Returns description of an object based on all functions.
#
# :param df: DataFrame with input data.
# :return: DataFrame with transformed data.
# """
# collector = []
# for k, v in self.func_dict.items():
# data = eval(v)(df)
# if isinstance(data, pd.Series):
# data.name = k
# elif isinstance(data, pd.DataFrame):
# columns = [k + " " + c for c in data.columns]
# data.columns = columns
# collector.append(data)
# new_df = pd.concat(collector, axis=1)
# if self.append:
# new_df = df.join(new_df)
# return new_df
#
# class MultiDescriber(Describer):
# """
# This is a generic multiple describer that allows one to combine multiple
# describers.
# """
#
# def __init__(self, describers):
# """
# :param describers: List of describers. Note that the application of the
# Describers is from left to right. E.g., [Describer1(), Describer2()]
# will run Describer1.describe on the input object, and then run
# Describer2 on the output from Describer1.describe. This provides
# a powerful way to combine multiple describers to generate generic
# descriptors and basis functions.
# """
# self.describers = describers
#
# def describe(self, obj):
# desc = obj
# for d in self.describers:
# desc = d.describe(desc)
# return desc
#
# Path: veidt/describer/structural_describer.py
# class DistinctSiteProperty(Describer):
# """
# Constructs a describer based on properties of distinct sites in a
# structure. For now, this assumes that there is only one type of species in
# a particular Wyckoff site.
# """
#
# # todo: generalize to multiple sites with the same Wyckoff.
#
# def fit(self, structures, target=None):
# return self
#
# def __init__(self, wyckoffs, properties, symprec=0.1):
# """
# :param wyckoffs: List of wyckoff symbols. E.g., ["48a", "24c"]
# :param properties: Sequence of specie properties. E.g.,
# ["atomic_radius"]. Look at pymatgen.core.periodic_table.Element and
# pymatgen.core.periodic_table.Specie for support properties (there
# are a lot!)
# :param symprec: Symmetry precision for spacegroup determination.
# """
# self.wyckoffs = wyckoffs
# self.properties = properties
# self.symprec = symprec
#
# def describe(self, structure):
# a = SpacegroupAnalyzer(structure, self.symprec)
# symm = a.get_symmetrized_structure()
# data = []
# names = []
# for w in self.wyckoffs:
# site = symm.equivalent_sites[symm.wyckoff_symbols.index(w)][0]
# for p in self.properties:
# data.append(getattr(site.specie, p))
# names.append("%s-%s" % (w, p))
# return pd.DataFrame([data], columns=names)
. Output only the next line. | np.testing.assert_array_equal(np.sum(self.data, axis=1), |
Predict the next line after this snippet: <|code_start|> results = self.generator.describe(self.df)
np.testing.assert_array_equal(np.sin(self.data),
results[["sin x", "sin y", "sin z"]])
np.testing.assert_array_equal(np.sum(self.data, axis=1),
results["sum"])
np.testing.assert_array_almost_equal(self.data[:, 0],
results["nest"])
def test_serialize(self):
json_str = json.dumps(self.generator.as_dict())
recover = FuncGenerator.from_dict(json.loads(json_str))
class MultiDescriberTest(unittest.TestCase):
def test_describe(self):
li2o = Structure.from_file(os.path.join(os.path.dirname(__file__),
"../../tests/Li2O.cif"))
na2o = Structure.from_file(os.path.join(os.path.dirname(__file__),
"../../tests/Na2O.cif"))
d1 = DistinctSiteProperty(['8c', '4a'], ["Z", "atomic_radius"])
d2 = FuncGenerator(func_dict={"exp": "np.exp"}, append=False)
d = MultiDescriber([d1, d2])
results = d.describe(li2o)
self.assertAlmostEqual(results.iloc[0]["exp 8c-Z"], np.exp(3))
self.assertAlmostEqual(results.iloc[0]["exp 8c-atomic_radius"],
np.exp(1.45))
df = d.describe_all([li2o, na2o])
<|code_end|>
using the current file's imports:
import unittest
import os
import json
import numpy as np
import pandas as pd
from pymatgen import Structure
from veidt.describer.general import FuncGenerator, MultiDescriber
from veidt.describer.structural_describer import DistinctSiteProperty
and any relevant context from other files:
# Path: veidt/describer/general.py
# class FuncGenerator(Describer):
# """
# General transformer for arrays. In principle, any numerical
# operations can be done as long as each involved function has a
# NumPy.ufunc implementation, e.g., np.sin, np.exp...
# """
#
# def __init__(self, func_dict, append=True):
# """
# :param func_dict: Dict with labels as keys and stringified
#         function as values. The functions are recovered from strings
# using eval() built-in function. All functions should be
# pointing to a NumPy.ufunc since the calculations will be
# performed on array-like objects. For functions implemented
# elsewhere other than in NumPy, e.g., functions in
# scipy.special, please make sure the module is imported.
# :param append: Whether return the full DataFrame with inputs.
# Default to True.
# """
# self.func_dict = func_dict
# self.append = append
#
# def describe(self, df):
# """
# Returns description of an object based on all functions.
#
# :param df: DataFrame with input data.
# :return: DataFrame with transformed data.
# """
# collector = []
# for k, v in self.func_dict.items():
# data = eval(v)(df)
# if isinstance(data, pd.Series):
# data.name = k
# elif isinstance(data, pd.DataFrame):
# columns = [k + " " + c for c in data.columns]
# data.columns = columns
# collector.append(data)
# new_df = pd.concat(collector, axis=1)
# if self.append:
# new_df = df.join(new_df)
# return new_df
#
# class MultiDescriber(Describer):
# """
# This is a generic multiple describer that allows one to combine multiple
# describers.
# """
#
# def __init__(self, describers):
# """
# :param describers: List of describers. Note that the application of the
# Describers is from left to right. E.g., [Describer1(), Describer2()]
# will run Describer1.describe on the input object, and then run
# Describer2 on the output from Describer1.describe. This provides
# a powerful way to combine multiple describers to generate generic
# descriptors and basis functions.
# """
# self.describers = describers
#
# def describe(self, obj):
# desc = obj
# for d in self.describers:
# desc = d.describe(desc)
# return desc
#
# Path: veidt/describer/structural_describer.py
# class DistinctSiteProperty(Describer):
# """
# Constructs a describer based on properties of distinct sites in a
# structure. For now, this assumes that there is only one type of species in
# a particular Wyckoff site.
# """
#
# # todo: generalize to multiple sites with the same Wyckoff.
#
# def fit(self, structures, target=None):
# return self
#
# def __init__(self, wyckoffs, properties, symprec=0.1):
# """
# :param wyckoffs: List of wyckoff symbols. E.g., ["48a", "24c"]
# :param properties: Sequence of specie properties. E.g.,
# ["atomic_radius"]. Look at pymatgen.core.periodic_table.Element and
# pymatgen.core.periodic_table.Specie for support properties (there
# are a lot!)
# :param symprec: Symmetry precision for spacegroup determination.
# """
# self.wyckoffs = wyckoffs
# self.properties = properties
# self.symprec = symprec
#
# def describe(self, structure):
# a = SpacegroupAnalyzer(structure, self.symprec)
# symm = a.get_symmetrized_structure()
# data = []
# names = []
# for w in self.wyckoffs:
# site = symm.equivalent_sites[symm.wyckoff_symbols.index(w)][0]
# for p in self.properties:
# data.append(getattr(site.specie, p))
# names.append("%s-%s" % (w, p))
# return pd.DataFrame([data], columns=names)
. Output only the next line. | self.assertAlmostEqual(df.iloc[0]["exp 8c-Z"], np.exp(3)) |
Using the snippet: <|code_start|># coding: utf-8
from __future__ import division, print_function, unicode_literals, \
absolute_import
class GeneratorTest(unittest.TestCase):
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import os
import json
import numpy as np
import pandas as pd
from pymatgen import Structure
from veidt.describer.general import FuncGenerator, MultiDescriber
from veidt.describer.structural_describer import DistinctSiteProperty
and context (class names, function names, or code) available:
# Path: veidt/describer/general.py
# class FuncGenerator(Describer):
# """
# General transformer for arrays. In principle, any numerical
# operations can be done as long as each involved function has a
# NumPy.ufunc implementation, e.g., np.sin, np.exp...
# """
#
# def __init__(self, func_dict, append=True):
# """
# :param func_dict: Dict with labels as keys and stringified
#         function as values. The functions are recovered from strings
# using eval() built-in function. All functions should be
# pointing to a NumPy.ufunc since the calculations will be
# performed on array-like objects. For functions implemented
# elsewhere other than in NumPy, e.g., functions in
# scipy.special, please make sure the module is imported.
# :param append: Whether return the full DataFrame with inputs.
# Default to True.
# """
# self.func_dict = func_dict
# self.append = append
#
# def describe(self, df):
# """
# Returns description of an object based on all functions.
#
# :param df: DataFrame with input data.
# :return: DataFrame with transformed data.
# """
# collector = []
# for k, v in self.func_dict.items():
# data = eval(v)(df)
# if isinstance(data, pd.Series):
# data.name = k
# elif isinstance(data, pd.DataFrame):
# columns = [k + " " + c for c in data.columns]
# data.columns = columns
# collector.append(data)
# new_df = pd.concat(collector, axis=1)
# if self.append:
# new_df = df.join(new_df)
# return new_df
#
# class MultiDescriber(Describer):
# """
# This is a generic multiple describer that allows one to combine multiple
# describers.
# """
#
# def __init__(self, describers):
# """
# :param describers: List of describers. Note that the application of the
# Describers is from left to right. E.g., [Describer1(), Describer2()]
# will run Describer1.describe on the input object, and then run
# Describer2 on the output from Describer1.describe. This provides
# a powerful way to combine multiple describers to generate generic
# descriptors and basis functions.
# """
# self.describers = describers
#
# def describe(self, obj):
# desc = obj
# for d in self.describers:
# desc = d.describe(desc)
# return desc
#
# Path: veidt/describer/structural_describer.py
# class DistinctSiteProperty(Describer):
# """
# Constructs a describer based on properties of distinct sites in a
# structure. For now, this assumes that there is only one type of species in
# a particular Wyckoff site.
# """
#
# # todo: generalize to multiple sites with the same Wyckoff.
#
# def fit(self, structures, target=None):
# return self
#
# def __init__(self, wyckoffs, properties, symprec=0.1):
# """
# :param wyckoffs: List of wyckoff symbols. E.g., ["48a", "24c"]
# :param properties: Sequence of specie properties. E.g.,
# ["atomic_radius"]. Look at pymatgen.core.periodic_table.Element and
# pymatgen.core.periodic_table.Specie for support properties (there
# are a lot!)
# :param symprec: Symmetry precision for spacegroup determination.
# """
# self.wyckoffs = wyckoffs
# self.properties = properties
# self.symprec = symprec
#
# def describe(self, structure):
# a = SpacegroupAnalyzer(structure, self.symprec)
# symm = a.get_symmetrized_structure()
# data = []
# names = []
# for w in self.wyckoffs:
# site = symm.equivalent_sites[symm.wyckoff_symbols.index(w)][0]
# for p in self.properties:
# data.append(getattr(site.specie, p))
# names.append("%s-%s" % (w, p))
# return pd.DataFrame([data], columns=names)
. Output only the next line. | @classmethod |
Continue the code snippet: <|code_start|>
file_path = os.path.dirname(__file__)
def test_func():
return 1
class TestMetrics(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x1 = np.array([1, 2, 3])
cls.x2 = np.array([4, 5, 6])
cls.x3 = np.array([1, 1, 1])
cls.x4 = np.array([1, 2, 3])
def test_mae(self):
mae = get('mae')
self.assertEqual(mae(self.x1, self.x2), 3)
self.assertAlmostEqual(mae(self.x3, self.x4), 1)
<|code_end|>
. Use current file imports:
import unittest
import numpy as np
import os
from veidt.metrics import get, serialize, deserialize
and context (classes, functions, or code) from other files:
# Path: veidt/metrics.py
# def get(identifier):
# if isinstance(identifier, dict):
# config = {'class_name': identifier['class_name'], 'config': identifier['config']}
# return deserialize(config)
# elif isinstance(identifier, str):
# return deserialize(identifier)
# elif callable(identifier):
# return identifier
# else:
# raise ValueError('Could not interpret '
# 'metric function identifier:', identifier)
#
# def serialize(metric):
# return serialize_veidt_object(metric)
#
# def deserialize(config):
# return deserialize_veidt_object(config,
# module_objects=globals(),
# printable_module_name='metric function')
. Output only the next line. | def test_mse(self): |
Here is a snippet: <|code_start|>
file_path = os.path.dirname(__file__)
def test_func():
return 1
class TestMetrics(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x1 = np.array([1, 2, 3])
<|code_end|>
. Write the next line using the current file imports:
import unittest
import numpy as np
import os
from veidt.metrics import get, serialize, deserialize
and context from other files:
# Path: veidt/metrics.py
# def get(identifier):
# if isinstance(identifier, dict):
# config = {'class_name': identifier['class_name'], 'config': identifier['config']}
# return deserialize(config)
# elif isinstance(identifier, str):
# return deserialize(identifier)
# elif callable(identifier):
# return identifier
# else:
# raise ValueError('Could not interpret '
# 'metric function identifier:', identifier)
#
# def serialize(metric):
# return serialize_veidt_object(metric)
#
# def deserialize(config):
# return deserialize_veidt_object(config,
# module_objects=globals(),
# printable_module_name='metric function')
, which may include functions, classes, or code. Output only the next line. | cls.x2 = np.array([4, 5, 6]) |
Given the following code snippet before the placeholder: <|code_start|>
file_path = os.path.dirname(__file__)
def test_func():
return 1
class TestMetrics(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x1 = np.array([1, 2, 3])
cls.x2 = np.array([4, 5, 6])
cls.x3 = np.array([1, 1, 1])
cls.x4 = np.array([1, 2, 3])
def test_mae(self):
mae = get('mae')
self.assertEqual(mae(self.x1, self.x2), 3)
self.assertAlmostEqual(mae(self.x3, self.x4), 1)
<|code_end|>
, predict the next line using imports from the current file:
import unittest
import numpy as np
import os
from veidt.metrics import get, serialize, deserialize
and context including class names, function names, and sometimes code from other files:
# Path: veidt/metrics.py
# def get(identifier):
# if isinstance(identifier, dict):
# config = {'class_name': identifier['class_name'], 'config': identifier['config']}
# return deserialize(config)
# elif isinstance(identifier, str):
# return deserialize(identifier)
# elif callable(identifier):
# return identifier
# else:
# raise ValueError('Could not interpret '
# 'metric function identifier:', identifier)
#
# def serialize(metric):
# return serialize_veidt_object(metric)
#
# def deserialize(config):
# return deserialize_veidt_object(config,
# module_objects=globals(),
# printable_module_name='metric function')
. Output only the next line. | def test_mse(self): |
Given snippet: <|code_start|>
def test_func():
return 1
class DummyClass:
def __init__(self):
self.name = 'dummy'
def get_config(self):
return {"config": "Dummyclass config"}
class TestGeneralUtil(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x1 = np.array([1, 2, 3])
cls.x2 = np.array([4, 5, 6])
cls.x3 = np.array([1, 1, 1])
cls.x4 = np.array([1, 2, 3])
def test_serialization(self):
self.assertEqual(serialize_veidt_object(test_func), "test_func")
self.assertEqual(serialize_veidt_object(DummyClass())['class_name'], "DummyClass")
self.assertIsNone(serialize_veidt_object(None))
with self.assertRaises(ValueError):
serialize_veidt_object("Not a object")
def test_deserialization(self):
self.assertEqual(1, deserialize_veidt_object('test_func', module_objects=globals())())
self.assertIsInstance(deserialize_veidt_object({"class_name": "DummyClass",
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import numpy as np
import os
from veidt.utils.general_utils import serialize_veidt_object, deserialize_veidt_object
and context:
# Path: veidt/utils/general_utils.py
# def serialize_veidt_object(instance):
# if instance is None:
# return None
# if hasattr(instance, 'get_config'):
# return {
# 'class_name': instance.__class__.__name__,
# 'config': instance.get_config()
# }
# if hasattr(instance, '__name__'):
# return instance.__name__
# else:
# raise ValueError('Cannot serialize', instance)
#
# def deserialize_veidt_object(identifier, module_objects=None,
# printable_module_name='object'):
# if isinstance(identifier, dict):
# # dealing with configuration dictionary
# config = identifier
# if 'class_name' not in config or 'config' not in config:
# raise ValueError('Improper config format: ' + str(config))
# class_name = config['class_name']
# module_objects = module_objects or {}
# cls = module_objects.get(class_name)
# if cls is None:
# raise ValueError('Unknown ' + printable_module_name +
# ': ' + class_name)
# return cls(**config['config'])
#
# elif isinstance(identifier, str):
# function_name = identifier
# fn = module_objects.get(function_name)
# if fn is None:
# raise ValueError('Unknown ' + printable_module_name +
# ':' + function_name)
# return fn
which might include code, classes, or functions. Output only the next line. | "config": {}}, module_objects=globals()), DummyClass) |
Predict the next line for this snippet: <|code_start|>
# now the kernels are defined as functions
# For future development the kernel should be class
# with tunable parameters, this is particular useful for Bayesian methods
def rbf(x1, x2, sigma):
d_squared = np.sum((x1[:, None, :] - x2[None, :, :]) ** 2, axis=2)
return np.exp(-d_squared / (2 * sigma ** 2))
def get_kernel(identifier):
if isinstance(identifier, dict):
config = {'class_name': identifier['class_name'], 'config': identifier['config']}
return deserialize_veidt_object(config, module_objects=globals())
elif isinstance(identifier, str):
return deserialize_veidt_object(identifier, module_objects=globals())
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
<|code_end|>
with the help of current file imports:
import numpy as np
from veidt.utils.general_utils import deserialize_veidt_object
and context from other files:
# Path: veidt/utils/general_utils.py
# def deserialize_veidt_object(identifier, module_objects=None,
# printable_module_name='object'):
# if isinstance(identifier, dict):
# # dealing with configuration dictionary
# config = identifier
# if 'class_name' not in config or 'config' not in config:
# raise ValueError('Improper config format: ' + str(config))
# class_name = config['class_name']
# module_objects = module_objects or {}
# cls = module_objects.get(class_name)
# if cls is None:
# raise ValueError('Unknown ' + printable_module_name +
# ': ' + class_name)
# return cls(**config['config'])
#
# elif isinstance(identifier, str):
# function_name = identifier
# fn = module_objects.get(function_name)
# if fn is None:
# raise ValueError('Unknown ' + printable_module_name +
# ':' + function_name)
# return fn
, which may contain function names, class names, or code. Output only the next line. | 'metric function identifier:', identifier) |
Predict the next line for this snippet: <|code_start|> for d in self.test_pool:
self.test_structures.append(d['structure'])
self.test_energies.append(d['outputs']['energy'])
self.test_forces.append(d['outputs']['forces'])
self.test_stresses.append(d['outputs']['virial_stress'])
def test_pool_from(self):
test_pool = pool_from(self.test_structures, self.test_energies,
self.test_forces, self.test_stresses)
for p1, p2 in zip(test_pool, self.test_pool):
self.assertEqual(p1['outputs']['energy'], p2['outputs']['energy'])
self.assertEqual(p1['outputs']['forces'], p2['outputs']['forces'])
self.assertEqual(p1['outputs']['virial_stress'],
p2['outputs']['virial_stress'])
def test_convert_docs(self):
_, df = convert_docs(self.test_pool, include_stress=False)
test_energies = df[df['dtype'] == 'energy']['y_orig']
self.assertFalse(np.any(test_energies - self.test_energies))
test_forces = df[df['dtype'] == 'force']['y_orig']
for force1, force2 in zip(test_forces, np.array(self.test_forces).ravel()):
self.assertEqual(force1, force2)
_, df = convert_docs(self.test_pool, include_stress=True)
test_energies = df[df['dtype'] == 'energy']['y_orig']
self.assertFalse(np.any(test_energies - self.test_energies))
test_forces = df[df['dtype'] == 'force']['y_orig']
for force1, force2 in zip(test_forces, np.array(self.test_forces).ravel()):
self.assertEqual(force1, force2)
test_stresses = df[df['dtype'] == 'stress']['y_orig']
<|code_end|>
with the help of current file imports:
import os
import shutil
import tempfile
import unittest
import numpy as np
from monty.serialization import loadfn
from veidt.potential.processing import pool_from, convert_docs
and context from other files:
# Path: veidt/potential/processing.py
# def pool_from(structures, energies=None, forces=None, stresses=None):
# """
# Method to convert structures and their properties in to
# datapool format.
# Args:
# structures ([Structure]): The list of Pymatgen Structure object.
# energies ([float]): The list of total energies of each structure
# in structures list.
# forces ([np.array]): List of (m, 3) forces array of each structure
# with m atoms in structures list. m can be varied with each
# single structure case.
# stresses (list): List of (6, ) virial stresses of each
# structure in structures list.
# Returns:
# ([dict])
# """
# energies = energies if energies else [None] * len(structures)
# forces = forces if forces else [None] * len(structures)
# stresses = stresses if stresses else [None] * len(structures)
# datapool = [doc_from(structure, energy, force, stress)
# for structure, energy, force, stress
# in zip(structures, energies, forces, stresses)]
# return datapool
#
# def convert_docs(docs, include_stress=False, **kwargs):
# """
# Method to convert a list of docs into objects, e.g.,
# Structure and DataFrame.
# Args:
# docs ([dict]): List of docs. Each doc should have the same
# format as one returned from .dft.parse_dir.
# include_stress (bool): Whether to include stress.
# Returns:
# A list of structures, and a DataFrame with energy and force
# data in 'y_orig' column, data type ('energy' or 'force') in
# 'dtype' column, No. of atoms in 'n' column sharing the same row
# of energy data while 'n' being 1 for the rows of force data.
# """
# structures, y_orig, n, dtype = [], [], [], []
# for d in docs:
# if isinstance(d['structure'], dict):
# structure = Structure.from_dict(d['structure'])
# else:
# structure = d['structure']
# outputs = d['outputs']
# force_arr = np.array(outputs['forces'])
# assert force_arr.shape == (len(structure), 3), \
# 'Wrong force array not matching structure'
# force_arr = force_arr.ravel()
#
# if include_stress:
# stress_arr = np.array(outputs['virial_stress'])
# y = np.concatenate(([outputs['energy']], force_arr, stress_arr))
# n.append(np.insert(np.ones(len(y) - 1), 0, d['num_atoms']))
# dtype.extend(['energy'] + ['force'] * len(force_arr) + ['stress'] * 6)
# else:
# y = np.concatenate(([outputs['energy']], force_arr))
# n.append(np.insert(np.ones(len(y) - 1), 0, d['num_atoms']))
# dtype.extend(['energy'] + ['force'] * len(force_arr))
# y_orig.append(y)
# structures.append(structure)
# df = pd.DataFrame(dict(y_orig=np.concatenate(y_orig), n=np.concatenate(n),
# dtype=dtype))
# for k, v in kwargs.items():
# df[k] = v
# return structures, df
, which may contain function names, class names, or code. Output only the next line. | for stress1, stress2 in zip(test_stresses, np.array(self.test_stresses).ravel()): |
Next line prediction: <|code_start|># coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
CWD = os.getcwd()
test_datapool = loadfn(os.path.join(os.path.dirname(__file__), 'datapool.json'))
class PorcessingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.this_dir = os.path.dirname(os.path.abspath(__file__))
cls.test_dir = tempfile.mkdtemp()
os.chdir(cls.test_dir)
@classmethod
def tearDownClass(cls):
os.chdir(CWD)
shutil.rmtree(cls.test_dir)
def setUp(self):
self.test_pool = test_datapool
self.test_structures = []
self.test_energies = []
self.test_forces = []
<|code_end|>
. Use current file imports:
(import os
import shutil
import tempfile
import unittest
import numpy as np
from monty.serialization import loadfn
from veidt.potential.processing import pool_from, convert_docs)
and context including class names, function names, or small code snippets from other files:
# Path: veidt/potential/processing.py
# def pool_from(structures, energies=None, forces=None, stresses=None):
# """
# Method to convert structures and their properties in to
# datapool format.
# Args:
# structures ([Structure]): The list of Pymatgen Structure object.
# energies ([float]): The list of total energies of each structure
# in structures list.
# forces ([np.array]): List of (m, 3) forces array of each structure
# with m atoms in structures list. m can be varied with each
# single structure case.
# stresses (list): List of (6, ) virial stresses of each
# structure in structures list.
# Returns:
# ([dict])
# """
# energies = energies if energies else [None] * len(structures)
# forces = forces if forces else [None] * len(structures)
# stresses = stresses if stresses else [None] * len(structures)
# datapool = [doc_from(structure, energy, force, stress)
# for structure, energy, force, stress
# in zip(structures, energies, forces, stresses)]
# return datapool
#
# def convert_docs(docs, include_stress=False, **kwargs):
# """
# Method to convert a list of docs into objects, e.g.,
# Structure and DataFrame.
# Args:
# docs ([dict]): List of docs. Each doc should have the same
# format as one returned from .dft.parse_dir.
# include_stress (bool): Whether to include stress.
# Returns:
# A list of structures, and a DataFrame with energy and force
# data in 'y_orig' column, data type ('energy' or 'force') in
# 'dtype' column, No. of atoms in 'n' column sharing the same row
# of energy data while 'n' being 1 for the rows of force data.
# """
# structures, y_orig, n, dtype = [], [], [], []
# for d in docs:
# if isinstance(d['structure'], dict):
# structure = Structure.from_dict(d['structure'])
# else:
# structure = d['structure']
# outputs = d['outputs']
# force_arr = np.array(outputs['forces'])
# assert force_arr.shape == (len(structure), 3), \
# 'Wrong force array not matching structure'
# force_arr = force_arr.ravel()
#
# if include_stress:
# stress_arr = np.array(outputs['virial_stress'])
# y = np.concatenate(([outputs['energy']], force_arr, stress_arr))
# n.append(np.insert(np.ones(len(y) - 1), 0, d['num_atoms']))
# dtype.extend(['energy'] + ['force'] * len(force_arr) + ['stress'] * 6)
# else:
# y = np.concatenate(([outputs['energy']], force_arr))
# n.append(np.insert(np.ones(len(y) - 1), 0, d['num_atoms']))
# dtype.extend(['energy'] + ['force'] * len(force_arr))
# y_orig.append(y)
# structures.append(structure)
# df = pd.DataFrame(dict(y_orig=np.concatenate(y_orig), n=np.concatenate(n),
# dtype=dtype))
# for k, v in kwargs.items():
# df[k] = v
# return structures, df
. Output only the next line. | self.test_stresses = [] |
Here is a snippet: <|code_start|> :return: list, a list of species string
"""
return [i.specie.name for i in self.structure if i.specie.name in self.species_map.values()]
def copy(self):
"""
Copy a new StateStructure
:return: StateStructure
"""
return self.__class__(self.structure.copy(), self.state_dict.copy(), self.species_map)
def __str__(self):
"""
string representation of the StateStructure
:return: string
"""
return ' '.join([str(i) + str(j.state) for i, j in self.state_dict.items() if i != 'temperature'])
class Chain(object):
"""
A chain of states class
To do, need to check states variable changes over steps"""
def __init__(self):
self.chain = defaultdict(list)
self.length = 0
self.current_state = None
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import itertools
from collections import defaultdict
from .base import State, StateStructure, StaticState
and context from other files:
# Path: veidt/monte_carlo/base.py
# class State(metaclass=ABCMeta):
# def __init__(self, state, name=None):
# """
# Base class for State.
# :param state: scalar or iterables
# :param name: string name
# """
# self.state = state
# self.check_state()
# self.name = name
# self.label = name
#
# @abstractmethod
# def change(self):
# """
# Change state to new state
# :return:
# """
# pass
#
# def check_state(self):
# """
# Check if the state make sense, raise Error if not
# :return:
# """
# pass
#
# def __eq__(self, other):
# """
# Compare two states
# :param other: other state object
# :return: bool
# """
# if isinstance(self.state, Iterable):
# return all([i == j for i, j in zip(self.state, other.state)])
# else:
# return self.state == other.state
#
# def copy(self):
# """
# copy a state
# :return: new state object with same state variable
# """
# new_state = self.__class__(copy(self.state), self.name)
#
# # copy other attributes
# # new_state.__dict__.update({i: j for i, j in self.__dict__.items() if i not in ['state', 'name']})
# return new_state
#
# class StateStructure(metaclass=ABCMeta):
# """
# Structure with StateDict to describe the states
# Each structure will be associated with a collection of state and can be converted to or from the states
# """
#
# def __init__(self, structure, state_dict):
# """
#
# :param structure: pymatgen structure
# :param state_dict: StateDict object
# """
# self.structure = structure.copy()
# self.state_dict = state_dict.copy()
#
# @abstractmethod
# def structure_from_states(self, state_dict):
# """
# Convert the state into pymatgen structure
# : param state_dict: StateDict object
# :return: structure corresponding to the state dictionary
# """
# pass
#
# @abstractmethod
# def structure_to_states(self, structure):
# """
# Convert structure to corresponding state dictionary
# :param structure: pymatgen structure
# :return: state dictionary
# """
# pass
#
# def to_states(self):
# """
# Convert the object to state dictionary
# :return: StateDict object
# """
# return self.structure_to_states(self.structure)
#
# def from_states(self, state_dict):
# """
# Convert a state dictionary into structure
# :param state_dict: StateDict object
# :return:
# """
# self.structure = self.structure_from_states(state_dict)
# self.state_dict = state_dict
#
# def change(self):
# """
# Perform state changes for all items in the state dictionary and
# update the structure
# :return:
# """
# [i.change() for i in self.state_dict.values()]
# self.from_states(self.state_dict)
#
# class StaticState(State):
# """
# StaticState does not change the state when calling the change method
# """
#
# def change(self):
# pass
, which may include functions, classes, or code. Output only the next line. | def append(self, state_dict): |
Here is a snippet: <|code_start|> def to_specie_list(self):
"""
Convert the spin list to species list using the species_map
:return: list, a list of species string
"""
return [i.specie.name for i in self.structure if i.specie.name in self.species_map.values()]
def copy(self):
"""
Copy a new StateStructure
:return: StateStructure
"""
return self.__class__(self.structure.copy(), self.state_dict.copy(), self.species_map)
def __str__(self):
"""
string representation of the StateStructure
:return: string
"""
return ' '.join([str(i) + str(j.state) for i, j in self.state_dict.items() if i != 'temperature'])
class Chain(object):
"""
A chain of states class
To do, need to check states variable changes over steps"""
def __init__(self):
self.chain = defaultdict(list)
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import itertools
from collections import defaultdict
from .base import State, StateStructure, StaticState
and context from other files:
# Path: veidt/monte_carlo/base.py
# class State(metaclass=ABCMeta):
# def __init__(self, state, name=None):
# """
# Base class for State.
# :param state: scalar or iterables
# :param name: string name
# """
# self.state = state
# self.check_state()
# self.name = name
# self.label = name
#
# @abstractmethod
# def change(self):
# """
# Change state to new state
# :return:
# """
# pass
#
# def check_state(self):
# """
# Check if the state make sense, raise Error if not
# :return:
# """
# pass
#
# def __eq__(self, other):
# """
# Compare two states
# :param other: other state object
# :return: bool
# """
# if isinstance(self.state, Iterable):
# return all([i == j for i, j in zip(self.state, other.state)])
# else:
# return self.state == other.state
#
# def copy(self):
# """
# copy a state
# :return: new state object with same state variable
# """
# new_state = self.__class__(copy(self.state), self.name)
#
# # copy other attributes
# # new_state.__dict__.update({i: j for i, j in self.__dict__.items() if i not in ['state', 'name']})
# return new_state
#
# class StateStructure(metaclass=ABCMeta):
# """
# Structure with StateDict to describe the states
# Each structure will be associated with a collection of state and can be converted to or from the states
# """
#
# def __init__(self, structure, state_dict):
# """
#
# :param structure: pymatgen structure
# :param state_dict: StateDict object
# """
# self.structure = structure.copy()
# self.state_dict = state_dict.copy()
#
# @abstractmethod
# def structure_from_states(self, state_dict):
# """
# Convert the state into pymatgen structure
# : param state_dict: StateDict object
# :return: structure corresponding to the state dictionary
# """
# pass
#
# @abstractmethod
# def structure_to_states(self, structure):
# """
# Convert structure to corresponding state dictionary
# :param structure: pymatgen structure
# :return: state dictionary
# """
# pass
#
# def to_states(self):
# """
# Convert the object to state dictionary
# :return: StateDict object
# """
# return self.structure_to_states(self.structure)
#
# def from_states(self, state_dict):
# """
# Convert a state dictionary into structure
# :param state_dict: StateDict object
# :return:
# """
# self.structure = self.structure_from_states(state_dict)
# self.state_dict = state_dict
#
# def change(self):
# """
# Perform state changes for all items in the state dictionary and
# update the structure
# :return:
# """
# [i.change() for i in self.state_dict.values()]
# self.from_states(self.state_dict)
#
# class StaticState(State):
# """
# StaticState does not change the state when calling the change method
# """
#
# def change(self):
# pass
, which may include functions, classes, or code. Output only the next line. | self.length = 0 |
Here is a snippet: <|code_start|>
file_path = os.path.dirname(__file__)
def test_func():
return 1
class TestKernel(unittest.TestCase):
@classmethod
<|code_end|>
. Write the next line using the current file imports:
import unittest
import numpy as np
import os
from veidt.kernel import rbf, get_kernel
and context from other files:
# Path: veidt/kernel.py
# def rbf(x1, x2, sigma):
# d_squared = np.sum((x1[:, None, :] - x2[None, :, :]) ** 2, axis=2)
# return np.exp(-d_squared / (2 * sigma ** 2))
#
# def get_kernel(identifier):
# if isinstance(identifier, dict):
# config = {'class_name': identifier['class_name'], 'config': identifier['config']}
# return deserialize_veidt_object(config, module_objects=globals())
# elif isinstance(identifier, str):
# return deserialize_veidt_object(identifier, module_objects=globals())
# elif callable(identifier):
# return identifier
# else:
# raise ValueError('Could not interpret '
# 'metric function identifier:', identifier)
, which may include functions, classes, or code. Output only the next line. | def setUpClass(cls): |
Given the following code snippet before the placeholder: <|code_start|>
file_path = os.path.dirname(__file__)
def test_func():
return 1
class TestKernel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x1 = np.array([[1, 2], [1, 2]])
cls.x2 = np.array([[2, 3], [2, 3]])
cls.sigma = 1
def test_rbf(self):
self.assertAlmostEqual(np.sum(rbf(self.x1, self.x2, self.sigma)).item(),
4.*np.exp(-2/2))
def test_get_kernel(self):
rbf2 = get_kernel('rbf')
self.assertAlmostEqual(np.sum(rbf2(self.x1, self.x2, self.sigma)).item(),
4. * np.exp(-2 / 2))
test_callable = get_kernel(test_func)
self.assertEqual(1, test_callable())
test_dict = get_kernel({"class_name": "rbf",
"config": {"x1": np.array([[1, 2], [1, 2]]),
"x2": np.array([[2, 3], [2, 3]]),
"sigma": 1}})
<|code_end|>
, predict the next line using imports from the current file:
import unittest
import numpy as np
import os
from veidt.kernel import rbf, get_kernel
and context including class names, function names, and sometimes code from other files:
# Path: veidt/kernel.py
# def rbf(x1, x2, sigma):
# d_squared = np.sum((x1[:, None, :] - x2[None, :, :]) ** 2, axis=2)
# return np.exp(-d_squared / (2 * sigma ** 2))
#
# def get_kernel(identifier):
# if isinstance(identifier, dict):
# config = {'class_name': identifier['class_name'], 'config': identifier['config']}
# return deserialize_veidt_object(config, module_objects=globals())
# elif isinstance(identifier, str):
# return deserialize_veidt_object(identifier, module_objects=globals())
# elif callable(identifier):
# return identifier
# else:
# raise ValueError('Could not interpret '
# 'metric function identifier:', identifier)
. Output only the next line. | self.assertAlmostEqual(np.sum(test_dict).item(), |
Here is a snippet: <|code_start|>
class TestMultiSpeciesNN(unittest.TestCase):
def test_create_atomic_nn(self):
keras_input = Input(shape=(None, 3))
keras_output = create_atomic_nn(keras_input, [3, 10, 1])
model = Model(inputs=keras_input, outputs=keras_output)
model.compile(loss='mse', optimizer=Adam(1e-2))
x = np.array([[[0.1, 0.2, 0.3], [0.15, 0.36, 0.6]]])
y = [1.0]
<|code_end|>
. Write the next line using the current file imports:
from veidt.model.multi_species_nn import base_model, create_atomic_nn
from keras.layers import Input, Dense, Lambda, Add, Multiply
from keras.models import Model
from keras.optimizers import Adam
import numpy as np
import unittest
and context from other files:
# Path: veidt/model/multi_species_nn.py
# def base_model(layers, species, learning_rate=1e-3):
# """
# Build a multi-specie model that predicts the energy per atom
# Since the we cannot guarantee that all structures will have the same number of atoms
# nor same number of atoms in the specific specie type, the default input x should have the dimension
# [(1, nb_specie_1, feature_dim), (1, nb_specie_2, feature_dim), ..., (1, nb_specie_n, feature_dim)]
# Essentially, the fit step takes one structure-energy/atom pair at a time.
#
# One can also group the same structure types (same nb of atoms in each specie category) together
# so that the x has the dimension
# [(nb_structure, nb_specie_1, feature_dim), (nb_structure, nb_specie_2, feature_dim), ...,
# (nb_structure, nb_specie_n, feature_dim)], in this case the target y for fitting is a numpy array having
# dimension (nb_structure, 1)
#
# :param layers: list, number of neurons in each layer for all the species
# :param species: list, list of species list
# :param learning_rate: float, learning rate for Adam optimizer
# :return: keras model
# """
# outputs = []
# inputs = []
# atom_nums = []
# for i, _ in enumerate(species):
# # create input layer for each specie
# keras_input = Input(shape=(None, layers[0]))
# # calculate the atom number for each specie
# atom_nums.append(Lambda(lambda x: K.cast(K.shape(x)[1], dtype='float'),
# output_shape=(1,))(keras_input))
# inputs.append(keras_input)
# # create the output for each specie category
# outputs.append(create_atomic_nn(keras_input, layers))
# # calculate the total energy by adding the specie total energy
# outputs = Add()(outputs)
# # calculate the total number of atoms
# total_num_atom = Add()(atom_nums)
# # calculate the energy per atom
# num_inv = Lambda(lambda x: 1. / x, output_shape=(1,))(total_num_atom)
# outputs = Multiply()([outputs, num_inv])
# # construct the keras model
# model = Model(inputs=inputs, outputs=outputs)
# # compile model with mean squared error as loss function and Adam as optimizer
# model.compile(loss='mse', optimizer=Adam(learning_rate))
# return model
#
# def create_atomic_nn(keras_input, layers):
# """
# Create a basic multi-layer perceptron model for each specie and output the sum of energies for all atoms
# of that specie.
# In the structure, the features of one specie are a [nb_atom, feature_dim] matrix,
# the output of this function is a number, i.e., the total energy.
# Therefore, the keras_input argument should have a shape of [None, feature_dim].
#
# :param keras_input: Keras layer, the input dimension is [None, feature_dim], where the None dimension is the nb of
# atoms
# :param layers: list, layer configuration, e.g., [30, 31, 32, 1] indicates
# that the input dimension is 30, with two hidden layers having 31 and 32 units and 1 output
# :return: Keras layer, layer object for latter steps
# """
# for i in range(len(layers) - 2):
# keras_input = Dense(layers[i + 1], activation='relu')(keras_input)
# keras_input = Dense(layers[-1])(keras_input)
# keras_output = Lambda(lambda x: K.sum(x, axis=1), output_shape=(1,))(keras_input)
# return keras_output
, which may include functions, classes, or code. Output only the next line. | model.fit(x, y, epochs=100, verbose=False) |
Continue the code snippet: <|code_start|>
class TestMultiSpeciesNN(unittest.TestCase):
def test_create_atomic_nn(self):
keras_input = Input(shape=(None, 3))
keras_output = create_atomic_nn(keras_input, [3, 10, 1])
model = Model(inputs=keras_input, outputs=keras_output)
model.compile(loss='mse', optimizer=Adam(1e-2))
x = np.array([[[0.1, 0.2, 0.3], [0.15, 0.36, 0.6]]])
y = [1.0]
model.fit(x, y, epochs=100, verbose=False)
pred = model.predict(x)
#print(pred)
self.assertAlmostEqual(y[0], pred[0][0], 1)
def test_base_model(self):
model = base_model([3, 10, 1], ['A', 'B'], learning_rate=1e-2)
# this simulates the case where there are 4 atom A and 6 atom B in
# the structure and the energy per atom is 0.1
features = [np.random.randn(1, 4, 3), np.random.randn(1, 6, 3)]
outputs = [0.1]
<|code_end|>
. Use current file imports:
from veidt.model.multi_species_nn import base_model, create_atomic_nn
from keras.layers import Input, Dense, Lambda, Add, Multiply
from keras.models import Model
from keras.optimizers import Adam
import numpy as np
import unittest
and context (classes, functions, or code) from other files:
# Path: veidt/model/multi_species_nn.py
# def base_model(layers, species, learning_rate=1e-3):
# """
# Build a multi-specie model that predicts the energy per atom
# Since the we cannot guarantee that all structures will have the same number of atoms
# nor same number of atoms in the specific specie type, the default input x should have the dimension
# [(1, nb_specie_1, feature_dim), (1, nb_specie_2, feature_dim), ..., (1, nb_specie_n, feature_dim)]
# Essentially, the fit step takes one structure-energy/atom pair at a time.
#
# One can also group the same structure types (same nb of atoms in each specie category) together
# so that the x has the dimension
# [(nb_structure, nb_specie_1, feature_dim), (nb_structure, nb_specie_2, feature_dim), ...,
# (nb_structure, nb_specie_n, feature_dim)], in this case the target y for fitting is a numpy array having
# dimension (nb_structure, 1)
#
# :param layers: list, number of neurons in each layer for all the species
# :param species: list, list of species list
# :param learning_rate: float, learning rate for Adam optimizer
# :return: keras model
# """
# outputs = []
# inputs = []
# atom_nums = []
# for i, _ in enumerate(species):
# # create input layer for each specie
# keras_input = Input(shape=(None, layers[0]))
# # calculate the atom number for each specie
# atom_nums.append(Lambda(lambda x: K.cast(K.shape(x)[1], dtype='float'),
# output_shape=(1,))(keras_input))
# inputs.append(keras_input)
# # create the output for each specie category
# outputs.append(create_atomic_nn(keras_input, layers))
# # calculate the total energy by adding the specie total energy
# outputs = Add()(outputs)
# # calculate the total number of atoms
# total_num_atom = Add()(atom_nums)
# # calculate the energy per atom
# num_inv = Lambda(lambda x: 1. / x, output_shape=(1,))(total_num_atom)
# outputs = Multiply()([outputs, num_inv])
# # construct the keras model
# model = Model(inputs=inputs, outputs=outputs)
# # compile model with mean squared error as loss function and Adam as optimizer
# model.compile(loss='mse', optimizer=Adam(learning_rate))
# return model
#
# def create_atomic_nn(keras_input, layers):
# """
# Create a basic multi-layer perceptron model for each specie and output the sum of energies for all atoms
# of that specie.
# In the structure, the features of one specie are a [nb_atom, feature_dim] matrix,
# the output of this function is a number, i.e., the total energy.
# Therefore, the keras_input argument should have a shape of [None, feature_dim].
#
# :param keras_input: Keras layer, the input dimension is [None, feature_dim], where the None dimension is the nb of
# atoms
# :param layers: list, layer configuration, e.g., [30, 31, 32, 1] indicates
# that the input dimension is 30, with two hidden layers having 31 and 32 units and 1 output
# :return: Keras layer, layer object for latter steps
# """
# for i in range(len(layers) - 2):
# keras_input = Dense(layers[i + 1], activation='relu')(keras_input)
# keras_input = Dense(layers[-1])(keras_input)
# keras_output = Lambda(lambda x: K.sum(x, axis=1), output_shape=(1,))(keras_input)
# return keras_output
. Output only the next line. | model.fit(features, outputs, epochs=100, verbose=0) |
Next line prediction: <|code_start|>
def binary_accuracy(y_true, y_pred):
return np.mean(np.array(y_true).ravel() == np.array(y_pred).ravel())
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
def serialize(metric):
return serialize_veidt_object(metric)
def deserialize(config):
return deserialize_veidt_object(config,
module_objects=globals(),
printable_module_name='metric function')
def get(identifier):
if isinstance(identifier, dict):
config = {'class_name': identifier['class_name'], 'config': identifier['config']}
return deserialize(config)
elif isinstance(identifier, str):
return deserialize(identifier)
elif callable(identifier):
<|code_end|>
. Use current file imports:
(import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from veidt.utils.general_utils import deserialize_veidt_object
from veidt.utils.general_utils import serialize_veidt_object)
and context including class names, function names, or small code snippets from other files:
# Path: veidt/utils/general_utils.py
# def deserialize_veidt_object(identifier, module_objects=None,
# printable_module_name='object'):
# if isinstance(identifier, dict):
# # dealing with configuration dictionary
# config = identifier
# if 'class_name' not in config or 'config' not in config:
# raise ValueError('Improper config format: ' + str(config))
# class_name = config['class_name']
# module_objects = module_objects or {}
# cls = module_objects.get(class_name)
# if cls is None:
# raise ValueError('Unknown ' + printable_module_name +
# ': ' + class_name)
# return cls(**config['config'])
#
# elif isinstance(identifier, str):
# function_name = identifier
# fn = module_objects.get(function_name)
# if fn is None:
# raise ValueError('Unknown ' + printable_module_name +
# ':' + function_name)
# return fn
#
# Path: veidt/utils/general_utils.py
# def serialize_veidt_object(instance):
# if instance is None:
# return None
# if hasattr(instance, 'get_config'):
# return {
# 'class_name': instance.__class__.__name__,
# 'config': instance.get_config()
# }
# if hasattr(instance, '__name__'):
# return instance.__name__
# else:
# raise ValueError('Cannot serialize', instance)
. Output only the next line. | return identifier |
Using the snippet: <|code_start|> self.model.fit(x, y)
return self
def predict(self, x):
return self.model.predict(x)
class TestDescrber(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dd = DummyDescriber()
def test_fit(self):
dd2 = self.dd.fit([1, 2, 3])
self.assertEqual(dd2, self.dd)
def test_describe(self):
result = self.dd.describe([1, 2, 3])
self.assertEqual(result.values[0], 6)
def test_describe_all(self):
results = self.dd.describe_all([[1, 1, 1], [2, 2, 2]])
self.assertListEqual(list(results.shape), [2])
results_transform = self.dd.transform([[1, 1, 1], [2, 2, 2]])
self.assertEqual(9, np.sum(results_transform))
class TestModel(unittest.TestCase):
@classmethod
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import numpy as np
import os
import pandas as pd
from veidt.abstract import Describer, Model
from sklearn.linear_model import LinearRegression
and context (class names, function names, or code) available:
# Path: veidt/abstract.py
# class Describer(BaseEstimator, MSONable, TransformerMixin):
# """
# Base class for a Describer, i.e., something that converts an object to a
# describer, typically a numerical representation useful for machine
# learning.
# """
#
# def fit(self, objs, targets=None):
# """Fit the describer.
# In the case that the describer relies on parameters calculated from training data,
# this method should be rewritten to store the fitted parameters
# """
# return self
#
# def transform(self, objs):
# """Transform the input objects"""
# return self.describe_all(objs).values
#
# @abc.abstractmethod
# def describe(self, obj):
# """
# Converts an obj to a descriptor vector.
#
# :param obj: Object
# :return: Descriptor for a structure. Recommended format is a pandas
# Dataframe object with the column names as intuitive names.
# For example, a simple site describer of the fractional coordinates
# (this is usually a bad describer, so it is just for illustration
# purposes) can be generated as::
#
# print(pd.DataFrame(s.frac_coords, columns=["a", "b", "c"]))
# a b c
# 0 0.000000 0.000000 0.000000
# 1 0.750178 0.750178 0.750178
# 2 0.249822 0.249822 0.249822
#
# Pandas dataframes can be dumped to a variety of formats (json, csv,
# etc.) easily. Note that a dataframe should be used even if you have
# only one line, i.e., do not use Series objects unless you know
# what you are doing.
# """
# pass
#
# def describe_all(self, objs):
# """
# Convenience method to convert a list of objects to a list of
# descriptors. Default implementation simply loops a call to describe, but
# in some instances, a batch implementation may be more efficient.
#
# :param objs: List of objects
#
# :return: Concatenated descriptors for all objects. Recommended format
# is a pandas DataFrame. Default implement returns a list of
# descriptors generated by a loop call to describe for each obj.
# """
# return pd.concat([self.describe(o) for o in objs])
#
# class Model(BaseEstimator, MSONable):
# """
# Abstract Base class for a Model. Basically, it usually wraps around a deep
# learning package, e.g., the Sequential Model in Keras, but provides for
# transparent conversion of arbitrary input and outputs.
# """
#
# @abc.abstractmethod
# def fit(self, features, targets, **kwargs):
# """
#
# :param features: Numerical input feature list or numpy array with dim (m, n)
# where m is the number of data and n is the feature dimension
# :param targets: Numerical output target list, or numpy array with dim (m, )
# """
# pass
#
# @abc.abstractmethod
# def predict(self, features):
# """
# Predict the values given a set of inputs based on fitted model.
#
# :param features: List of input features
#
# :return: List of output objects
# """
# pass
#
# def evaluate(self, features, targets, metrics=['mae'], multi_targets=False):
# """
# evaluate the performance of model based on metric.
# :return: dict of metric evaluations
# """
# pred_targets = self.predict(features)
# evaluation = {}
# if not multi_targets:
# targets = [targets]
# pred_targets = [pred_targets]
# for metric in metrics:
# veidt_metric = get(metric)
# evaluation[metric] = [veidt_metric(np.array(target).ravel(), np.array(pred_target).ravel()) for
# target, pred_target in zip(targets, pred_targets)]
# return evaluation
. Output only the next line. | def setUpClass(cls): |
Next line prediction: <|code_start|> def predict(self, x):
return self.model.predict(x)
class TestDescrber(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dd = DummyDescriber()
def test_fit(self):
dd2 = self.dd.fit([1, 2, 3])
self.assertEqual(dd2, self.dd)
def test_describe(self):
result = self.dd.describe([1, 2, 3])
self.assertEqual(result.values[0], 6)
def test_describe_all(self):
results = self.dd.describe_all([[1, 1, 1], [2, 2, 2]])
self.assertListEqual(list(results.shape), [2])
results_transform = self.dd.transform([[1, 1, 1], [2, 2, 2]])
self.assertEqual(9, np.sum(results_transform))
class TestModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = DummyModel()
<|code_end|>
. Use current file imports:
(import unittest
import numpy as np
import os
import pandas as pd
from veidt.abstract import Describer, Model
from sklearn.linear_model import LinearRegression)
and context including class names, function names, or small code snippets from other files:
# Path: veidt/abstract.py
# class Describer(BaseEstimator, MSONable, TransformerMixin):
# """
# Base class for a Describer, i.e., something that converts an object to a
# describer, typically a numerical representation useful for machine
# learning.
# """
#
# def fit(self, objs, targets=None):
# """Fit the describer.
# In the case that the describer relies on parameters calculated from training data,
# this method should be rewritten to store the fitted parameters
# """
# return self
#
# def transform(self, objs):
# """Transform the input objects"""
# return self.describe_all(objs).values
#
# @abc.abstractmethod
# def describe(self, obj):
# """
# Converts an obj to a descriptor vector.
#
# :param obj: Object
# :return: Descriptor for a structure. Recommended format is a pandas
# Dataframe object with the column names as intuitive names.
# For example, a simple site describer of the fractional coordinates
# (this is usually a bad describer, so it is just for illustration
# purposes) can be generated as::
#
# print(pd.DataFrame(s.frac_coords, columns=["a", "b", "c"]))
# a b c
# 0 0.000000 0.000000 0.000000
# 1 0.750178 0.750178 0.750178
# 2 0.249822 0.249822 0.249822
#
# Pandas dataframes can be dumped to a variety of formats (json, csv,
# etc.) easily. Note that a dataframe should be used even if you have
# only one line, i.e., do not use Series objects unless you know
# what you are doing.
# """
# pass
#
# def describe_all(self, objs):
# """
# Convenience method to convert a list of objects to a list of
# descriptors. Default implementation simply loops a call to describe, but
# in some instances, a batch implementation may be more efficient.
#
# :param objs: List of objects
#
# :return: Concatenated descriptors for all objects. Recommended format
# is a pandas DataFrame. Default implement returns a list of
# descriptors generated by a loop call to describe for each obj.
# """
# return pd.concat([self.describe(o) for o in objs])
#
# class Model(BaseEstimator, MSONable):
# """
# Abstract Base class for a Model. Basically, it usually wraps around a deep
# learning package, e.g., the Sequential Model in Keras, but provides for
# transparent conversion of arbitrary input and outputs.
# """
#
# @abc.abstractmethod
# def fit(self, features, targets, **kwargs):
# """
#
# :param features: Numerical input feature list or numpy array with dim (m, n)
# where m is the number of data and n is the feature dimension
# :param targets: Numerical output target list, or numpy array with dim (m, )
# """
# pass
#
# @abc.abstractmethod
# def predict(self, features):
# """
# Predict the values given a set of inputs based on fitted model.
#
# :param features: List of input features
#
# :return: List of output objects
# """
# pass
#
# def evaluate(self, features, targets, metrics=['mae'], multi_targets=False):
# """
# evaluate the performance of model based on metric.
# :return: dict of metric evaluations
# """
# pred_targets = self.predict(features)
# evaluation = {}
# if not multi_targets:
# targets = [targets]
# pred_targets = [pred_targets]
# for metric in metrics:
# veidt_metric = get(metric)
# evaluation[metric] = [veidt_metric(np.array(target).ravel(), np.array(pred_target).ravel()) for
# target, pred_target in zip(targets, pred_targets)]
# return evaluation
. Output only the next line. | def test_fit(self): |
Given snippet: <|code_start|># Generated by Django 2.2.8 on 2019-12-23 18:00
def synopsis(pr, make_searchable=False):
self = pr
def verbosify(val, units=None, pre=None, pre_whitespace=True, post=None, post_whitespace=True):
elaborated = ""
if val is not None and val != '':
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.db import migrations, models
from bom.utils import strip_trailing_zeros
and context:
# Path: bom/utils.py
# def strip_trailing_zeros(num):
# found = False
# for c in num:
# if c.isdigit():
# found = True
# elif c not in ['-', '+', '.']:
# found = False
# break
# return ('%f' % float(num)).rstrip('0').rstrip('.') if found else num
which might include code, classes, or functions. Output only the next line. | try: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.